aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/DocBook/mac80211.tmpl12
-rw-r--r--Documentation/feature-removal-schedule.txt18
-rw-r--r--Documentation/networking/LICENSE.qlge46
-rw-r--r--Documentation/networking/can.txt44
-rw-r--r--Documentation/networking/multiqueue.txt54
-rw-r--r--Documentation/networking/phonet.txt121
-rw-r--r--Documentation/networking/regulatory.txt194
-rw-r--r--Documentation/rfkill.txt32
-rw-r--r--MAINTAINERS20
-rw-r--r--arch/arm/mach-kirkwood/db88f6281-bp-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/rd88f6192-nas-setup.c2
-rw-r--r--arch/arm/mach-kirkwood/rd88f6281-setup.c2
-rw-r--r--arch/arm/mach-loki/lb88rc8480-setup.c2
-rw-r--r--arch/arm/mach-mv78xx0/common.c6
-rw-r--r--arch/arm/mach-mv78xx0/db78x00-bp-setup.c8
-rw-r--r--arch/arm/mach-orion5x/db88f5281-setup.c2
-rw-r--r--arch/arm/mach-orion5x/dns323-setup.c2
-rw-r--r--arch/arm/mach-orion5x/kurobox_pro-setup.c2
-rw-r--r--arch/arm/mach-orion5x/mss2-setup.c2
-rw-r--r--arch/arm/mach-orion5x/mv2120-setup.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5181l-ge-setup.c2
-rw-r--r--arch/arm/mach-orion5x/rd88f5182-setup.c2
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c3
-rw-r--r--arch/arm/mach-orion5x/tsx09-common.c2
-rw-r--r--arch/arm/mach-orion5x/wnr854t-setup.c2
-rw-r--r--arch/arm/mach-orion5x/wrt350n-v2-setup.c2
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c6
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/toshiba_acpi.c261
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/horizon.c8
-rw-r--r--drivers/atm/idt77252.c32
-rw-r--r--drivers/atm/idt77252.h4
-rw-r--r--drivers/atm/zatm.c6
-rw-r--r--drivers/block/aoe/aoe.h9
-rw-r--r--drivers/block/aoe/aoeblk.c8
-rw-r--r--drivers/block/aoe/aoechr.c8
-rw-r--r--drivers/block/aoe/aoecmd.c85
-rw-r--r--drivers/block/aoe/aoedev.c12
-rw-r--r--drivers/block/aoe/aoemain.c1
-rw-r--r--drivers/block/aoe/aoenet.c9
-rw-r--r--drivers/bluetooth/hci_bcsp.c18
-rw-r--r--drivers/bluetooth/hci_usb.h10
-rw-r--r--drivers/isdn/capi/kcapi.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_pci.h4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c23
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c352
-rw-r--r--drivers/isdn/mISDN/timerdev.c22
-rw-r--r--drivers/net/3c505.c4
-rw-r--r--drivers/net/8139cp.c14
-rw-r--r--drivers/net/8139too.c7
-rw-r--r--drivers/net/Kconfig55
-rw-r--r--drivers/net/Makefile6
-rw-r--r--drivers/net/arcnet/arcnet.c18
-rw-r--r--drivers/net/arcnet/com20020.c16
-rw-r--r--drivers/net/atl1e/atl1e_hw.c6
-rw-r--r--drivers/net/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/atlx/Makefile2
-rw-r--r--drivers/net/atlx/atl2.c3129
-rw-r--r--drivers/net/atlx/atl2.h530
-rw-r--r--drivers/net/au1000_eth.c3
-rw-r--r--drivers/net/ax88796.c14
-rw-r--r--drivers/net/bfin_mac.c8
-rw-r--r--drivers/net/bnx2.c22
-rw-r--r--drivers/net/bnx2.h5
-rw-r--r--drivers/net/bnx2x_main.c95
-rw-r--r--drivers/net/bonding/bond_alb.c28
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/bonding/bonding.h10
-rw-r--r--drivers/net/cassini.c56
-rw-r--r--drivers/net/cassini.h1522
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/adapter.h3
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c8
-rw-r--r--drivers/net/cxgb3/l2t.c39
-rw-r--r--drivers/net/cxgb3/l2t.h3
-rw-r--r--drivers/net/cxgb3/sge.c80
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000.h17
-rw-r--r--drivers/net/e1000/e1000_main.c416
-rw-r--r--drivers/net/e1000e/82571.c153
-rw-r--r--drivers/net/e1000e/defines.h15
-rw-r--r--drivers/net/e1000e/e1000.h31
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/ethtool.c60
-rw-r--r--drivers/net/e1000e/hw.h15
-rw-r--r--drivers/net/e1000e/ich8lan.c175
-rw-r--r--drivers/net/e1000e/lib.c7
-rw-r--r--drivers/net/e1000e/netdev.c441
-rw-r--r--drivers/net/e1000e/param.c27
-rw-r--r--drivers/net/e1000e/phy.c194
-rw-r--r--drivers/net/ehea/ehea.h4
-rw-r--r--drivers/net/ehea/ehea_phyp.c2
-rw-r--r--drivers/net/ehea/ehea_qmr.c3
-rw-r--r--drivers/net/enc28j60.c56
-rw-r--r--drivers/net/enic/Makefile5
-rw-r--r--drivers/net/enic/cq_desc.h79
-rw-r--r--drivers/net/enic/cq_enet_desc.h169
-rw-r--r--drivers/net/enic/enic.h114
-rw-r--r--drivers/net/enic/enic_main.c1934
-rw-r--r--drivers/net/enic/enic_res.c370
-rw-r--r--drivers/net/enic/enic_res.h151
-rw-r--r--drivers/net/enic/rq_enet_desc.h60
-rw-r--r--drivers/net/enic/vnic_cq.c89
-rw-r--r--drivers/net/enic/vnic_cq.h113
-rw-r--r--drivers/net/enic/vnic_dev.c674
-rw-r--r--drivers/net/enic/vnic_dev.h106
-rw-r--r--drivers/net/enic/vnic_devcmd.h282
-rw-r--r--drivers/net/enic/vnic_enet.h47
-rw-r--r--drivers/net/enic/vnic_intr.c62
-rw-r--r--drivers/net/enic/vnic_intr.h92
-rw-r--r--drivers/net/enic/vnic_nic.h65
-rw-r--r--drivers/net/enic/vnic_resource.h63
-rw-r--r--drivers/net/enic/vnic_rq.c199
-rw-r--r--drivers/net/enic/vnic_rq.h204
-rw-r--r--drivers/net/enic/vnic_rss.h32
-rw-r--r--drivers/net/enic/vnic_stats.h70
-rw-r--r--drivers/net/enic/vnic_wq.c184
-rw-r--r--drivers/net/enic/vnic_wq.h154
-rw-r--r--drivers/net/enic/wq_enet_desc.h98
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c22
-rw-r--r--drivers/net/fs_enet/fs_enet.h2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c12
-rw-r--r--drivers/net/fs_enet/mac-fec.c30
-rw-r--r--drivers/net/fs_enet/mac-scc.c26
-rw-r--r--drivers/net/gianfar_mii.c18
-rw-r--r--drivers/net/ibm_newemac/Kconfig2
-rw-r--r--drivers/net/ibm_newemac/mal.h4
-rw-r--r--drivers/net/ibm_newemac/phy.c2
-rw-r--r--drivers/net/igb/igb_main.c12
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/ixgb/ixgb.h2
-rw-r--r--drivers/net/ixgbe/ixgbe.h103
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c628
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c1060
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h58
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c302
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1938
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c244
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h63
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h563
-rw-r--r--drivers/net/jme.c3019
-rw-r--r--drivers/net/jme.h1199
-rw-r--r--drivers/net/meth.c2
-rw-r--r--drivers/net/mipsnet.c2
-rw-r--r--drivers/net/mlx4/alloc.c1
-rw-r--r--drivers/net/mv643xx_eth.c1462
-rw-r--r--drivers/net/myri10ge/myri10ge.c33
-rw-r--r--drivers/net/ne.c281
-rw-r--r--drivers/net/netx-eth.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c20
-rw-r--r--drivers/net/pci-skeleton.c4
-rw-r--r--drivers/net/phy/mdio_bus.c89
-rw-r--r--drivers/net/ppp_generic.c10
-rw-r--r--drivers/net/pppol2tp.c2
-rw-r--r--drivers/net/qlge/Makefile7
-rw-r--r--drivers/net/qlge/qlge.h1593
-rw-r--r--drivers/net/qlge/qlge_dbg.c858
-rw-r--r--drivers/net/qlge/qlge_ethtool.c415
-rw-r--r--drivers/net/qlge/qlge_main.c3956
-rw-r--r--drivers/net/qlge/qlge_mpi.c150
-rw-r--r--drivers/net/r6040.c6
-rw-r--r--drivers/net/r8169.c428
-rw-r--r--drivers/net/s2io.c62
-rw-r--r--drivers/net/s2io.h1
-rw-r--r--drivers/net/sb1250-mac.c12
-rw-r--r--drivers/net/sfc/bitfield.h178
-rw-r--r--drivers/net/sfc/boards.c12
-rw-r--r--drivers/net/sfc/boards.h2
-rw-r--r--drivers/net/sfc/efx.c489
-rw-r--r--drivers/net/sfc/efx.h14
-rw-r--r--drivers/net/sfc/enum.h9
-rw-r--r--drivers/net/sfc/ethtool.c184
-rw-r--r--drivers/net/sfc/falcon.c1019
-rw-r--r--drivers/net/sfc/falcon.h17
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h80
-rw-r--r--drivers/net/sfc/falcon_io.h1
-rw-r--r--drivers/net/sfc/falcon_xmac.c346
-rw-r--r--drivers/net/sfc/mac.h4
-rw-r--r--drivers/net/sfc/mdio_10g.c16
-rw-r--r--drivers/net/sfc/mdio_10g.h13
-rw-r--r--drivers/net/sfc/net_driver.h144
-rw-r--r--drivers/net/sfc/phy.h10
-rw-r--r--drivers/net/sfc/rx.c78
-rw-r--r--drivers/net/sfc/rx.h4
-rw-r--r--drivers/net/sfc/selftest.c391
-rw-r--r--drivers/net/sfc/selftest.h13
-rw-r--r--drivers/net/sfc/sfe4001.c248
-rw-r--r--drivers/net/sfc/spi.h89
-rw-r--r--drivers/net/sfc/tenxpress.c149
-rw-r--r--drivers/net/sfc/tx.c385
-rw-r--r--drivers/net/sfc/tx.h2
-rw-r--r--drivers/net/sfc/workarounds.h4
-rw-r--r--drivers/net/sfc/xfp_phy.c12
-rw-r--r--drivers/net/skfp/pmf.c29
-rw-r--r--drivers/net/sky2.c170
-rw-r--r--drivers/net/smc911x.c68
-rw-r--r--drivers/net/smc91x.c43
-rw-r--r--drivers/net/smc91x.h2
-rw-r--r--drivers/net/sundance.c95
-rw-r--r--drivers/net/tehuti.h8
-rw-r--r--drivers/net/tg3.c91
-rw-r--r--drivers/net/tg3.h1
-rw-r--r--drivers/net/tsi108_eth.c6
-rw-r--r--drivers/net/tulip/de2104x.c1
-rw-r--r--drivers/net/tulip/de4x5.c38
-rw-r--r--drivers/net/ucc_geth.c116
-rw-r--r--drivers/net/usb/hso.c335
-rw-r--r--drivers/net/usb/mcs7830.c2
-rw-r--r--drivers/net/usb/pegasus.c20
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/via-rhine.c8
-rw-r--r--drivers/net/via-velocity.h2
-rw-r--r--drivers/net/wan/cycx_drv.c6
-rw-r--r--drivers/net/wan/cycx_x25.c12
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hdlc_x25.c8
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wireless/Kconfig17
-rw-r--r--drivers/net/wireless/Makefile4
-rw-r--r--drivers/net/wireless/adm8211.c23
-rw-r--r--drivers/net/wireless/airo.c18
-rw-r--r--drivers/net/wireless/airo_cs.c2
-rw-r--r--drivers/net/wireless/airport.c3
-rw-r--r--drivers/net/wireless/ath5k/Makefile12
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h615
-rw-r--r--drivers/net/wireless/ath5k/attach.c359
-rw-r--r--drivers/net/wireless/ath5k/base.c524
-rw-r--r--drivers/net/wireless/ath5k/base.h10
-rw-r--r--drivers/net/wireless/ath5k/caps.c193
-rw-r--r--drivers/net/wireless/ath5k/debug.c4
-rw-r--r--drivers/net/wireless/ath5k/desc.c667
-rw-r--r--drivers/net/wireless/ath5k/desc.h (renamed from drivers/net/wireless/ath5k/hw.h)400
-rw-r--r--drivers/net/wireless/ath5k/dma.c605
-rw-r--r--drivers/net/wireless/ath5k/eeprom.c466
-rw-r--r--drivers/net/wireless/ath5k/eeprom.h215
-rw-r--r--drivers/net/wireless/ath5k/gpio.c176
-rw-r--r--drivers/net/wireless/ath5k/hw.c4529
-rw-r--r--drivers/net/wireless/ath5k/initvals.c22
-rw-r--r--drivers/net/wireless/ath5k/pcu.c1014
-rw-r--r--drivers/net/wireless/ath5k/phy.c12
-rw-r--r--drivers/net/wireless/ath5k/qcu.c488
-rw-r--r--drivers/net/wireless/ath5k/reg.h679
-rw-r--r--drivers/net/wireless/ath5k/reset.c931
-rw-r--r--drivers/net/wireless/ath9k/Kconfig3
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h62
-rw-r--r--drivers/net/wireless/ath9k/beacon.c375
-rw-r--r--drivers/net/wireless/ath9k/core.c309
-rw-r--r--drivers/net/wireless/ath9k/core.h264
-rw-r--r--drivers/net/wireless/ath9k/hw.c238
-rw-r--r--drivers/net/wireless/ath9k/hw.h120
-rw-r--r--drivers/net/wireless/ath9k/main.c1247
-rw-r--r--drivers/net/wireless/ath9k/phy.h12
-rw-r--r--drivers/net/wireless/ath9k/rc.c280
-rw-r--r--drivers/net/wireless/ath9k/rc.h222
-rw-r--r--drivers/net/wireless/ath9k/recv.c81
-rw-r--r--drivers/net/wireless/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath9k/xmit.c400
-rw-r--r--drivers/net/wireless/atmel.c2
-rw-r--r--drivers/net/wireless/atmel_cs.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig12
-rw-r--r--drivers/net/wireless/b43/Makefile7
-rw-r--r--drivers/net/wireless/b43/b43.h146
-rw-r--r--drivers/net/wireless/b43/debugfs.c79
-rw-r--r--drivers/net/wireless/b43/lo.c120
-rw-r--r--drivers/net/wireless/b43/lo.h4
-rw-r--r--drivers/net/wireless/b43/main.c403
-rw-r--r--drivers/net/wireless/b43/phy.h340
-rw-r--r--drivers/net/wireless/b43/phy_a.c643
-rw-r--r--drivers/net/wireless/b43/phy_a.h130
-rw-r--r--drivers/net/wireless/b43/phy_common.c381
-rw-r--r--drivers/net/wireless/b43/phy_common.h413
-rw-r--r--drivers/net/wireless/b43/phy_g.c (renamed from drivers/net/wireless/b43/phy.c)4420
-rw-r--r--drivers/net/wireless/b43/phy_g.h209
-rw-r--r--drivers/net/wireless/b43/phy_lp.c155
-rw-r--r--drivers/net/wireless/b43/phy_lp.h540
-rw-r--r--drivers/net/wireless/b43/phy_n.c (renamed from drivers/net/wireless/b43/nphy.c)154
-rw-r--r--drivers/net/wireless/b43/phy_n.h (renamed from drivers/net/wireless/b43/nphy.h)54
-rw-r--r--drivers/net/wireless/b43/rfkill.c10
-rw-r--r--drivers/net/wireless/b43/sysfs.c23
-rw-r--r--drivers/net/wireless/b43/tables.c43
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c4
-rw-r--r--drivers/net/wireless/b43/wa.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c8
-rw-r--r--drivers/net/wireless/b43legacy/main.c37
-rw-r--r--drivers/net/wireless/b43legacy/phy.c36
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/hermes.c124
-rw-r--r--drivers/net/wireless/hermes.h45
-rw-r--r--drivers/net/wireless/hermes_dld.c730
-rw-r--r--drivers/net/wireless/hermes_dld.h48
-rw-r--r--drivers/net/wireless/hermes_rid.h17
-rw-r--r--drivers/net/wireless/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2200.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debug.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-io.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c200
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c205
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c331
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c251
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c75
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h107
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c163
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c31
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c250
-rw-r--r--drivers/net/wireless/libertas/assoc.c750
-rw-r--r--drivers/net/wireless/libertas/assoc.h18
-rw-r--r--drivers/net/wireless/libertas/cmd.c430
-rw-r--r--drivers/net/wireless/libertas/cmd.h22
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c80
-rw-r--r--drivers/net/wireless/libertas/decl.h1
-rw-r--r--drivers/net/wireless/libertas/defs.h41
-rw-r--r--drivers/net/wireless/libertas/dev.h11
-rw-r--r--drivers/net/wireless/libertas/host.h51
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h84
-rw-r--r--drivers/net/wireless/libertas/if_cs.c2
-rw-r--r--drivers/net/wireless/libertas/if_usb.c182
-rw-r--r--drivers/net/wireless/libertas/if_usb.h5
-rw-r--r--drivers/net/wireless/libertas/main.c41
-rw-r--r--drivers/net/wireless/libertas/scan.c5
-rw-r--r--drivers/net/wireless/libertas/wext.c363
-rw-r--r--drivers/net/wireless/libertas_tf/Makefile6
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c669
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c766
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.h98
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h514
-rw-r--r--drivers/net/wireless/libertas_tf/main.c662
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c211
-rw-r--r--drivers/net/wireless/netwave_cs.c2
-rw-r--r--drivers/net/wireless/orinoco.c1951
-rw-r--r--drivers/net/wireless/orinoco.h61
-rw-r--r--drivers/net/wireless/orinoco_cs.c5
-rw-r--r--drivers/net/wireless/orinoco_nortel.c3
-rw-r--r--drivers/net/wireless/orinoco_pci.c3
-rw-r--r--drivers/net/wireless/orinoco_plx.c3
-rw-r--r--drivers/net/wireless/orinoco_tmd.c3
-rw-r--r--drivers/net/wireless/p54/p54.h55
-rw-r--r--drivers/net/wireless/p54/p54common.c690
-rw-r--r--drivers/net/wireless/p54/p54common.h124
-rw-r--r--drivers/net/wireless/p54/p54pci.c427
-rw-r--r--drivers/net/wireless/p54/p54pci.h20
-rw-r--r--drivers/net/wireless/p54/p54usb.c200
-rw-r--r--drivers/net/wireless/p54/p54usb.h11
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c8
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig128
-rw-r--r--drivers/net/wireless/rt2x00/Makefile1
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c58
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h22
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c59
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h17
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c68
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c215
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c97
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c129
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h49
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c245
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c133
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h82
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h19
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00rfkill.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c21
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c447
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h38
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c457
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h38
-rw-r--r--drivers/net/wireless/rtl8180.h31
-rw-r--r--drivers/net/wireless/rtl8180_dev.c44
-rw-r--r--drivers/net/wireless/rtl8187.h6
-rw-r--r--drivers/net/wireless/rtl8187_dev.c20
-rw-r--r--drivers/net/wireless/rtl818x.h35
-rw-r--r--drivers/net/wireless/spectrum_cs.c428
-rw-r--r--drivers/net/wireless/wavelan_cs.c2
-rw-r--r--drivers/net/wireless/wl3501_cs.c10
-rw-r--r--drivers/net/wireless/zd1211rw/Makefile2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c1
-rw-r--r--drivers/net/wireless/zd1211rw/zd_ieee80211.c100
-rw-r--r--drivers/net/wireless/zd1211rw/zd_ieee80211.h95
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c67
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h65
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c4
-rw-r--r--drivers/s390/net/qeth_l2_main.c14
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/ssb/pci.c84
-rw-r--r--drivers/usb/atm/usbatm.c5
-rw-r--r--firmware/Makefile1
-rw-r--r--firmware/WHENCE10
-rw-r--r--firmware/sun/cassini.bin.ihex143
-rw-r--r--include/asm-um/dma-mapping.h7
-rw-r--r--include/linux/Kbuild2
-rw-r--r--include/linux/ieee80211.h36
-rw-r--r--include/linux/if.h1
-rw-r--r--include/linux/if_ether.h2
-rw-r--r--include/linux/if_link.h1
-rw-r--r--include/linux/if_phonet.h19
-rw-r--r--include/linux/ip_vs.h160
-rw-r--r--include/linux/isdn_ppp.h2
-rw-r--r--include/linux/mv643xx_eth.h13
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/netlink.h2
-rw-r--r--include/linux/nl80211.h133
-rw-r--r--include/linux/pci_ids.h13
-rw-r--r--include/linux/phonet.h160
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/pkt_sched.h7
-rw-r--r--include/linux/rfkill.h7
-rw-r--r--include/linux/rtnetlink.h4
-rw-r--r--include/linux/skbuff.h152
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/ssb/ssb_regs.h19
-rw-r--r--include/linux/tc_act/Kbuild1
-rw-r--r--include/linux/tc_act/tc_skbedit.h44
-rw-r--r--include/linux/tcp.h3
-rw-r--r--include/net/cfg80211.h89
-rw-r--r--include/net/ieee80211.h4
-rw-r--r--include/net/inet_connection_sock.h2
-rw-r--r--include/net/ip_vs.h310
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/mac80211.h206
-rw-r--r--include/net/netlink.h82
-rw-r--r--include/net/phonet/phonet.h112
-rw-r--r--include/net/phonet/pn_dev.h50
-rw-r--r--include/net/pkt_sched.h5
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sock.h2
-rw-r--r--include/net/tc_act/tc_skbedit.h34
-rw-r--r--include/net/tcp.h49
-rw-r--r--include/net/wireless.h65
-rw-r--r--include/net/xfrm.h25
-rw-r--r--net/8021q/vlan.c1
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/Kconfig9
-rw-r--r--net/Makefile1
-rw-r--r--net/atm/br2684.c8
-rw-r--r--net/atm/lec.c1
-rw-r--r--net/bridge/br.c22
-rw-r--r--net/bridge/br_device.c3
-rw-r--r--net/bridge/br_if.c15
-rw-r--r--net/bridge/br_ioctl.c20
-rw-r--r--net/bridge/br_netlink.c15
-rw-r--r--net/bridge/br_notify.c3
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/bridge/br_stp_bpdu.c3
-rw-r--r--net/core/Makefile1
-rw-r--r--net/core/dev.c96
-rw-r--r--net/core/dst.c1
-rw-r--r--net/core/neighbour.c21
-rw-r--r--net/core/net-sysfs.c36
-rw-r--r--net/core/rtnetlink.c13
-rw-r--r--net/core/skb_dma_map.c66
-rw-r--r--net/core/skbuff.c41
-rw-r--r--net/core/sock.c9
-rw-r--r--net/dccp/ccids/ccid2.c2
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/ccids/lib/loss_interval.c6
-rw-r--r--net/dccp/ccids/lib/tfrc.c2
-rw-r--r--net/dccp/input.c4
-rw-r--r--net/dccp/options.c13
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee80211/ieee80211_module.c8
-rw-r--r--net/ipv4/inet_diag.c6
-rw-r--r--net/ipv4/ipvs/Kconfig17
-rw-r--r--net/ipv4/ipvs/Makefile3
-rw-r--r--net/ipv4/ipvs/ip_vs_conn.c249
-rw-r--r--net/ipv4/ipvs/ip_vs_core.c817
-rw-r--r--net/ipv4/ipvs/ip_vs_ctl.c1370
-rw-r--r--net/ipv4/ipvs/ip_vs_dh.c5
-rw-r--r--net/ipv4/ipvs/ip_vs_est.c58
-rw-r--r--net/ipv4/ipvs/ip_vs_ftp.c61
-rw-r--r--net/ipv4/ipvs/ip_vs_lblc.c220
-rw-r--r--net/ipv4/ipvs/ip_vs_lblcr.c249
-rw-r--r--net/ipv4/ipvs/ip_vs_lc.c32
-rw-r--r--net/ipv4/ipvs/ip_vs_nq.c39
-rw-r--r--net/ipv4/ipvs/ip_vs_proto.c65
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_ah.c178
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_ah_esp.c235
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_esp.c176
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_tcp.c254
-rw-r--r--net/ipv4/ipvs/ip_vs_proto_udp.c227
-rw-r--r--net/ipv4/ipvs/ip_vs_rr.c20
-rw-r--r--net/ipv4/ipvs/ip_vs_sed.c39
-rw-r--r--net/ipv4/ipvs/ip_vs_sh.c5
-rw-r--r--net/ipv4/ipvs/ip_vs_sync.c40
-rw-r--r--net/ipv4/ipvs/ip_vs_wlc.c39
-rw-r--r--net/ipv4/ipvs/ip_vs_wrr.c15
-rw-r--r--net/ipv4/ipvs/ip_vs_xmit.c471
-rw-r--r--net/ipv4/route.c14
-rw-r--r--net/ipv4/tcp_input.c314
-rw-r--r--net/ipv4/tcp_ipv4.c30
-rw-r--r--net/ipv4/tcp_output.c202
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c32
-rw-r--r--net/ipv6/reassembly.c11
-rw-r--r--net/ipv6/route.c20
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/mac80211/Kconfig13
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/cfg.c201
-rw-r--r--net/mac80211/debugfs.c4
-rw-r--r--net/mac80211/debugfs_key.c3
-rw-r--r--net/mac80211/debugfs_netdev.c72
-rw-r--r--net/mac80211/debugfs_sta.c11
-rw-r--r--net/mac80211/event.c5
-rw-r--r--net/mac80211/ht.c992
-rw-r--r--net/mac80211/ieee80211_i.h393
-rw-r--r--net/mac80211/iface.c620
-rw-r--r--net/mac80211/key.c8
-rw-r--r--net/mac80211/main.c998
-rw-r--r--net/mac80211/mesh.c368
-rw-r--r--net/mac80211/mesh.h80
-rw-r--r--net/mac80211/mesh_hwmp.c230
-rw-r--r--net/mac80211/mesh_pathtbl.c211
-rw-r--r--net/mac80211/mesh_plink.c98
-rw-r--r--net/mac80211/mlme.c3899
-rw-r--r--net/mac80211/rate.c71
-rw-r--r--net/mac80211/rate.h102
-rw-r--r--net/mac80211/rc80211_pid.h4
-rw-r--r--net/mac80211/rc80211_pid_algo.c191
-rw-r--r--net/mac80211/rx.c367
-rw-r--r--net/mac80211/scan.c937
-rw-r--r--net/mac80211/spectmgmt.c86
-rw-r--r--net/mac80211/sta_info.c100
-rw-r--r--net/mac80211/sta_info.h42
-rw-r--r--net/mac80211/tkip.c2
-rw-r--r--net/mac80211/tx.c308
-rw-r--r--net/mac80211/util.c365
-rw-r--r--net/mac80211/wep.c14
-rw-r--r--net/mac80211/wext.c165
-rw-r--r--net/mac80211/wme.c14
-rw-r--r--net/mac80211/wme.h3
-rw-r--r--net/mac80211/wpa.c4
-rw-r--r--net/netfilter/xt_time.c6
-rw-r--r--net/phonet/Kconfig16
-rw-r--r--net/phonet/Makefile9
-rw-r--r--net/phonet/af_phonet.c468
-rw-r--r--net/phonet/datagram.c197
-rw-r--r--net/phonet/pn_dev.c208
-rw-r--r--net/phonet/pn_netlink.c165
-rw-r--r--net/phonet/socket.c312
-rw-r--r--net/phonet/sysctl.c113
-rw-r--r--net/rfkill/rfkill-input.h1
-rw-r--r--net/rfkill/rfkill.c254
-rw-r--r--net/sched/Kconfig20
-rw-r--r--net/sched/Makefile2
-rw-r--r--net/sched/act_skbedit.c203
-rw-r--r--net/sched/cls_flow.c28
-rw-r--r--net/sched/em_cmp.c9
-rw-r--r--net/sched/sch_dsmark.c8
-rw-r--r--net/sched/sch_generic.c27
-rw-r--r--net/sched/sch_multiq.c477
-rw-r--r--net/sched/sch_netem.c18
-rw-r--r--net/sched/sch_prio.c6
-rw-r--r--net/sched/sch_sfq.c4
-rw-r--r--net/sctp/ulpqueue.c5
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c4
-rw-r--r--net/wireless/Kconfig32
-rw-r--r--net/wireless/core.c45
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c280
-rw-r--r--net/wireless/reg.c910
-rw-r--r--net/wireless/reg.h13
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c72
593 files changed, 69155 insertions, 30896 deletions
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index b651e0a4b1c0..77c3c202991b 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -145,7 +145,6 @@ usage should require reading the full document.
145 this though and the recommendation to allow only a single 145 this though and the recommendation to allow only a single
146 interface in STA mode at first! 146 interface in STA mode at first!
147 </para> 147 </para>
148!Finclude/net/mac80211.h ieee80211_if_types
149!Finclude/net/mac80211.h ieee80211_if_init_conf 148!Finclude/net/mac80211.h ieee80211_if_init_conf
150!Finclude/net/mac80211.h ieee80211_if_conf 149!Finclude/net/mac80211.h ieee80211_if_conf
151 </chapter> 150 </chapter>
@@ -177,8 +176,7 @@ usage should require reading the full document.
177 <title>functions/definitions</title> 176 <title>functions/definitions</title>
178!Finclude/net/mac80211.h ieee80211_rx_status 177!Finclude/net/mac80211.h ieee80211_rx_status
179!Finclude/net/mac80211.h mac80211_rx_flags 178!Finclude/net/mac80211.h mac80211_rx_flags
180!Finclude/net/mac80211.h ieee80211_tx_control 179!Finclude/net/mac80211.h ieee80211_tx_info
181!Finclude/net/mac80211.h ieee80211_tx_status_flags
182!Finclude/net/mac80211.h ieee80211_rx 180!Finclude/net/mac80211.h ieee80211_rx
183!Finclude/net/mac80211.h ieee80211_rx_irqsafe 181!Finclude/net/mac80211.h ieee80211_rx_irqsafe
184!Finclude/net/mac80211.h ieee80211_tx_status 182!Finclude/net/mac80211.h ieee80211_tx_status
@@ -189,12 +187,11 @@ usage should require reading the full document.
189!Finclude/net/mac80211.h ieee80211_ctstoself_duration 187!Finclude/net/mac80211.h ieee80211_ctstoself_duration
190!Finclude/net/mac80211.h ieee80211_generic_frame_duration 188!Finclude/net/mac80211.h ieee80211_generic_frame_duration
191!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb 189!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
192!Finclude/net/mac80211.h ieee80211_get_hdrlen 190!Finclude/net/mac80211.h ieee80211_hdrlen
193!Finclude/net/mac80211.h ieee80211_wake_queue 191!Finclude/net/mac80211.h ieee80211_wake_queue
194!Finclude/net/mac80211.h ieee80211_stop_queue 192!Finclude/net/mac80211.h ieee80211_stop_queue
195!Finclude/net/mac80211.h ieee80211_start_queues
196!Finclude/net/mac80211.h ieee80211_stop_queues
197!Finclude/net/mac80211.h ieee80211_wake_queues 193!Finclude/net/mac80211.h ieee80211_wake_queues
194!Finclude/net/mac80211.h ieee80211_stop_queues
198 </sect1> 195 </sect1>
199 </chapter> 196 </chapter>
200 197
@@ -230,8 +227,7 @@ usage should require reading the full document.
230 <title>Multiple queues and QoS support</title> 227 <title>Multiple queues and QoS support</title>
231 <para>TBD</para> 228 <para>TBD</para>
232!Finclude/net/mac80211.h ieee80211_tx_queue_params 229!Finclude/net/mac80211.h ieee80211_tx_queue_params
233!Finclude/net/mac80211.h ieee80211_tx_queue_stats_data 230!Finclude/net/mac80211.h ieee80211_tx_queue_stats
234!Finclude/net/mac80211.h ieee80211_tx_queue
235 </chapter> 231 </chapter>
236 232
237 <chapter id="AP"> 233 <chapter id="AP">
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 83c88cae1eda..d0f22fac55da 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,6 +6,24 @@ be removed from this file.
6 6
7--------------------------- 7---------------------------
8 8
9What: old static regulatory information and ieee80211_regdom module parameter
10When: 2.6.29
11Why: The old regulatory infrastructure has been replaced with a new one
12 which does not require statically defined regulatory domains. We do
13 not want to keep static regulatory domains in the kernel due to the
14 the dynamic nature of regulatory law and localization. We kept around
15 the old static definitions for the regulatory domains of:
16 * US
17 * JP
18 * EU
19 and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
20 set. We also kept around the ieee80211_regdom module parameter in case
21 some applications were relying on it. Changing regulatory domains
22 can now be done instead by using nl80211, as is done with iw.
23Who: Luis R. Rodriguez <lrodriguez@atheros.com>
24
25---------------------------
26
9What: dev->power.power_state 27What: dev->power.power_state
10When: July 2007 28When: July 2007
11Why: Broken design for runtime control over driver power states, confusing 29Why: Broken design for runtime control over driver power states, confusing
diff --git a/Documentation/networking/LICENSE.qlge b/Documentation/networking/LICENSE.qlge
new file mode 100644
index 000000000000..123b6edd7f18
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlge
@@ -0,0 +1,46 @@
1Copyright (c) 2003-2008 QLogic Corporation
2QLogic Linux Networking HBA Driver
3
4This program includes a device driver for Linux 2.6 that may be
5distributed with QLogic hardware specific firmware binary file.
6You may modify and redistribute the device driver code under the
7GNU General Public License as published by the Free Software
8Foundation (version 2 or a later version).
9
10You may redistribute the hardware specific firmware binary file
11under the following terms:
12
13 1. Redistribution of source code (only if applicable),
14 must retain the above copyright notice, this list of
15 conditions and the following disclaimer.
16
17 2. Redistribution in binary form must reproduce the above
18 copyright notice, this list of conditions and the
19 following disclaimer in the documentation and/or other
20 materials provided with the distribution.
21
22 3. The name of QLogic Corporation may not be used to
23 endorse or promote products derived from this software
24 without specific prior written permission
25
26REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
27THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
28EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
30PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
31BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
33TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
35ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38POSSIBILITY OF SUCH DAMAGE.
39
40USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
41CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
42OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
43TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
44ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
45COMBINATION WITH THIS PROGRAM.
46
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 297ba7b1ccaf..2035bc4932f2 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -35,8 +35,9 @@ This file contains
35 6.1 general settings 35 6.1 general settings
36 6.2 local loopback of sent frames 36 6.2 local loopback of sent frames
37 6.3 CAN controller hardware filters 37 6.3 CAN controller hardware filters
38 6.4 currently supported CAN hardware 38 6.4 The virtual CAN driver (vcan)
39 6.5 todo 39 6.5 currently supported CAN hardware
40 6.6 todo
40 41
41 7 Credits 42 7 Credits
42 43
@@ -584,7 +585,42 @@ solution for a couple of reasons:
584 @133MHz with four SJA1000 CAN controllers from 2002 under heavy bus 585 @133MHz with four SJA1000 CAN controllers from 2002 under heavy bus
585 load without any problems ... 586 load without any problems ...
586 587
587 6.4 currently supported CAN hardware (September 2007) 588 6.4 The virtual CAN driver (vcan)
589
590 Similar to the network loopback devices, vcan offers a virtual local
591 CAN interface. A fully qualified address on CAN consists of
592
593 - a unique CAN Identifier (CAN ID)
594 - the CAN bus this CAN ID is transmitted on (e.g. can0)
595
596 so in common use cases more than one virtual CAN interface is needed.
597
598 The virtual CAN interfaces allow the transmission and reception of CAN
599 frames without real CAN controller hardware. Virtual CAN network
600 devices are usually named 'vcanX', like vcan0 vcan1 vcan2 ...
601 When compiled as a module the virtual CAN driver module is called vcan.ko
602
603 Since Linux Kernel version 2.6.24 the vcan driver supports the Kernel
604 netlink interface to create vcan network devices. The creation and
605 removal of vcan network devices can be managed with the ip(8) tool:
606
607 - Create a virtual CAN network interface:
608 ip link add type vcan
609
610 - Create a virtual CAN network interface with a specific name 'vcan42':
611 ip link add dev vcan42 type vcan
612
613 - Remove a (virtual CAN) network interface 'vcan42':
614 ip link del vcan42
615
616 The tool 'vcan' from the SocketCAN SVN repository on BerliOS is obsolete.
617
618 Virtual CAN network device creation in older Kernels:
619 In Linux Kernel versions < 2.6.24 the vcan driver creates 4 vcan
620 netdevices at module load time by default. This value can be changed
621 with the module parameter 'numdev'. E.g. 'modprobe vcan numdev=8'
622
623 6.5 currently supported CAN hardware
588 624
589 On the project website http://developer.berlios.de/projects/socketcan 625 On the project website http://developer.berlios.de/projects/socketcan
590 there are different drivers available: 626 there are different drivers available:
@@ -603,7 +639,7 @@ solution for a couple of reasons:
603 639
604 Please check the Mailing Lists on the berlios OSS project website. 640 Please check the Mailing Lists on the berlios OSS project website.
605 641
606 6.5 todo (September 2007) 642 6.6 todo
607 643
608 The configuration interface for CAN network drivers is still an open 644 The configuration interface for CAN network drivers is still an open
609 issue that has not been finalized in the socketcan project. Also the 645 issue that has not been finalized in the socketcan project. Also the
diff --git a/Documentation/networking/multiqueue.txt b/Documentation/networking/multiqueue.txt
index d391ea631141..4caa0e314cc2 100644
--- a/Documentation/networking/multiqueue.txt
+++ b/Documentation/networking/multiqueue.txt
@@ -24,4 +24,56 @@ netif_{start|stop|wake}_subqueue() functions to manage each queue while the
24device is still operational. netdev->queue_lock is still used when the device 24device is still operational. netdev->queue_lock is still used when the device
25comes online or when it's completely shut down (unregister_netdev(), etc.). 25comes online or when it's completely shut down (unregister_netdev(), etc.).
26 26
27Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com> 27
28Section 2: Qdisc support for multiqueue devices
29
30-----------------------------------------------
31
32Currently two qdiscs are optimized for multiqueue devices. The first is the
33default pfifo_fast qdisc. This qdisc supports one qdisc per hardware queue.
34A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
35qdisc is responsible for classifying the skb's and then directing the skb's to
36bands and queues based on the value in skb->queue_mapping. Use this field in
37the base driver to determine which queue to send the skb to.
38
39sch_multiq has been added for hardware that wishes to avoid head-of-line
40blocking. It will cycle though the bands and verify that the hardware queue
41associated with the band is not stopped prior to dequeuing a packet.
42
43On qdisc load, the number of bands is based on the number of queues on the
44hardware. Once the association is made, any skb with skb->queue_mapping set,
45will be queued to the band associated with the hardware queue.
46
47
48Section 3: Brief howto using MULTIQ for multiqueue devices
49---------------------------------------------------------------
50
51The userspace command 'tc,' part of the iproute2 package, is used to configure
52qdiscs. To add the MULTIQ qdisc to your network device, assuming the device
53is called eth0, run the following command:
54
55# tc qdisc add dev eth0 root handle 1: multiq
56
57The qdisc will allocate the number of bands to equal the number of queues that
58the device reports, and bring the qdisc online. Assuming eth0 has 4 Tx
59queues, the band mapping would look like:
60
61band 0 => queue 0
62band 1 => queue 1
63band 2 => queue 2
64band 3 => queue 3
65
66Traffic will begin flowing through each queue based on either the simple_tx_hash
67function or based on netdev->select_queue() if you have it defined.
68
69The behavior of tc filters remains the same. However a new tc action,
70skbedit, has been added. Assuming you wanted to route all traffic to a
71specific host, for example 192.168.0.3, through a specific queue you could use
72this action and establish a filter such as:
73
74tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
75 match ip dst 192.168.0.3 \
76 action skbedit queue_mapping 3
77
78Author: Alexander Duyck <alexander.h.duyck@intel.com>
79Original Author: Peter P. Waskiewicz Jr. <peter.p.waskiewicz.jr@intel.com>
diff --git a/Documentation/networking/phonet.txt b/Documentation/networking/phonet.txt
new file mode 100644
index 000000000000..57d3e59edb13
--- /dev/null
+++ b/Documentation/networking/phonet.txt
@@ -0,0 +1,121 @@
1Linux Phonet protocol family
2============================
3
4Introduction
5------------
6
7Phonet is a packet protocol used by Nokia cellular modems for both IPC
8and RPC. With the Linux Phonet socket family, Linux host processes can
9receive and send messages from/to the modem, or any other external
10device attached to the modem. The modem takes care of routing.
11
12Phonet packets can be exchanged through various hardware connections
13depending on the device, such as:
14 - USB with the CDC Phonet interface,
15 - infrared,
16 - Bluetooth,
17 - an RS232 serial port (with a dedicated "FBUS" line discipline),
18 - the SSI bus with some TI OMAP processors.
19
20
21Packets format
22--------------
23
24Phonet packets have a common header as follows:
25
26 struct phonethdr {
27 uint8_t pn_media; /* Media type (link-layer identifier) */
28 uint8_t pn_rdev; /* Receiver device ID */
29 uint8_t pn_sdev; /* Sender device ID */
30 uint8_t pn_res; /* Resource ID or function */
31 uint16_t pn_length; /* Big-endian message byte length (minus 6) */
32 uint8_t pn_robj; /* Receiver object ID */
33 uint8_t pn_sobj; /* Sender object ID */
34 };
35
36On Linux, the link-layer header includes the pn_media byte (see below).
37The next 7 bytes are part of the network-layer header.
38
39The device ID is split: the 6 higher-order bits constitute the device
40address, while the 2 lower-order bits are used for multiplexing, as are
41the 8-bit object identifiers. As such, Phonet can be considered as a
42network layer with 6 bits of address space and 10 bits for transport
43protocol (much like port numbers in IP world).
44
45The modem always has address number zero. All other devices have their
46own 6-bit address.
47
48
49Link layer
50----------
51
52Phonet links are always point-to-point links. The link layer header
53consists of a single Phonet media type byte. It uniquely identifies the
54link through which the packet is transmitted, from the modem's
55perspective. Each Phonet network device shall prepend and set the media
56type byte as appropriate. For convenience, a common phonet_header_ops
57link-layer header operations structure is provided. It sets the
58media type according to the network device hardware address.
59
60Linux Phonet network interfaces support a dedicated link layer packets
61type (ETH_P_PHONET) which is out of the Ethernet type range. They can
62only send and receive Phonet packets.
63
64The virtual TUN tunnel device driver can also be used for Phonet. This
65requires IFF_TUN mode, _without_ the IFF_NO_PI flag. In this case,
66there is no link-layer header, so there is no Phonet media type byte.
67
68Note that Phonet interfaces are not allowed to re-order packets, so
69only the (default) Linux FIFO qdisc should be used with them.
70
71
72Network layer
73-------------
74
75The Phonet socket address family maps the Phonet packet header:
76
77 struct sockaddr_pn {
78 sa_family_t spn_family; /* AF_PHONET */
79 uint8_t spn_obj; /* Object ID */
80 uint8_t spn_dev; /* Device ID */
81 uint8_t spn_resource; /* Resource or function */
82 uint8_t spn_zero[...]; /* Padding */
83 };
84
85The resource field is only used when sending and receiving;
86It is ignored by bind() and getsockname().
87
88
89Low-level datagram protocol
90---------------------------
91
92Applications can send Phonet messages using the Phonet datagram socket
93protocol from the PF_PHONET family. Each socket is bound to one of the
942^10 object IDs available, and can send and receive packets with any
95other peer.
96
97 struct sockaddr_pn addr = { .spn_family = AF_PHONET, };
98 ssize_t len;
99 socklen_t addrlen = sizeof(addr);
100 int fd;
101
102 fd = socket(PF_PHONET, SOCK_DGRAM, 0);
103 bind(fd, (struct sockaddr *)&addr, sizeof(addr));
104 /* ... */
105
106 sendto(fd, msg, msglen, 0, (struct sockaddr *)&addr, sizeof(addr));
107 len = recvfrom(fd, buf, sizeof(buf), 0,
108 (struct sockaddr *)&addr, &addrlen);
109
110This protocol follows the SOCK_DGRAM connection-less semantics.
111However, connect() and getpeername() are not supported, as they did
112not seem useful with Phonet usages (could be added easily).
113
114
115Authors
116-------
117
118Linux Phonet was initially written by Sakari Ailus.
119Other contributors include Mika Liljeberg, Andras Domokos,
120Carlos Chinea and Rémi Denis-Courmont.
121Copyright (C) 2008 Nokia Corporation.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
new file mode 100644
index 000000000000..a96989a8ff35
--- /dev/null
+++ b/Documentation/networking/regulatory.txt
@@ -0,0 +1,194 @@
1Linux wireless regulatory documentation
2---------------------------------------
3
4This document gives a brief review over how the Linux wireless
5regulatory infrastructure works.
6
7More up to date information can be obtained at the project's web page:
8
9http://wireless.kernel.org/en/developers/Regulatory
10
11Keeping regulatory domains in userspace
12---------------------------------------
13
14Due to the dynamic nature of regulatory domains we keep them
15in userspace and provide a framework for userspace to upload
16to the kernel one regulatory domain to be used as the central
17core regulatory domain all wireless devices should adhere to.
18
19How to get regulatory domains to the kernel
20-------------------------------------------
21
22Userspace gets a regulatory domain in the kernel by having
23a userspace agent build it and send it via nl80211. Only
24expected regulatory domains will be respected by the kernel.
25
26A currently available userspace agent which can accomplish this
27is CRDA - central regulatory domain agent. It's documented here:
28
29http://wireless.kernel.org/en/developers/Regulatory/CRDA
30
31Essentially the kernel will send a udev event when it knows
32it needs a new regulatory domain. A udev rule can be put in place
33to trigger crda to send the respective regulatory domain for a
34specific ISO/IEC 3166 alpha2.
35
36Below is an example udev rule which can be used:
37
38# Example file, should be put in /etc/udev/rules.d/regulatory.rules
39KERNEL=="regulatory*", ACTION=="change", SUBSYSTEM=="platform", RUN+="/sbin/crda"
40
41The alpha2 is passed as an environment variable under the variable COUNTRY.
42
43Who asks for regulatory domains?
44--------------------------------
45
46* Users
47
48Users can use iw:
49
50http://wireless.kernel.org/en/users/Documentation/iw
51
52An example:
53
54 # set regulatory domain to "Costa Rica"
55 iw reg set CR
56
57This will request the kernel to set the regulatory domain to
58the specified alpha2. The kernel in turn will then ask userspace
59to provide a regulatory domain for the alpha2 specified by the user
60by sending a uevent.
61
62* Wireless subsystems for Country Information elements
63
64The kernel will send a uevent to inform userspace a new
65regulatory domain is required. More on this to be added
66as its integration is added.
67
68* Drivers
69
70If drivers determine they need a specific regulatory domain
71set they can inform the wireless core using regulatory_hint().
72They have two options -- they either provide an alpha2 so that
73crda can provide back a regulatory domain for that country or
74they can build their own regulatory domain based on internal
75custom knowledge so the wireless core can respect it.
76
77*Most* drivers will rely on the first mechanism of providing a
78regulatory hint with an alpha2. For these drivers there is an additional
79check that can be used to ensure compliance based on custom EEPROM
80regulatory data. This additional check can be used by drivers by
81registering on its struct wiphy a reg_notifier() callback. This notifier
82is called when the core's regulatory domain has been changed. The driver
83can use this to review the changes made and also review who made them
84(driver, user, country IE) and determine what to allow based on its
85internal EEPROM data. Device drivers wishing to be capable of world
86roaming should use this callback. More on world roaming will be
87added to this document when its support is enabled.
88
89Device drivers who provide their own built regulatory domain
90do not need a callback as the channels registered by them are
91the only ones that will be allowed and therefore *additional*
92channels cannot be enabled.
93
94Example code - drivers hinting an alpha2:
95------------------------------------------
96
97This example comes from the zd1211rw device driver. You can start
98by having a mapping of your device's EEPROM country/regulatory
99domain value to a specific alpha2 as follows:
100
101static struct zd_reg_alpha2_map reg_alpha2_map[] = {
102 { ZD_REGDOMAIN_FCC, "US" },
103 { ZD_REGDOMAIN_IC, "CA" },
104 { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
105 { ZD_REGDOMAIN_JAPAN, "JP" },
106 { ZD_REGDOMAIN_JAPAN_ADD, "JP" },
107 { ZD_REGDOMAIN_SPAIN, "ES" },
108 { ZD_REGDOMAIN_FRANCE, "FR" },
109
110Then you can define a routine to map your read EEPROM value to an alpha2,
111as follows:
112
113static int zd_reg2alpha2(u8 regdomain, char *alpha2)
114{
115 unsigned int i;
116 struct zd_reg_alpha2_map *reg_map;
117 for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
118 reg_map = &reg_alpha2_map[i];
119 if (regdomain == reg_map->reg) {
120 alpha2[0] = reg_map->alpha2[0];
121 alpha2[1] = reg_map->alpha2[1];
122 return 0;
123 }
124 }
125 return 1;
126}
127
128Lastly, you can then hint to the core of your discovered alpha2, if a match
129was found. You need to do this after you have registered your wiphy. You
130are expected to do this during initialization.
131
132 r = zd_reg2alpha2(mac->regdomain, alpha2);
133 if (!r)
134 regulatory_hint(hw->wiphy, alpha2, NULL);
135
136Example code - drivers providing a built in regulatory domain:
137--------------------------------------------------------------
138
139If you have regulatory information you can obtain from your
140driver and you *need* to use this we let you build a regulatory domain
141structure and pass it to the wireless core. To do this you should
142kmalloc() a structure big enough to hold your regulatory domain
143structure and you should then fill it with your data. Finally you simply
144call regulatory_hint() with the regulatory domain structure in it.
145
146Below is a simple example, with a regulatory domain cached using the stack.
147Your implementation may vary (read EEPROM cache instead, for example).
148
149Example cache of some regulatory domain
150
151struct ieee80211_regdomain mydriver_jp_regdom = {
152 .n_reg_rules = 3,
153 .alpha2 = "JP",
154 //.alpha2 = "99", /* If I have no alpha2 to map it to */
155 .reg_rules = {
156 /* IEEE 802.11b/g, channels 1..14 */
157 REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
158 /* IEEE 802.11a, channels 34..48 */
159 REG_RULE(5170-20, 5240+20, 40, 6, 20,
160 NL80211_RRF_PASSIVE_SCAN),
161 /* IEEE 802.11a, channels 52..64 */
162 REG_RULE(5260-20, 5320+20, 40, 6, 20,
163 NL80211_RRF_NO_IBSS |
164 NL80211_RRF_DFS),
165 }
166};
167
168Then in some part of your code after your wiphy has been registered:
169
170 int r;
171 struct ieee80211_regdomain *rd;
172 int size_of_regd;
173 int num_rules = mydriver_jp_regdom.n_reg_rules;
174 unsigned int i;
175
176 size_of_regd = sizeof(struct ieee80211_regdomain) +
177 (num_rules * sizeof(struct ieee80211_reg_rule));
178
179 rd = kzalloc(size_of_regd, GFP_KERNEL);
180 if (!rd)
181 return -ENOMEM;
182
183 memcpy(rd, &mydriver_jp_regdom, sizeof(struct ieee80211_regdomain));
184
185 for (i=0; i < num_rules; i++) {
186 memcpy(&rd->reg_rules[i], &mydriver_jp_regdom.reg_rules[i],
187 sizeof(struct ieee80211_reg_rule));
188 }
189 r = regulatory_hint(hw->wiphy, NULL, rd);
190 if (r) {
191 kfree(rd);
192 return r;
193 }
194
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index 6fcb3060dec5..b65f0799df48 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -341,6 +341,8 @@ key that does nothing by itself, as well as any hot key that is type-specific
3413.1 Guidelines for wireless device drivers 3413.1 Guidelines for wireless device drivers
342------------------------------------------ 342------------------------------------------
343 343
344(in this text, rfkill->foo means the foo field of struct rfkill).
345
3441. Each independent transmitter in a wireless device (usually there is only one 3461. Each independent transmitter in a wireless device (usually there is only one
345transmitter per device) should have a SINGLE rfkill class attached to it. 347transmitter per device) should have a SINGLE rfkill class attached to it.
346 348
@@ -363,10 +365,32 @@ This rule exists because users of the rfkill subsystem expect to get (and set,
363when possible) the overall transmitter rfkill state, not of a particular rfkill 365when possible) the overall transmitter rfkill state, not of a particular rfkill
364line. 366line.
365 367
3665. During suspend, the rfkill class will attempt to soft-block the radio 3685. The wireless device driver MUST NOT leave the transmitter enabled during
367through a call to rfkill->toggle_radio, and will try to restore its previous 369suspend and hibernation unless:
368state during resume. After a rfkill class is suspended, it will *not* call 370
369rfkill->toggle_radio until it is resumed. 371 5.1. The transmitter has to be enabled for some sort of functionality
372 like wake-on-wireless-packet or autonomous packet forwarding in a mesh
373 network, and that functionality is enabled for this suspend/hibernation
374 cycle.
375
376AND
377
378 5.2. The device was not on a user-requested BLOCKED state before
379 the suspend (i.e. the driver must NOT unblock a device, not even
380 to support wake-on-wireless-packet or remain in the mesh).
381
382In other words, there is absolutely no allowed scenario where a driver can
383automatically take action to unblock a rfkill controller (obviously, this deals
384with scenarios where soft-blocking or both soft and hard blocking is happening.
385Scenarios where hardware rfkill lines are the only ones blocking the
386transmitter are outside of this rule, since the wireless device driver does not
387control its input hardware rfkill lines in the first place).
388
3896. During resume, rfkill will try to restore its previous state.
390
3917. After a rfkill class is suspended, it will *not* call rfkill->toggle_radio
392until it is resumed.
393
370 394
371Example of a WLAN wireless driver connected to the rfkill subsystem: 395Example of a WLAN wireless driver connected to the rfkill subsystem:
372-------------------------------------------------------------------- 396--------------------------------------------------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 3596d1782264..e6aa6aa789f5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1048,6 +1048,13 @@ L: cbe-oss-dev@ozlabs.org
1048W: http://www.ibm.com/developerworks/power/cell/ 1048W: http://www.ibm.com/developerworks/power/cell/
1049S: Supported 1049S: Supported
1050 1050
1051CISCO 10G ETHERNET DRIVER
1052P: Scott Feldman
1053M: scofeldm@cisco.com
1054P: Joe Eykholt
1055M: jeykholt@cisco.com
1056S: Supported
1057
1051CFAG12864B LCD DRIVER 1058CFAG12864B LCD DRIVER
1052P: Miguel Ojeda Sandonis 1059P: Miguel Ojeda Sandonis
1053M: miguel.ojeda.sandonis@gmail.com 1060M: miguel.ojeda.sandonis@gmail.com
@@ -2321,6 +2328,12 @@ L: video4linux-list@redhat.com
2321W: http://www.ivtvdriver.org 2328W: http://www.ivtvdriver.org
2322S: Maintained 2329S: Maintained
2323 2330
2331JME NETWORK DRIVER
2332P: Guo-Fu Tseng
2333M: cooldavid@cooldavid.org
2334L: netdev@vger.kernel.org
2335S: Maintained
2336
2324JOURNALLING FLASH FILE SYSTEM V2 (JFFS2) 2337JOURNALLING FLASH FILE SYSTEM V2 (JFFS2)
2325P: David Woodhouse 2338P: David Woodhouse
2326M: dwmw2@infradead.org 2339M: dwmw2@infradead.org
@@ -3385,6 +3398,13 @@ M: linux-driver@qlogic.com
3385L: netdev@vger.kernel.org 3398L: netdev@vger.kernel.org
3386S: Supported 3399S: Supported
3387 3400
3401QLOGIC QLGE 10Gb ETHERNET DRIVER
3402P: Ron Mercer
3403M: linux-driver@qlogic.com
3404M: ron.mercer@qlogic.com
3405L: netdev@vger.kernel.org
3406S: Supported
3407
3388QNX4 FILESYSTEM 3408QNX4 FILESYSTEM
3389P: Anders Larsen 3409P: Anders Larsen
3390M: al@alarsen.net 3410M: al@alarsen.net
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index 610fb24d8ae2..cd317795f355 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -25,7 +25,7 @@
25#include "common.h" 25#include "common.h"
26 26
27static struct mv643xx_eth_platform_data db88f6281_ge00_data = { 27static struct mv643xx_eth_platform_data db88f6281_ge00_data = {
28 .phy_addr = 8, 28 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
29}; 29};
30 30
31static struct mv_sata_platform_data db88f6281_sata_data = { 31static struct mv_sata_platform_data db88f6281_sata_data = {
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index a3012d445971..b1d1a87a6821 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -30,7 +30,7 @@
30#define RD88F6192_GPIO_USB_VBUS 10 30#define RD88F6192_GPIO_USB_VBUS 10
31 31
32static struct mv643xx_eth_platform_data rd88f6192_ge00_data = { 32static struct mv643xx_eth_platform_data rd88f6192_ge00_data = {
33 .phy_addr = 8, 33 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
34}; 34};
35 35
36static struct mv_sata_platform_data rd88f6192_sata_data = { 36static struct mv_sata_platform_data rd88f6192_sata_data = {
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index d96487a0f18b..b6416615c0b9 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,7 +69,7 @@ static struct platform_device rd88f6281_nand_flash = {
69}; 69};
70 70
71static struct mv643xx_eth_platform_data rd88f6281_ge00_data = { 71static struct mv643xx_eth_platform_data rd88f6281_ge00_data = {
72 .phy_addr = -1, 72 .phy_addr = MV643XX_ETH_PHY_NONE,
73 .speed = SPEED_1000, 73 .speed = SPEED_1000,
74 .duplex = DUPLEX_FULL, 74 .duplex = DUPLEX_FULL,
75}; 75};
diff --git a/arch/arm/mach-loki/lb88rc8480-setup.c b/arch/arm/mach-loki/lb88rc8480-setup.c
index 2cc9ac9b488f..85f9c1296aa0 100644
--- a/arch/arm/mach-loki/lb88rc8480-setup.c
+++ b/arch/arm/mach-loki/lb88rc8480-setup.c
@@ -67,7 +67,7 @@ static struct platform_device lb88rc8480_boot_flash = {
67}; 67};
68 68
69static struct mv643xx_eth_platform_data lb88rc8480_ge0_data = { 69static struct mv643xx_eth_platform_data lb88rc8480_ge0_data = {
70 .phy_addr = 1, 70 .phy_addr = MV643XX_ETH_PHY_ADDR(1),
71 .mac_addr = { 0x00, 0x50, 0x43, 0x11, 0x22, 0x33 }, 71 .mac_addr = { 0x00, 0x50, 0x43, 0x11, 0x22, 0x33 },
72}; 72};
73 73
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 953a26c469cb..5842d3bb02b2 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -330,6 +330,7 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
330struct mv643xx_eth_shared_platform_data mv78xx0_ge01_shared_data = { 330struct mv643xx_eth_shared_platform_data mv78xx0_ge01_shared_data = {
331 .t_clk = 0, 331 .t_clk = 0,
332 .dram = &mv78xx0_mbus_dram_info, 332 .dram = &mv78xx0_mbus_dram_info,
333 .shared_smi = &mv78xx0_ge00_shared,
333}; 334};
334 335
335static struct resource mv78xx0_ge01_shared_resources[] = { 336static struct resource mv78xx0_ge01_shared_resources[] = {
@@ -370,7 +371,6 @@ static struct platform_device mv78xx0_ge01 = {
370void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data) 371void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
371{ 372{
372 eth_data->shared = &mv78xx0_ge01_shared; 373 eth_data->shared = &mv78xx0_ge01_shared;
373 eth_data->shared_smi = &mv78xx0_ge00_shared;
374 mv78xx0_ge01.dev.platform_data = eth_data; 374 mv78xx0_ge01.dev.platform_data = eth_data;
375 375
376 platform_device_register(&mv78xx0_ge01_shared); 376 platform_device_register(&mv78xx0_ge01_shared);
@@ -384,6 +384,7 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
384struct mv643xx_eth_shared_platform_data mv78xx0_ge10_shared_data = { 384struct mv643xx_eth_shared_platform_data mv78xx0_ge10_shared_data = {
385 .t_clk = 0, 385 .t_clk = 0,
386 .dram = &mv78xx0_mbus_dram_info, 386 .dram = &mv78xx0_mbus_dram_info,
387 .shared_smi = &mv78xx0_ge00_shared,
387}; 388};
388 389
389static struct resource mv78xx0_ge10_shared_resources[] = { 390static struct resource mv78xx0_ge10_shared_resources[] = {
@@ -424,7 +425,6 @@ static struct platform_device mv78xx0_ge10 = {
424void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data) 425void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
425{ 426{
426 eth_data->shared = &mv78xx0_ge10_shared; 427 eth_data->shared = &mv78xx0_ge10_shared;
427 eth_data->shared_smi = &mv78xx0_ge00_shared;
428 mv78xx0_ge10.dev.platform_data = eth_data; 428 mv78xx0_ge10.dev.platform_data = eth_data;
429 429
430 platform_device_register(&mv78xx0_ge10_shared); 430 platform_device_register(&mv78xx0_ge10_shared);
@@ -438,6 +438,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
438struct mv643xx_eth_shared_platform_data mv78xx0_ge11_shared_data = { 438struct mv643xx_eth_shared_platform_data mv78xx0_ge11_shared_data = {
439 .t_clk = 0, 439 .t_clk = 0,
440 .dram = &mv78xx0_mbus_dram_info, 440 .dram = &mv78xx0_mbus_dram_info,
441 .shared_smi = &mv78xx0_ge00_shared,
441}; 442};
442 443
443static struct resource mv78xx0_ge11_shared_resources[] = { 444static struct resource mv78xx0_ge11_shared_resources[] = {
@@ -478,7 +479,6 @@ static struct platform_device mv78xx0_ge11 = {
478void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data) 479void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
479{ 480{
480 eth_data->shared = &mv78xx0_ge11_shared; 481 eth_data->shared = &mv78xx0_ge11_shared;
481 eth_data->shared_smi = &mv78xx0_ge00_shared;
482 mv78xx0_ge11.dev.platform_data = eth_data; 482 mv78xx0_ge11.dev.platform_data = eth_data;
483 483
484 platform_device_register(&mv78xx0_ge11_shared); 484 platform_device_register(&mv78xx0_ge11_shared);
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
index a2d0c9783604..49f434c39eb7 100644
--- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
@@ -19,19 +19,19 @@
19#include "common.h" 19#include "common.h"
20 20
21static struct mv643xx_eth_platform_data db78x00_ge00_data = { 21static struct mv643xx_eth_platform_data db78x00_ge00_data = {
22 .phy_addr = 8, 22 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
23}; 23};
24 24
25static struct mv643xx_eth_platform_data db78x00_ge01_data = { 25static struct mv643xx_eth_platform_data db78x00_ge01_data = {
26 .phy_addr = 9, 26 .phy_addr = MV643XX_ETH_PHY_ADDR(9),
27}; 27};
28 28
29static struct mv643xx_eth_platform_data db78x00_ge10_data = { 29static struct mv643xx_eth_platform_data db78x00_ge10_data = {
30 .phy_addr = -1, 30 .phy_addr = MV643XX_ETH_PHY_NONE,
31}; 31};
32 32
33static struct mv643xx_eth_platform_data db78x00_ge11_data = { 33static struct mv643xx_eth_platform_data db78x00_ge11_data = {
34 .phy_addr = -1, 34 .phy_addr = MV643XX_ETH_PHY_NONE,
35}; 35};
36 36
37static struct mv_sata_platform_data db78x00_sata_data = { 37static struct mv_sata_platform_data db78x00_sata_data = {
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index ff13e9060b18..d318bea2af91 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -285,7 +285,7 @@ subsys_initcall(db88f5281_pci_init);
285 * Ethernet 285 * Ethernet
286 ****************************************************************************/ 286 ****************************************************************************/
287static struct mv643xx_eth_platform_data db88f5281_eth_data = { 287static struct mv643xx_eth_platform_data db88f5281_eth_data = {
288 .phy_addr = 8, 288 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
289}; 289};
290 290
291/***************************************************************************** 291/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index b38c65ccfb15..3e66098340a5 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -79,7 +79,7 @@ subsys_initcall(dns323_pci_init);
79 */ 79 */
80 80
81static struct mv643xx_eth_platform_data dns323_eth_data = { 81static struct mv643xx_eth_platform_data dns323_eth_data = {
82 .phy_addr = 8, 82 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
83}; 83};
84 84
85/**************************************************************************** 85/****************************************************************************
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index e321ec331839..610f2a6297f8 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -161,7 +161,7 @@ subsys_initcall(kurobox_pro_pci_init);
161 ****************************************************************************/ 161 ****************************************************************************/
162 162
163static struct mv643xx_eth_platform_data kurobox_pro_eth_data = { 163static struct mv643xx_eth_platform_data kurobox_pro_eth_data = {
164 .phy_addr = 8, 164 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
165}; 165};
166 166
167/***************************************************************************** 167/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 53ff1893b883..68acca98e638 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -109,7 +109,7 @@ subsys_initcall(mss2_pci_init);
109 ****************************************************************************/ 109 ****************************************************************************/
110 110
111static struct mv643xx_eth_platform_data mss2_eth_data = { 111static struct mv643xx_eth_platform_data mss2_eth_data = {
112 .phy_addr = 8, 112 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
113}; 113};
114 114
115/***************************************************************************** 115/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 978d4d599396..97c9ccb2ac60 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -39,7 +39,7 @@
39 * Ethernet 39 * Ethernet
40 ****************************************************************************/ 40 ****************************************************************************/
41static struct mv643xx_eth_platform_data mv2120_eth_data = { 41static struct mv643xx_eth_platform_data mv2120_eth_data = {
42 .phy_addr = 8, 42 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
43}; 43};
44 44
45static struct mv_sata_platform_data mv2120_sata_data = { 45static struct mv_sata_platform_data mv2120_sata_data = {
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index e72fe1e065e8..500cdadaf09c 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -88,7 +88,7 @@ static struct orion5x_mpp_mode rd88f5181l_fxo_mpp_modes[] __initdata = {
88}; 88};
89 89
90static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = { 90static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = {
91 .phy_addr = -1, 91 .phy_addr = MV643XX_ETH_PHY_NONE,
92 .speed = SPEED_1000, 92 .speed = SPEED_1000,
93 .duplex = DUPLEX_FULL, 93 .duplex = DUPLEX_FULL,
94}; 94};
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index a1fe3257320d..ebde81416499 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -89,7 +89,7 @@ static struct orion5x_mpp_mode rd88f5181l_ge_mpp_modes[] __initdata = {
89}; 89};
90 90
91static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = { 91static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = {
92 .phy_addr = -1, 92 .phy_addr = MV643XX_ETH_PHY_NONE,
93 .speed = SPEED_1000, 93 .speed = SPEED_1000,
94 .duplex = DUPLEX_FULL, 94 .duplex = DUPLEX_FULL,
95}; 95};
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index 4c3bcd76ac85..a04f9e4b633a 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -221,7 +221,7 @@ subsys_initcall(rd88f5182_pci_init);
221 ****************************************************************************/ 221 ****************************************************************************/
222 222
223static struct mv643xx_eth_platform_data rd88f5182_eth_data = { 223static struct mv643xx_eth_platform_data rd88f5182_eth_data = {
224 .phy_addr = 8, 224 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
225}; 225};
226 226
227/***************************************************************************** 227/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index ae0a5dccd2a1..1368e9fd1a06 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -103,8 +103,7 @@ static struct platform_device ts78xx_nor_boot_flash = {
103 * Ethernet 103 * Ethernet
104 ****************************************************************************/ 104 ****************************************************************************/
105static struct mv643xx_eth_platform_data ts78xx_eth_data = { 105static struct mv643xx_eth_platform_data ts78xx_eth_data = {
106 .phy_addr = 0, 106 .phy_addr = MV643XX_ETH_PHY_ADDR(0),
107 .force_phy_addr = 1,
108}; 107};
109 108
110/***************************************************************************** 109/*****************************************************************************
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index 83feac3147a6..19cde24fbfdf 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -48,7 +48,7 @@ void qnap_tsx09_power_off(void)
48 ****************************************************************************/ 48 ****************************************************************************/
49 49
50struct mv643xx_eth_platform_data qnap_tsx09_eth_data = { 50struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
51 .phy_addr = 8, 51 .phy_addr = MV643XX_ETH_PHY_ADDR(8),
52}; 52};
53 53
54static int __init qnap_tsx09_parse_hex_nibble(char n) 54static int __init qnap_tsx09_parse_hex_nibble(char n)
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index b6bc43e07eed..7ddc22c2bb54 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -92,7 +92,7 @@ static struct platform_device wnr854t_nor_flash = {
92}; 92};
93 93
94static struct mv643xx_eth_platform_data wnr854t_eth_data = { 94static struct mv643xx_eth_platform_data wnr854t_eth_data = {
95 .phy_addr = -1, 95 .phy_addr = MV643XX_ETH_PHY_NONE,
96 .speed = SPEED_1000, 96 .speed = SPEED_1000,
97 .duplex = DUPLEX_FULL, 97 .duplex = DUPLEX_FULL,
98}; 98};
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index b10da17b3fbd..9a4fd5256462 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -100,7 +100,7 @@ static struct platform_device wrt350n_v2_nor_flash = {
100}; 100};
101 101
102static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = { 102static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = {
103 .phy_addr = -1, 103 .phy_addr = MV643XX_ETH_PHY_NONE,
104 .speed = SPEED_1000, 104 .speed = SPEED_1000,
105 .duplex = DUPLEX_FULL, 105 .duplex = DUPLEX_FULL,
106}; 106};
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 32e0ad0ebea8..b6bd775d2e22 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -293,10 +293,8 @@ static int __init mv64x60_eth_device_setup(struct device_node *np, int id,
293 return -ENODEV; 293 return -ENODEV;
294 294
295 prop = of_get_property(phy, "reg", NULL); 295 prop = of_get_property(phy, "reg", NULL);
296 if (prop) { 296 if (prop)
297 pdata.force_phy_addr = 1; 297 pdata.phy_addr = MV643XX_ETH_PHY_ADDR(*prop);
298 pdata.phy_addr = *prop;
299 }
300 298
301 of_node_put(phy); 299 of_node_put(phy);
302 300
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 735f5ea17473..12cf5d491f0d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -260,6 +260,9 @@ config ACPI_ASUS
260config ACPI_TOSHIBA 260config ACPI_TOSHIBA
261 tristate "Toshiba Laptop Extras" 261 tristate "Toshiba Laptop Extras"
262 depends on X86 262 depends on X86
263 select INPUT_POLLDEV
264 select NET
265 select RFKILL
263 select BACKLIGHT_CLASS_DEVICE 266 select BACKLIGHT_CLASS_DEVICE
264 ---help--- 267 ---help---
265 This driver adds support for access to certain system settings 268 This driver adds support for access to certain system settings
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 0a43c8e0eff3..8a649f40d162 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * 4 *
5 * Copyright (C) 2002-2004 John Belmonte 5 * Copyright (C) 2002-2004 John Belmonte
6 * Copyright (C) 2008 Philip Langdale
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -33,7 +34,7 @@
33 * 34 *
34 */ 35 */
35 36
36#define TOSHIBA_ACPI_VERSION "0.18" 37#define TOSHIBA_ACPI_VERSION "0.19"
37#define PROC_INTERFACE_VERSION 1 38#define PROC_INTERFACE_VERSION 1
38 39
39#include <linux/kernel.h> 40#include <linux/kernel.h>
@@ -42,6 +43,9 @@
42#include <linux/types.h> 43#include <linux/types.h>
43#include <linux/proc_fs.h> 44#include <linux/proc_fs.h>
44#include <linux/backlight.h> 45#include <linux/backlight.h>
46#include <linux/platform_device.h>
47#include <linux/rfkill.h>
48#include <linux/input-polldev.h>
45 49
46#include <asm/uaccess.h> 50#include <asm/uaccess.h>
47 51
@@ -90,6 +94,7 @@ MODULE_LICENSE("GPL");
90#define HCI_VIDEO_OUT 0x001c 94#define HCI_VIDEO_OUT 0x001c
91#define HCI_HOTKEY_EVENT 0x001e 95#define HCI_HOTKEY_EVENT 0x001e
92#define HCI_LCD_BRIGHTNESS 0x002a 96#define HCI_LCD_BRIGHTNESS 0x002a
97#define HCI_WIRELESS 0x0056
93 98
94/* field definitions */ 99/* field definitions */
95#define HCI_LCD_BRIGHTNESS_BITS 3 100#define HCI_LCD_BRIGHTNESS_BITS 3
@@ -98,9 +103,14 @@ MODULE_LICENSE("GPL");
98#define HCI_VIDEO_OUT_LCD 0x1 103#define HCI_VIDEO_OUT_LCD 0x1
99#define HCI_VIDEO_OUT_CRT 0x2 104#define HCI_VIDEO_OUT_CRT 0x2
100#define HCI_VIDEO_OUT_TV 0x4 105#define HCI_VIDEO_OUT_TV 0x4
106#define HCI_WIRELESS_KILL_SWITCH 0x01
107#define HCI_WIRELESS_BT_PRESENT 0x0f
108#define HCI_WIRELESS_BT_ATTACH 0x40
109#define HCI_WIRELESS_BT_POWER 0x80
101 110
102static const struct acpi_device_id toshiba_device_ids[] = { 111static const struct acpi_device_id toshiba_device_ids[] = {
103 {"TOS6200", 0}, 112 {"TOS6200", 0},
113 {"TOS6208", 0},
104 {"TOS1900", 0}, 114 {"TOS1900", 0},
105 {"", 0}, 115 {"", 0},
106}; 116};
@@ -193,7 +203,7 @@ static acpi_status hci_raw(const u32 in[HCI_WORDS], u32 out[HCI_WORDS])
193 return status; 203 return status;
194} 204}
195 205
196/* common hci tasks (get or set one value) 206/* common hci tasks (get or set one or two value)
197 * 207 *
198 * In addition to the ACPI status, the HCI system returns a result which 208 * In addition to the ACPI status, the HCI system returns a result which
199 * may be useful (such as "not supported"). 209 * may be useful (such as "not supported").
@@ -218,6 +228,152 @@ static acpi_status hci_read1(u32 reg, u32 * out1, u32 * result)
218 return status; 228 return status;
219} 229}
220 230
231static acpi_status hci_write2(u32 reg, u32 in1, u32 in2, u32 *result)
232{
233 u32 in[HCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 };
234 u32 out[HCI_WORDS];
235 acpi_status status = hci_raw(in, out);
236 *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
237 return status;
238}
239
240static acpi_status hci_read2(u32 reg, u32 *out1, u32 *out2, u32 *result)
241{
242 u32 in[HCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
243 u32 out[HCI_WORDS];
244 acpi_status status = hci_raw(in, out);
245 *out1 = out[2];
246 *out2 = out[3];
247 *result = (status == AE_OK) ? out[0] : HCI_FAILURE;
248 return status;
249}
250
251struct toshiba_acpi_dev {
252 struct platform_device *p_dev;
253 struct rfkill *rfk_dev;
254 struct input_polled_dev *poll_dev;
255
256 const char *bt_name;
257 const char *rfk_name;
258
259 bool last_rfk_state;
260
261 struct mutex mutex;
262};
263
264static struct toshiba_acpi_dev toshiba_acpi = {
265 .bt_name = "Toshiba Bluetooth",
266 .rfk_name = "Toshiba RFKill Switch",
267 .last_rfk_state = false,
268};
269
270/* Bluetooth rfkill handlers */
271
272static u32 hci_get_bt_present(bool *present)
273{
274 u32 hci_result;
275 u32 value, value2;
276
277 value = 0;
278 value2 = 0;
279 hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
280 if (hci_result == HCI_SUCCESS)
281 *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
282
283 return hci_result;
284}
285
286static u32 hci_get_bt_on(bool *on)
287{
288 u32 hci_result;
289 u32 value, value2;
290
291 value = 0;
292 value2 = 0x0001;
293 hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
294 if (hci_result == HCI_SUCCESS)
295 *on = (value & HCI_WIRELESS_BT_POWER) &&
296 (value & HCI_WIRELESS_BT_ATTACH);
297
298 return hci_result;
299}
300
301static u32 hci_get_radio_state(bool *radio_state)
302{
303 u32 hci_result;
304 u32 value, value2;
305
306 value = 0;
307 value2 = 0x0001;
308 hci_read2(HCI_WIRELESS, &value, &value2, &hci_result);
309
310 *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
311 return hci_result;
312}
313
314static int bt_rfkill_toggle_radio(void *data, enum rfkill_state state)
315{
316 u32 result1, result2;
317 u32 value;
318 bool radio_state;
319 struct toshiba_acpi_dev *dev = data;
320
321 value = (state == RFKILL_STATE_UNBLOCKED);
322
323 if (hci_get_radio_state(&radio_state) != HCI_SUCCESS)
324 return -EFAULT;
325
326 switch (state) {
327 case RFKILL_STATE_UNBLOCKED:
328 if (!radio_state)
329 return -EPERM;
330 break;
331 case RFKILL_STATE_SOFT_BLOCKED:
332 break;
333 default:
334 return -EINVAL;
335 }
336
337 mutex_lock(&dev->mutex);
338 hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER, &result1);
339 hci_write2(HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH, &result2);
340 mutex_unlock(&dev->mutex);
341
342 if (result1 != HCI_SUCCESS || result2 != HCI_SUCCESS)
343 return -EFAULT;
344
345 return 0;
346}
347
348static void bt_poll_rfkill(struct input_polled_dev *poll_dev)
349{
350 bool state_changed;
351 bool new_rfk_state;
352 bool value;
353 u32 hci_result;
354 struct toshiba_acpi_dev *dev = poll_dev->private;
355
356 hci_result = hci_get_radio_state(&value);
357 if (hci_result != HCI_SUCCESS)
358 return; /* Can't do anything useful */
359
360 new_rfk_state = value;
361
362 mutex_lock(&dev->mutex);
363 state_changed = new_rfk_state != dev->last_rfk_state;
364 dev->last_rfk_state = new_rfk_state;
365 mutex_unlock(&dev->mutex);
366
367 if (unlikely(state_changed)) {
368 rfkill_force_state(dev->rfk_dev,
369 new_rfk_state ?
370 RFKILL_STATE_SOFT_BLOCKED :
371 RFKILL_STATE_HARD_BLOCKED);
372 input_report_switch(poll_dev->input, SW_RFKILL_ALL,
373 new_rfk_state);
374 }
375}
376
221static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; 377static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
222static struct backlight_device *toshiba_backlight_device; 378static struct backlight_device *toshiba_backlight_device;
223static int force_fan; 379static int force_fan;
@@ -547,6 +703,14 @@ static struct backlight_ops toshiba_backlight_data = {
547 703
548static void toshiba_acpi_exit(void) 704static void toshiba_acpi_exit(void)
549{ 705{
706 if (toshiba_acpi.poll_dev) {
707 input_unregister_polled_device(toshiba_acpi.poll_dev);
708 input_free_polled_device(toshiba_acpi.poll_dev);
709 }
710
711 if (toshiba_acpi.rfk_dev)
712 rfkill_unregister(toshiba_acpi.rfk_dev);
713
550 if (toshiba_backlight_device) 714 if (toshiba_backlight_device)
551 backlight_device_unregister(toshiba_backlight_device); 715 backlight_device_unregister(toshiba_backlight_device);
552 716
@@ -555,6 +719,8 @@ static void toshiba_acpi_exit(void)
555 if (toshiba_proc_dir) 719 if (toshiba_proc_dir)
556 remove_proc_entry(PROC_TOSHIBA, acpi_root_dir); 720 remove_proc_entry(PROC_TOSHIBA, acpi_root_dir);
557 721
722 platform_device_unregister(toshiba_acpi.p_dev);
723
558 return; 724 return;
559} 725}
560 726
@@ -562,6 +728,10 @@ static int __init toshiba_acpi_init(void)
562{ 728{
563 acpi_status status = AE_OK; 729 acpi_status status = AE_OK;
564 u32 hci_result; 730 u32 hci_result;
731 bool bt_present;
732 bool bt_on;
733 bool radio_on;
734 int ret = 0;
565 735
566 if (acpi_disabled) 736 if (acpi_disabled)
567 return -ENODEV; 737 return -ENODEV;
@@ -578,6 +748,18 @@ static int __init toshiba_acpi_init(void)
578 TOSHIBA_ACPI_VERSION); 748 TOSHIBA_ACPI_VERSION);
579 printk(MY_INFO " HCI method: %s\n", method_hci); 749 printk(MY_INFO " HCI method: %s\n", method_hci);
580 750
751 mutex_init(&toshiba_acpi.mutex);
752
753 toshiba_acpi.p_dev = platform_device_register_simple("toshiba_acpi",
754 -1, NULL, 0);
755 if (IS_ERR(toshiba_acpi.p_dev)) {
756 ret = PTR_ERR(toshiba_acpi.p_dev);
757 printk(MY_ERR "unable to register platform device\n");
758 toshiba_acpi.p_dev = NULL;
759 toshiba_acpi_exit();
760 return ret;
761 }
762
581 force_fan = 0; 763 force_fan = 0;
582 key_event_valid = 0; 764 key_event_valid = 0;
583 765
@@ -586,19 +768,23 @@ static int __init toshiba_acpi_init(void)
586 768
587 toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir); 769 toshiba_proc_dir = proc_mkdir(PROC_TOSHIBA, acpi_root_dir);
588 if (!toshiba_proc_dir) { 770 if (!toshiba_proc_dir) {
589 status = AE_ERROR; 771 toshiba_acpi_exit();
772 return -ENODEV;
590 } else { 773 } else {
591 toshiba_proc_dir->owner = THIS_MODULE; 774 toshiba_proc_dir->owner = THIS_MODULE;
592 status = add_device(); 775 status = add_device();
593 if (ACPI_FAILURE(status)) 776 if (ACPI_FAILURE(status)) {
594 remove_proc_entry(PROC_TOSHIBA, acpi_root_dir); 777 toshiba_acpi_exit();
778 return -ENODEV;
779 }
595 } 780 }
596 781
597 toshiba_backlight_device = backlight_device_register("toshiba",NULL, 782 toshiba_backlight_device = backlight_device_register("toshiba",
783 &toshiba_acpi.p_dev->dev,
598 NULL, 784 NULL,
599 &toshiba_backlight_data); 785 &toshiba_backlight_data);
600 if (IS_ERR(toshiba_backlight_device)) { 786 if (IS_ERR(toshiba_backlight_device)) {
601 int ret = PTR_ERR(toshiba_backlight_device); 787 ret = PTR_ERR(toshiba_backlight_device);
602 788
603 printk(KERN_ERR "Could not register toshiba backlight device\n"); 789 printk(KERN_ERR "Could not register toshiba backlight device\n");
604 toshiba_backlight_device = NULL; 790 toshiba_backlight_device = NULL;
@@ -607,7 +793,66 @@ static int __init toshiba_acpi_init(void)
607 } 793 }
608 toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 794 toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
609 795
610 return (ACPI_SUCCESS(status)) ? 0 : -ENODEV; 796 /* Register rfkill switch for Bluetooth */
797 if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
798 toshiba_acpi.rfk_dev = rfkill_allocate(&toshiba_acpi.p_dev->dev,
799 RFKILL_TYPE_BLUETOOTH);
800 if (!toshiba_acpi.rfk_dev) {
801 printk(MY_ERR "unable to allocate rfkill device\n");
802 toshiba_acpi_exit();
803 return -ENOMEM;
804 }
805
806 toshiba_acpi.rfk_dev->name = toshiba_acpi.bt_name;
807 toshiba_acpi.rfk_dev->toggle_radio = bt_rfkill_toggle_radio;
808 toshiba_acpi.rfk_dev->user_claim_unsupported = 1;
809 toshiba_acpi.rfk_dev->data = &toshiba_acpi;
810
811 if (hci_get_bt_on(&bt_on) == HCI_SUCCESS && bt_on) {
812 toshiba_acpi.rfk_dev->state = RFKILL_STATE_UNBLOCKED;
813 } else if (hci_get_radio_state(&radio_on) == HCI_SUCCESS &&
814 radio_on) {
815 toshiba_acpi.rfk_dev->state = RFKILL_STATE_SOFT_BLOCKED;
816 } else {
817 toshiba_acpi.rfk_dev->state = RFKILL_STATE_HARD_BLOCKED;
818 }
819
820 ret = rfkill_register(toshiba_acpi.rfk_dev);
821 if (ret) {
822 printk(MY_ERR "unable to register rfkill device\n");
823 toshiba_acpi_exit();
824 return -ENOMEM;
825 }
826 }
827
828 /* Register input device for kill switch */
829 toshiba_acpi.poll_dev = input_allocate_polled_device();
830 if (!toshiba_acpi.poll_dev) {
831 printk(MY_ERR "unable to allocate kill-switch input device\n");
832 toshiba_acpi_exit();
833 return -ENOMEM;
834 }
835 toshiba_acpi.poll_dev->private = &toshiba_acpi;
836 toshiba_acpi.poll_dev->poll = bt_poll_rfkill;
837 toshiba_acpi.poll_dev->poll_interval = 1000; /* msecs */
838
839 toshiba_acpi.poll_dev->input->name = toshiba_acpi.rfk_name;
840 toshiba_acpi.poll_dev->input->id.bustype = BUS_HOST;
841 toshiba_acpi.poll_dev->input->id.vendor = 0x0930; /* Toshiba USB ID */
842 set_bit(EV_SW, toshiba_acpi.poll_dev->input->evbit);
843 set_bit(SW_RFKILL_ALL, toshiba_acpi.poll_dev->input->swbit);
844 input_report_switch(toshiba_acpi.poll_dev->input, SW_RFKILL_ALL, TRUE);
845
846 ret = input_register_polled_device(toshiba_acpi.poll_dev);
847 if (ret) {
848 printk(MY_ERR "unable to register kill-switch input device\n");
849 rfkill_free(toshiba_acpi.rfk_dev);
850 toshiba_acpi.rfk_dev = NULL;
851 toshiba_acpi_exit();
852 return ret;
853 }
854
855 return 0;
611} 856}
612 857
613module_init(toshiba_acpi_init); 858module_init(toshiba_acpi_init);
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 41b2204ebc6e..5503bfc8e132 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1270,7 +1270,7 @@ static int comp_tx(struct eni_dev *eni_dev,int *pcr,int reserved,int *pre,
1270 if (*pre < 3) (*pre)++; /* else fail later */ 1270 if (*pre < 3) (*pre)++; /* else fail later */
1271 div = pre_div[*pre]*-*pcr; 1271 div = pre_div[*pre]*-*pcr;
1272 DPRINTK("max div %d\n",div); 1272 DPRINTK("max div %d\n",div);
1273 *res = (TS_CLOCK+div-1)/div-1; 1273 *res = DIV_ROUND_UP(TS_CLOCK, div)-1;
1274 } 1274 }
1275 if (*res < 0) *res = 0; 1275 if (*res < 0) *res = 0;
1276 if (*res > MID_SEG_MAX_RATE) *res = MID_SEG_MAX_RATE; 1276 if (*res > MID_SEG_MAX_RATE) *res = MID_SEG_MAX_RATE;
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index c0ac728dc564..615412364e99 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -635,7 +635,7 @@ static int make_rate (const hrz_dev * dev, u32 c, rounding r,
635 // take care of rounding 635 // take care of rounding
636 switch (r) { 636 switch (r) {
637 case round_down: 637 case round_down:
638 pre = (br+(c<<div)-1)/(c<<div); 638 pre = DIV_ROUND_UP(br, c<<div);
639 // but p must be non-zero 639 // but p must be non-zero
640 if (!pre) 640 if (!pre)
641 pre = 1; 641 pre = 1;
@@ -668,7 +668,7 @@ static int make_rate (const hrz_dev * dev, u32 c, rounding r,
668 // take care of rounding 668 // take care of rounding
669 switch (r) { 669 switch (r) {
670 case round_down: 670 case round_down:
671 pre = (br+(c<<div)-1)/(c<<div); 671 pre = DIV_ROUND_UP(br, c<<div);
672 break; 672 break;
673 case round_nearest: 673 case round_nearest:
674 pre = (br+(c<<div)/2)/(c<<div); 674 pre = (br+(c<<div)/2)/(c<<div);
@@ -698,7 +698,7 @@ got_it:
698 if (bits) 698 if (bits)
699 *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1); 699 *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
700 if (actual) { 700 if (actual) {
701 *actual = (br + (pre<<div) - 1) / (pre<<div); 701 *actual = DIV_ROUND_UP(br, pre<<div);
702 PRINTD (DBG_QOS, "actual rate: %u", *actual); 702 PRINTD (DBG_QOS, "actual rate: %u", *actual);
703 } 703 }
704 return 0; 704 return 0;
@@ -1967,7 +1967,7 @@ static int __devinit hrz_init (hrz_dev * dev) {
1967 // Set the max AAL5 cell count to be just enough to contain the 1967 // Set the max AAL5 cell count to be just enough to contain the
1968 // largest AAL5 frame that the user wants to receive 1968 // largest AAL5 frame that the user wants to receive
1969 wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF, 1969 wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
1970 (max_rx_size + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD); 1970 DIV_ROUND_UP(max_rx_size + ATM_AAL5_TRAILER, ATM_CELL_PAYLOAD));
1971 1971
1972 // Enable receive 1972 // Enable receive
1973 wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE); 1973 wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 3a504e94a4d9..e33ae0025b12 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1114,11 +1114,8 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1114 1114
1115 rpp = &vc->rcv.rx_pool; 1115 rpp = &vc->rcv.rx_pool;
1116 1116
1117 __skb_queue_tail(&rpp->queue, skb);
1117 rpp->len += skb->len; 1118 rpp->len += skb->len;
1118 if (!rpp->count++)
1119 rpp->first = skb;
1120 *rpp->last = skb;
1121 rpp->last = &skb->next;
1122 1119
1123 if (stat & SAR_RSQE_EPDU) { 1120 if (stat & SAR_RSQE_EPDU) {
1124 unsigned char *l1l2; 1121 unsigned char *l1l2;
@@ -1145,7 +1142,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1145 atomic_inc(&vcc->stats->rx_err); 1142 atomic_inc(&vcc->stats->rx_err);
1146 return; 1143 return;
1147 } 1144 }
1148 if (rpp->count > 1) { 1145 if (skb_queue_len(&rpp->queue) > 1) {
1149 struct sk_buff *sb; 1146 struct sk_buff *sb;
1150 1147
1151 skb = dev_alloc_skb(rpp->len); 1148 skb = dev_alloc_skb(rpp->len);
@@ -1161,12 +1158,9 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1161 dev_kfree_skb(skb); 1158 dev_kfree_skb(skb);
1162 return; 1159 return;
1163 } 1160 }
1164 sb = rpp->first; 1161 skb_queue_walk(&rpp->queue, sb)
1165 for (i = 0; i < rpp->count; i++) {
1166 memcpy(skb_put(skb, sb->len), 1162 memcpy(skb_put(skb, sb->len),
1167 sb->data, sb->len); 1163 sb->data, sb->len);
1168 sb = sb->next;
1169 }
1170 1164
1171 recycle_rx_pool_skb(card, rpp); 1165 recycle_rx_pool_skb(card, rpp);
1172 1166
@@ -1180,7 +1174,6 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
1180 return; 1174 return;
1181 } 1175 }
1182 1176
1183 skb->next = NULL;
1184 flush_rx_pool(card, rpp); 1177 flush_rx_pool(card, rpp);
1185 1178
1186 if (!atm_charge(vcc, skb->truesize)) { 1179 if (!atm_charge(vcc, skb->truesize)) {
@@ -1918,25 +1911,18 @@ recycle_rx_skb(struct idt77252_dev *card, struct sk_buff *skb)
1918static void 1911static void
1919flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp) 1912flush_rx_pool(struct idt77252_dev *card, struct rx_pool *rpp)
1920{ 1913{
1914 skb_queue_head_init(&rpp->queue);
1921 rpp->len = 0; 1915 rpp->len = 0;
1922 rpp->count = 0;
1923 rpp->first = NULL;
1924 rpp->last = &rpp->first;
1925} 1916}
1926 1917
1927static void 1918static void
1928recycle_rx_pool_skb(struct idt77252_dev *card, struct rx_pool *rpp) 1919recycle_rx_pool_skb(struct idt77252_dev *card, struct rx_pool *rpp)
1929{ 1920{
1930 struct sk_buff *skb, *next; 1921 struct sk_buff *skb, *tmp;
1931 int i;
1932 1922
1933 skb = rpp->first; 1923 skb_queue_walk_safe(&rpp->queue, skb, tmp)
1934 for (i = 0; i < rpp->count; i++) {
1935 next = skb->next;
1936 skb->next = NULL;
1937 recycle_rx_skb(card, skb); 1924 recycle_rx_skb(card, skb);
1938 skb = next; 1925
1939 }
1940 flush_rx_pool(card, rpp); 1926 flush_rx_pool(card, rpp);
1941} 1927}
1942 1928
@@ -2537,7 +2523,7 @@ idt77252_close(struct atm_vcc *vcc)
2537 waitfor_idle(card); 2523 waitfor_idle(card);
2538 spin_unlock_irqrestore(&card->cmd_lock, flags); 2524 spin_unlock_irqrestore(&card->cmd_lock, flags);
2539 2525
2540 if (vc->rcv.rx_pool.count) { 2526 if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
2541 DPRINTK("%s: closing a VC with pending rx buffers.\n", 2527 DPRINTK("%s: closing a VC with pending rx buffers.\n",
2542 card->name); 2528 card->name);
2543 2529
@@ -2970,7 +2956,7 @@ close_card_oam(struct idt77252_dev *card)
2970 waitfor_idle(card); 2956 waitfor_idle(card);
2971 spin_unlock_irqrestore(&card->cmd_lock, flags); 2957 spin_unlock_irqrestore(&card->cmd_lock, flags);
2972 2958
2973 if (vc->rcv.rx_pool.count) { 2959 if (skb_queue_len(&vc->rcv.rx_pool.queue) != 0) {
2974 DPRINTK("%s: closing a VC " 2960 DPRINTK("%s: closing a VC "
2975 "with pending rx buffers.\n", 2961 "with pending rx buffers.\n",
2976 card->name); 2962 card->name);
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index e83eaf120da0..5042bb2dab15 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -173,10 +173,8 @@ struct scq_info
173}; 173};
174 174
175struct rx_pool { 175struct rx_pool {
176 struct sk_buff *first; 176 struct sk_buff_head queue;
177 struct sk_buff **last;
178 unsigned int len; 177 unsigned int len;
179 unsigned int count;
180}; 178};
181 179
182struct aal1 { 180struct aal1 {
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 58583c6ac5be..752b1ba81f7e 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -496,8 +496,8 @@ static int open_rx_first(struct atm_vcc *vcc)
496 vcc->qos.rxtp.max_sdu = 65464; 496 vcc->qos.rxtp.max_sdu = 65464;
497 /* fix this - we may want to receive 64kB SDUs 497 /* fix this - we may want to receive 64kB SDUs
498 later */ 498 later */
499 cells = (vcc->qos.rxtp.max_sdu+ATM_AAL5_TRAILER+ 499 cells = DIV_ROUND_UP(vcc->qos.rxtp.max_sdu + ATM_AAL5_TRAILER,
500 ATM_CELL_PAYLOAD-1)/ATM_CELL_PAYLOAD; 500 ATM_CELL_PAYLOAD);
501 zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD); 501 zatm_vcc->pool = pool_index(cells*ATM_CELL_PAYLOAD);
502 } 502 }
503 else { 503 else {
@@ -820,7 +820,7 @@ static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
820 } 820 }
821 else { 821 else {
822 i = 255; 822 i = 255;
823 m = (ATM_OC3_PCR*255+max-1)/max; 823 m = DIV_ROUND_UP(ATM_OC3_PCR*255, max);
824 } 824 }
825 } 825 }
826 if (i > m) { 826 if (i > m) {
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 5b4c6e649c11..93f3690396a5 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -159,11 +159,8 @@ struct aoedev {
159 sector_t ssize; 159 sector_t ssize;
160 struct timer_list timer; 160 struct timer_list timer;
161 spinlock_t lock; 161 spinlock_t lock;
162 struct sk_buff *sendq_hd; /* packets needing to be sent, list head */ 162 struct sk_buff_head sendq;
163 struct sk_buff *sendq_tl; 163 struct sk_buff_head skbpool;
164 struct sk_buff *skbpool_hd;
165 struct sk_buff *skbpool_tl;
166 int nskbpool;
167 mempool_t *bufpool; /* for deadlock-free Buf allocation */ 164 mempool_t *bufpool; /* for deadlock-free Buf allocation */
168 struct list_head bufq; /* queue of bios to work on */ 165 struct list_head bufq; /* queue of bios to work on */
169 struct buf *inprocess; /* the one we're currently working on */ 166 struct buf *inprocess; /* the one we're currently working on */
@@ -199,7 +196,7 @@ int aoedev_flush(const char __user *str, size_t size);
199 196
200int aoenet_init(void); 197int aoenet_init(void);
201void aoenet_exit(void); 198void aoenet_exit(void);
202void aoenet_xmit(struct sk_buff *); 199void aoenet_xmit(struct sk_buff_head *);
203int is_aoe_netif(struct net_device *ifp); 200int is_aoe_netif(struct net_device *ifp);
204int set_aoe_iflist(const char __user *str, size_t size); 201int set_aoe_iflist(const char __user *str, size_t size);
205 202
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 0c39782b2660..fd2cf5439a1c 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -158,9 +158,9 @@ aoeblk_release(struct inode *inode, struct file *filp)
158static int 158static int
159aoeblk_make_request(struct request_queue *q, struct bio *bio) 159aoeblk_make_request(struct request_queue *q, struct bio *bio)
160{ 160{
161 struct sk_buff_head queue;
161 struct aoedev *d; 162 struct aoedev *d;
162 struct buf *buf; 163 struct buf *buf;
163 struct sk_buff *sl;
164 ulong flags; 164 ulong flags;
165 165
166 blk_queue_bounce(q, &bio); 166 blk_queue_bounce(q, &bio);
@@ -213,11 +213,11 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
213 list_add_tail(&buf->bufs, &d->bufq); 213 list_add_tail(&buf->bufs, &d->bufq);
214 214
215 aoecmd_work(d); 215 aoecmd_work(d);
216 sl = d->sendq_hd; 216 __skb_queue_head_init(&queue);
217 d->sendq_hd = d->sendq_tl = NULL; 217 skb_queue_splice_init(&d->sendq, &queue);
218 218
219 spin_unlock_irqrestore(&d->lock, flags); 219 spin_unlock_irqrestore(&d->lock, flags);
220 aoenet_xmit(sl); 220 aoenet_xmit(&queue);
221 221
222 return 0; 222 return 0;
223} 223}
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 181ebb85f0be..1f56d2c5b7fc 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -9,6 +9,7 @@
9#include <linux/completion.h> 9#include <linux/completion.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
12#include <linux/skbuff.h>
12#include "aoe.h" 13#include "aoe.h"
13 14
14enum { 15enum {
@@ -103,7 +104,12 @@ loop:
103 spin_lock_irqsave(&d->lock, flags); 104 spin_lock_irqsave(&d->lock, flags);
104 goto loop; 105 goto loop;
105 } 106 }
106 aoenet_xmit(skb); 107 if (skb) {
108 struct sk_buff_head queue;
109 __skb_queue_head_init(&queue);
110 __skb_queue_tail(&queue, skb);
111 aoenet_xmit(&queue);
112 }
107 aoecmd_cfg(major, minor); 113 aoecmd_cfg(major, minor);
108 return 0; 114 return 0;
109} 115}
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 2f1746295d06..e33da30be4c4 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -114,29 +114,22 @@ ifrotate(struct aoetgt *t)
114static void 114static void
115skb_pool_put(struct aoedev *d, struct sk_buff *skb) 115skb_pool_put(struct aoedev *d, struct sk_buff *skb)
116{ 116{
117 if (!d->skbpool_hd) 117 __skb_queue_tail(&d->skbpool, skb);
118 d->skbpool_hd = skb;
119 else
120 d->skbpool_tl->next = skb;
121 d->skbpool_tl = skb;
122} 118}
123 119
124static struct sk_buff * 120static struct sk_buff *
125skb_pool_get(struct aoedev *d) 121skb_pool_get(struct aoedev *d)
126{ 122{
127 struct sk_buff *skb; 123 struct sk_buff *skb = skb_peek(&d->skbpool);
128 124
129 skb = d->skbpool_hd;
130 if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) { 125 if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
131 d->skbpool_hd = skb->next; 126 __skb_unlink(skb, &d->skbpool);
132 skb->next = NULL;
133 return skb; 127 return skb;
134 } 128 }
135 if (d->nskbpool < NSKBPOOLMAX 129 if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
136 && (skb = new_skb(ETH_ZLEN))) { 130 (skb = new_skb(ETH_ZLEN)))
137 d->nskbpool++;
138 return skb; 131 return skb;
139 } 132
140 return NULL; 133 return NULL;
141} 134}
142 135
@@ -293,29 +286,22 @@ aoecmd_ata_rw(struct aoedev *d)
293 286
294 skb->dev = t->ifp->nd; 287 skb->dev = t->ifp->nd;
295 skb = skb_clone(skb, GFP_ATOMIC); 288 skb = skb_clone(skb, GFP_ATOMIC);
296 if (skb) { 289 if (skb)
297 if (d->sendq_hd) 290 __skb_queue_tail(&d->sendq, skb);
298 d->sendq_tl->next = skb;
299 else
300 d->sendq_hd = skb;
301 d->sendq_tl = skb;
302 }
303 return 1; 291 return 1;
304} 292}
305 293
306/* some callers cannot sleep, and they can call this function, 294/* some callers cannot sleep, and they can call this function,
307 * transmitting the packets later, when interrupts are on 295 * transmitting the packets later, when interrupts are on
308 */ 296 */
309static struct sk_buff * 297static void
310aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail) 298aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
311{ 299{
312 struct aoe_hdr *h; 300 struct aoe_hdr *h;
313 struct aoe_cfghdr *ch; 301 struct aoe_cfghdr *ch;
314 struct sk_buff *skb, *sl, *sl_tail; 302 struct sk_buff *skb;
315 struct net_device *ifp; 303 struct net_device *ifp;
316 304
317 sl = sl_tail = NULL;
318
319 read_lock(&dev_base_lock); 305 read_lock(&dev_base_lock);
320 for_each_netdev(&init_net, ifp) { 306 for_each_netdev(&init_net, ifp) {
321 dev_hold(ifp); 307 dev_hold(ifp);
@@ -329,8 +315,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
329 } 315 }
330 skb_put(skb, sizeof *h + sizeof *ch); 316 skb_put(skb, sizeof *h + sizeof *ch);
331 skb->dev = ifp; 317 skb->dev = ifp;
332 if (sl_tail == NULL) 318 __skb_queue_tail(queue, skb);
333 sl_tail = skb;
334 h = (struct aoe_hdr *) skb_mac_header(skb); 319 h = (struct aoe_hdr *) skb_mac_header(skb);
335 memset(h, 0, sizeof *h + sizeof *ch); 320 memset(h, 0, sizeof *h + sizeof *ch);
336 321
@@ -342,16 +327,10 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
342 h->minor = aoeminor; 327 h->minor = aoeminor;
343 h->cmd = AOECMD_CFG; 328 h->cmd = AOECMD_CFG;
344 329
345 skb->next = sl;
346 sl = skb;
347cont: 330cont:
348 dev_put(ifp); 331 dev_put(ifp);
349 } 332 }
350 read_unlock(&dev_base_lock); 333 read_unlock(&dev_base_lock);
351
352 if (tail != NULL)
353 *tail = sl_tail;
354 return sl;
355} 334}
356 335
357static void 336static void
@@ -406,11 +385,7 @@ resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
406 skb = skb_clone(skb, GFP_ATOMIC); 385 skb = skb_clone(skb, GFP_ATOMIC);
407 if (skb == NULL) 386 if (skb == NULL)
408 return; 387 return;
409 if (d->sendq_hd) 388 __skb_queue_tail(&d->sendq, skb);
410 d->sendq_tl->next = skb;
411 else
412 d->sendq_hd = skb;
413 d->sendq_tl = skb;
414} 389}
415 390
416static int 391static int
@@ -508,16 +483,15 @@ ata_scnt(unsigned char *packet) {
508static void 483static void
509rexmit_timer(ulong vp) 484rexmit_timer(ulong vp)
510{ 485{
486 struct sk_buff_head queue;
511 struct aoedev *d; 487 struct aoedev *d;
512 struct aoetgt *t, **tt, **te; 488 struct aoetgt *t, **tt, **te;
513 struct aoeif *ifp; 489 struct aoeif *ifp;
514 struct frame *f, *e; 490 struct frame *f, *e;
515 struct sk_buff *sl;
516 register long timeout; 491 register long timeout;
517 ulong flags, n; 492 ulong flags, n;
518 493
519 d = (struct aoedev *) vp; 494 d = (struct aoedev *) vp;
520 sl = NULL;
521 495
522 /* timeout is always ~150% of the moving average */ 496 /* timeout is always ~150% of the moving average */
523 timeout = d->rttavg; 497 timeout = d->rttavg;
@@ -589,7 +563,7 @@ rexmit_timer(ulong vp)
589 } 563 }
590 } 564 }
591 565
592 if (d->sendq_hd) { 566 if (!skb_queue_empty(&d->sendq)) {
593 n = d->rttavg <<= 1; 567 n = d->rttavg <<= 1;
594 if (n > MAXTIMER) 568 if (n > MAXTIMER)
595 d->rttavg = MAXTIMER; 569 d->rttavg = MAXTIMER;
@@ -600,15 +574,15 @@ rexmit_timer(ulong vp)
600 aoecmd_work(d); 574 aoecmd_work(d);
601 } 575 }
602 576
603 sl = d->sendq_hd; 577 __skb_queue_head_init(&queue);
604 d->sendq_hd = d->sendq_tl = NULL; 578 skb_queue_splice_init(&d->sendq, &queue);
605 579
606 d->timer.expires = jiffies + TIMERTICK; 580 d->timer.expires = jiffies + TIMERTICK;
607 add_timer(&d->timer); 581 add_timer(&d->timer);
608 582
609 spin_unlock_irqrestore(&d->lock, flags); 583 spin_unlock_irqrestore(&d->lock, flags);
610 584
611 aoenet_xmit(sl); 585 aoenet_xmit(&queue);
612} 586}
613 587
614/* enters with d->lock held */ 588/* enters with d->lock held */
@@ -767,12 +741,12 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
767void 741void
768aoecmd_ata_rsp(struct sk_buff *skb) 742aoecmd_ata_rsp(struct sk_buff *skb)
769{ 743{
744 struct sk_buff_head queue;
770 struct aoedev *d; 745 struct aoedev *d;
771 struct aoe_hdr *hin, *hout; 746 struct aoe_hdr *hin, *hout;
772 struct aoe_atahdr *ahin, *ahout; 747 struct aoe_atahdr *ahin, *ahout;
773 struct frame *f; 748 struct frame *f;
774 struct buf *buf; 749 struct buf *buf;
775 struct sk_buff *sl;
776 struct aoetgt *t; 750 struct aoetgt *t;
777 struct aoeif *ifp; 751 struct aoeif *ifp;
778 register long n; 752 register long n;
@@ -893,21 +867,21 @@ aoecmd_ata_rsp(struct sk_buff *skb)
893 867
894 aoecmd_work(d); 868 aoecmd_work(d);
895xmit: 869xmit:
896 sl = d->sendq_hd; 870 __skb_queue_head_init(&queue);
897 d->sendq_hd = d->sendq_tl = NULL; 871 skb_queue_splice_init(&d->sendq, &queue);
898 872
899 spin_unlock_irqrestore(&d->lock, flags); 873 spin_unlock_irqrestore(&d->lock, flags);
900 aoenet_xmit(sl); 874 aoenet_xmit(&queue);
901} 875}
902 876
903void 877void
904aoecmd_cfg(ushort aoemajor, unsigned char aoeminor) 878aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
905{ 879{
906 struct sk_buff *sl; 880 struct sk_buff_head queue;
907
908 sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
909 881
910 aoenet_xmit(sl); 882 __skb_queue_head_init(&queue);
883 aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
884 aoenet_xmit(&queue);
911} 885}
912 886
913struct sk_buff * 887struct sk_buff *
@@ -1076,7 +1050,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
1076 1050
1077 spin_unlock_irqrestore(&d->lock, flags); 1051 spin_unlock_irqrestore(&d->lock, flags);
1078 1052
1079 aoenet_xmit(sl); 1053 if (sl) {
1054 struct sk_buff_head queue;
1055 __skb_queue_head_init(&queue);
1056 __skb_queue_tail(&queue, sl);
1057 aoenet_xmit(&queue);
1058 }
1080} 1059}
1081 1060
1082void 1061void
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index a1d813ab0d6b..75a610adf515 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -188,14 +188,12 @@ skbfree(struct sk_buff *skb)
188static void 188static void
189skbpoolfree(struct aoedev *d) 189skbpoolfree(struct aoedev *d)
190{ 190{
191 struct sk_buff *skb; 191 struct sk_buff *skb, *tmp;
192 192
193 while ((skb = d->skbpool_hd)) { 193 skb_queue_walk_safe(&d->skbpool, skb, tmp)
194 d->skbpool_hd = skb->next;
195 skb->next = NULL;
196 skbfree(skb); 194 skbfree(skb);
197 } 195
198 d->skbpool_tl = NULL; 196 __skb_queue_head_init(&d->skbpool);
199} 197}
200 198
201/* find it or malloc it */ 199/* find it or malloc it */
@@ -217,6 +215,8 @@ aoedev_by_sysminor_m(ulong sysminor)
217 goto out; 215 goto out;
218 INIT_WORK(&d->work, aoecmd_sleepwork); 216 INIT_WORK(&d->work, aoecmd_sleepwork);
219 spin_lock_init(&d->lock); 217 spin_lock_init(&d->lock);
218 skb_queue_head_init(&d->sendq);
219 skb_queue_head_init(&d->skbpool);
220 init_timer(&d->timer); 220 init_timer(&d->timer);
221 d->timer.data = (ulong) d; 221 d->timer.data = (ulong) d;
222 d->timer.function = dummy_timer; 222 d->timer.function = dummy_timer;
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 7b15a5e9cec0..7f83ad90e76f 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -7,6 +7,7 @@
7#include <linux/hdreg.h> 7#include <linux/hdreg.h>
8#include <linux/blkdev.h> 8#include <linux/blkdev.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/skbuff.h>
10#include "aoe.h" 11#include "aoe.h"
11 12
12MODULE_LICENSE("GPL"); 13MODULE_LICENSE("GPL");
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index 0c81ca731287..9157d64270cb 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -95,13 +95,12 @@ mac_addr(char addr[6])
95} 95}
96 96
97void 97void
98aoenet_xmit(struct sk_buff *sl) 98aoenet_xmit(struct sk_buff_head *queue)
99{ 99{
100 struct sk_buff *skb; 100 struct sk_buff *skb, *tmp;
101 101
102 while ((skb = sl)) { 102 skb_queue_walk_safe(queue, skb, tmp) {
103 sl = sl->next; 103 __skb_unlink(skb, queue);
104 skb->next = skb->prev = NULL;
105 dev_queue_xmit(skb); 104 dev_queue_xmit(skb);
106 } 105 }
107} 106}
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 4d37bb312ee3..7938062c1cc7 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -352,14 +352,14 @@ static int bcsp_flush(struct hci_uart *hu)
352/* Remove ack'ed packets */ 352/* Remove ack'ed packets */
353static void bcsp_pkt_cull(struct bcsp_struct *bcsp) 353static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
354{ 354{
355 struct sk_buff *skb, *tmp;
355 unsigned long flags; 356 unsigned long flags;
356 struct sk_buff *skb;
357 int i, pkts_to_be_removed; 357 int i, pkts_to_be_removed;
358 u8 seqno; 358 u8 seqno;
359 359
360 spin_lock_irqsave(&bcsp->unack.lock, flags); 360 spin_lock_irqsave(&bcsp->unack.lock, flags);
361 361
362 pkts_to_be_removed = bcsp->unack.qlen; 362 pkts_to_be_removed = skb_queue_len(&bcsp->unack);
363 seqno = bcsp->msgq_txseq; 363 seqno = bcsp->msgq_txseq;
364 364
365 while (pkts_to_be_removed) { 365 while (pkts_to_be_removed) {
@@ -373,19 +373,19 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
373 BT_ERR("Peer acked invalid packet"); 373 BT_ERR("Peer acked invalid packet");
374 374
375 BT_DBG("Removing %u pkts out of %u, up to seqno %u", 375 BT_DBG("Removing %u pkts out of %u, up to seqno %u",
376 pkts_to_be_removed, bcsp->unack.qlen, (seqno - 1) & 0x07); 376 pkts_to_be_removed, skb_queue_len(&bcsp->unack),
377 (seqno - 1) & 0x07);
377 378
378 for (i = 0, skb = ((struct sk_buff *) &bcsp->unack)->next; i < pkts_to_be_removed 379 i = 0;
379 && skb != (struct sk_buff *) &bcsp->unack; i++) { 380 skb_queue_walk_safe(&bcsp->unack, skb, tmp) {
380 struct sk_buff *nskb; 381 if (i++ >= pkts_to_be_removed)
382 break;
381 383
382 nskb = skb->next;
383 __skb_unlink(skb, &bcsp->unack); 384 __skb_unlink(skb, &bcsp->unack);
384 kfree_skb(skb); 385 kfree_skb(skb);
385 skb = nskb;
386 } 386 }
387 387
388 if (bcsp->unack.qlen == 0) 388 if (skb_queue_empty(&bcsp->unack))
389 del_timer(&bcsp->tbcsp); 389 del_timer(&bcsp->tbcsp);
390 390
391 spin_unlock_irqrestore(&bcsp->unack.lock, flags); 391 spin_unlock_irqrestore(&bcsp->unack.lock, flags);
diff --git a/drivers/bluetooth/hci_usb.h b/drivers/bluetooth/hci_usb.h
index 1790cc8e431e..8e659914523f 100644
--- a/drivers/bluetooth/hci_usb.h
+++ b/drivers/bluetooth/hci_usb.h
@@ -70,8 +70,8 @@ static inline void _urb_queue_head(struct _urb_queue *q, struct _urb *_urb)
70{ 70{
71 unsigned long flags; 71 unsigned long flags;
72 spin_lock_irqsave(&q->lock, flags); 72 spin_lock_irqsave(&q->lock, flags);
73 /* _urb_unlink needs to know which spinlock to use, thus mb(). */ 73 /* _urb_unlink needs to know which spinlock to use, thus smp_mb(). */
74 _urb->queue = q; mb(); list_add(&_urb->list, &q->head); 74 _urb->queue = q; smp_mb(); list_add(&_urb->list, &q->head);
75 spin_unlock_irqrestore(&q->lock, flags); 75 spin_unlock_irqrestore(&q->lock, flags);
76} 76}
77 77
@@ -79,8 +79,8 @@ static inline void _urb_queue_tail(struct _urb_queue *q, struct _urb *_urb)
79{ 79{
80 unsigned long flags; 80 unsigned long flags;
81 spin_lock_irqsave(&q->lock, flags); 81 spin_lock_irqsave(&q->lock, flags);
82 /* _urb_unlink needs to know which spinlock to use, thus mb(). */ 82 /* _urb_unlink needs to know which spinlock to use, thus smp_mb(). */
83 _urb->queue = q; mb(); list_add_tail(&_urb->list, &q->head); 83 _urb->queue = q; smp_mb(); list_add_tail(&_urb->list, &q->head);
84 spin_unlock_irqrestore(&q->lock, flags); 84 spin_unlock_irqrestore(&q->lock, flags);
85} 85}
86 86
@@ -89,7 +89,7 @@ static inline void _urb_unlink(struct _urb *_urb)
89 struct _urb_queue *q; 89 struct _urb_queue *q;
90 unsigned long flags; 90 unsigned long flags;
91 91
92 mb(); 92 smp_mb();
93 q = _urb->queue; 93 q = _urb->queue;
94 /* If q is NULL, it will die at easy-to-debug NULL pointer dereference. 94 /* If q is NULL, it will die at easy-to-debug NULL pointer dereference.
95 No need to BUG(). */ 95 No need to BUG(). */
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 75726ea0fbbd..5360c4fd4739 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -828,15 +828,18 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
828 return -ESRCH; 828 return -ESRCH;
829 if (card->load_firmware == NULL) { 829 if (card->load_firmware == NULL) {
830 printk(KERN_DEBUG "kcapi: load: no load function\n"); 830 printk(KERN_DEBUG "kcapi: load: no load function\n");
831 capi_ctr_put(card);
831 return -ESRCH; 832 return -ESRCH;
832 } 833 }
833 834
834 if (ldef.t4file.len <= 0) { 835 if (ldef.t4file.len <= 0) {
835 printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len); 836 printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
837 capi_ctr_put(card);
836 return -EINVAL; 838 return -EINVAL;
837 } 839 }
838 if (ldef.t4file.data == NULL) { 840 if (ldef.t4file.data == NULL) {
839 printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n"); 841 printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
842 capi_ctr_put(card);
840 return -EINVAL; 843 return -EINVAL;
841 } 844 }
842 845
@@ -849,6 +852,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
849 852
850 if (card->cardstate != CARD_DETECTED) { 853 if (card->cardstate != CARD_DETECTED) {
851 printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr); 854 printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
855 capi_ctr_put(card);
852 return -EBUSY; 856 return -EBUSY;
853 } 857 }
854 card->cardstate = CARD_LOADING; 858 card->cardstate = CARD_LOADING;
diff --git a/drivers/isdn/hardware/mISDN/hfc_pci.h b/drivers/isdn/hardware/mISDN/hfc_pci.h
index fd2c9be6d849..5783d22a18fe 100644
--- a/drivers/isdn/hardware/mISDN/hfc_pci.h
+++ b/drivers/isdn/hardware/mISDN/hfc_pci.h
@@ -183,8 +183,8 @@
183#define D_FREG_MASK 0xF 183#define D_FREG_MASK 0xF
184 184
185struct zt { 185struct zt {
186 unsigned short z1; /* Z1 pointer 16 Bit */ 186 __le16 z1; /* Z1 pointer 16 Bit */
187 unsigned short z2; /* Z2 pointer 16 Bit */ 187 __le16 z2; /* Z2 pointer 16 Bit */
188}; 188};
189 189
190struct dfifo { 190struct dfifo {
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 9cf5edbb1a9b..cd8302af40eb 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
43module_param(debug, uint, 0); 43module_param(debug, uint, 0);
44 44
45static LIST_HEAD(HFClist); 45static LIST_HEAD(HFClist);
46DEFINE_RWLOCK(HFClock); 46static DEFINE_RWLOCK(HFClock);
47 47
48enum { 48enum {
49 HFC_CCD_2BD0, 49 HFC_CCD_2BD0,
@@ -88,7 +88,7 @@ struct hfcPCI_hw {
88 unsigned char bswapped; 88 unsigned char bswapped;
89 unsigned char protocol; 89 unsigned char protocol;
90 int nt_timer; 90 int nt_timer;
91 unsigned char *pci_io; /* start of PCI IO memory */ 91 unsigned char __iomem *pci_io; /* start of PCI IO memory */
92 dma_addr_t dmahandle; 92 dma_addr_t dmahandle;
93 void *fifos; /* FIFO memory */ 93 void *fifos; /* FIFO memory */
94 int last_bfifo_cnt[2]; 94 int last_bfifo_cnt[2];
@@ -153,7 +153,7 @@ release_io_hfcpci(struct hfc_pci *hc)
153 pci_write_config_word(hc->pdev, PCI_COMMAND, 0); 153 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
154 del_timer(&hc->hw.timer); 154 del_timer(&hc->hw.timer);
155 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle); 155 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
156 iounmap((void *)hc->hw.pci_io); 156 iounmap(hc->hw.pci_io);
157} 157}
158 158
159/* 159/*
@@ -366,8 +366,7 @@ static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
366 bzt->f2 = MAX_B_FRAMES; 366 bzt->f2 = MAX_B_FRAMES;
367 bzt->f1 = bzt->f2; /* init F pointers to remain constant */ 367 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
368 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1); 368 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
369 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16( 369 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 2);
370 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1 - 1));
371 if (fifo_state) 370 if (fifo_state)
372 hc->hw.fifo_en |= fifo_state; 371 hc->hw.fifo_en |= fifo_state;
373 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en); 372 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
@@ -482,7 +481,7 @@ receive_dmsg(struct hfc_pci *hc)
482 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) | 481 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
483 (MAX_D_FRAMES + 1); /* next buffer */ 482 (MAX_D_FRAMES + 1); /* next buffer */
484 df->za[df->f2 & D_FREG_MASK].z2 = 483 df->za[df->f2 & D_FREG_MASK].z2 =
485 cpu_to_le16((zp->z2 + rcnt) & (D_FIFO_SIZE - 1)); 484 cpu_to_le16((le16_to_cpu(zp->z2) + rcnt) & (D_FIFO_SIZE - 1));
486 } else { 485 } else {
487 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC); 486 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
488 if (!dch->rx_skb) { 487 if (!dch->rx_skb) {
@@ -523,10 +522,10 @@ receive_dmsg(struct hfc_pci *hc)
523/* 522/*
524 * check for transparent receive data and read max one threshold size if avail 523 * check for transparent receive data and read max one threshold size if avail
525 */ 524 */
526int 525static int
527hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata) 526hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
528{ 527{
529 unsigned short *z1r, *z2r; 528 __le16 *z1r, *z2r;
530 int new_z2, fcnt, maxlen; 529 int new_z2, fcnt, maxlen;
531 u_char *ptr, *ptr1; 530 u_char *ptr, *ptr1;
532 531
@@ -576,7 +575,7 @@ hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
576/* 575/*
577 * B-channel main receive routine 576 * B-channel main receive routine
578 */ 577 */
579void 578static void
580main_rec_hfcpci(struct bchannel *bch) 579main_rec_hfcpci(struct bchannel *bch)
581{ 580{
582 struct hfc_pci *hc = bch->hw; 581 struct hfc_pci *hc = bch->hw;
@@ -724,7 +723,7 @@ hfcpci_fill_fifo(struct bchannel *bch)
724 struct bzfifo *bz; 723 struct bzfifo *bz;
725 u_char *bdata; 724 u_char *bdata;
726 u_char new_f1, *src, *dst; 725 u_char new_f1, *src, *dst;
727 unsigned short *z1t, *z2t; 726 __le16 *z1t, *z2t;
728 727
729 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO)) 728 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
730 printk(KERN_DEBUG "%s\n", __func__); 729 printk(KERN_DEBUG "%s\n", __func__);
@@ -1679,7 +1678,7 @@ hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1679 * called for card init message 1678 * called for card init message
1680 */ 1679 */
1681 1680
1682void 1681static void
1683inithfcpci(struct hfc_pci *hc) 1682inithfcpci(struct hfc_pci *hc)
1684{ 1683{
1685 printk(KERN_DEBUG "inithfcpci: entered\n"); 1684 printk(KERN_DEBUG "inithfcpci: entered\n");
@@ -1966,7 +1965,7 @@ setup_hw(struct hfc_pci *hc)
1966 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n"); 1965 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1967 return 1; 1966 return 1;
1968 } 1967 }
1969 hc->hw.pci_io = (char *)(ulong)hc->pdev->resource[1].start; 1968 hc->hw.pci_io = (char __iomem *)(unsigned long)hc->pdev->resource[1].start;
1970 1969
1971 if (!hc->hw.pci_io) { 1970 if (!hc->hw.pci_io) {
1972 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n"); 1971 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 127cfdad68e7..77c280ef2eb6 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1533,8 +1533,10 @@ static int isdn_ppp_mp_bundle_array_init(void)
1533 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle); 1533 int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
1534 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL ) 1534 if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
1535 return -ENOMEM; 1535 return -ENOMEM;
1536 for( i = 0; i < ISDN_MAX_CHANNELS; i++ ) 1536 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
1537 spin_lock_init(&isdn_ppp_bundle_arr[i].lock); 1537 spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
1538 skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
1539 }
1538 return 0; 1540 return 0;
1539} 1541}
1540 1542
@@ -1567,7 +1569,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1567 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL) 1569 if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
1568 return -ENOMEM; 1570 return -ENOMEM;
1569 lp->next = lp->last = lp; /* nobody else in a queue */ 1571 lp->next = lp->last = lp; /* nobody else in a queue */
1570 lp->netdev->pb->frags = NULL; 1572 skb_queue_head_init(&lp->netdev->pb->frags);
1571 lp->netdev->pb->frames = 0; 1573 lp->netdev->pb->frames = 0;
1572 lp->netdev->pb->seq = UINT_MAX; 1574 lp->netdev->pb->seq = UINT_MAX;
1573 } 1575 }
@@ -1579,28 +1581,29 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
1579 1581
1580static u32 isdn_ppp_mp_get_seq( int short_seq, 1582static u32 isdn_ppp_mp_get_seq( int short_seq,
1581 struct sk_buff * skb, u32 last_seq ); 1583 struct sk_buff * skb, u32 last_seq );
1582static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, 1584static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
1583 struct sk_buff * from, struct sk_buff * to ); 1585 struct sk_buff *to);
1584static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, 1586static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
1585 struct sk_buff * from, struct sk_buff * to ); 1587 struct sk_buff *from, struct sk_buff *to,
1586static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb ); 1588 u32 lastseq);
1589static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
1587static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb ); 1590static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
1588 1591
1589static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, 1592static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1590 struct sk_buff *skb) 1593 struct sk_buff *skb)
1591{ 1594{
1592 struct ippp_struct *is; 1595 struct sk_buff *newfrag, *frag, *start, *nextf;
1593 isdn_net_local * lpq;
1594 ippp_bundle * mp;
1595 isdn_mppp_stats * stats;
1596 struct sk_buff * newfrag, * frag, * start, *nextf;
1597 u32 newseq, minseq, thisseq; 1596 u32 newseq, minseq, thisseq;
1597 isdn_mppp_stats *stats;
1598 struct ippp_struct *is;
1598 unsigned long flags; 1599 unsigned long flags;
1600 isdn_net_local *lpq;
1601 ippp_bundle *mp;
1599 int slot; 1602 int slot;
1600 1603
1601 spin_lock_irqsave(&net_dev->pb->lock, flags); 1604 spin_lock_irqsave(&net_dev->pb->lock, flags);
1602 mp = net_dev->pb; 1605 mp = net_dev->pb;
1603 stats = &mp->stats; 1606 stats = &mp->stats;
1604 slot = lp->ppp_slot; 1607 slot = lp->ppp_slot;
1605 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) { 1608 if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
1606 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n", 1609 printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
@@ -1611,20 +1614,19 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1611 return; 1614 return;
1612 } 1615 }
1613 is = ippp_table[slot]; 1616 is = ippp_table[slot];
1614 if( ++mp->frames > stats->max_queue_len ) 1617 if (++mp->frames > stats->max_queue_len)
1615 stats->max_queue_len = mp->frames; 1618 stats->max_queue_len = mp->frames;
1616 1619
1617 if (is->debug & 0x8) 1620 if (is->debug & 0x8)
1618 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb); 1621 isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
1619 1622
1620 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, 1623 newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
1621 skb, is->last_link_seqno); 1624 skb, is->last_link_seqno);
1622
1623 1625
1624 /* if this packet seq # is less than last already processed one, 1626 /* if this packet seq # is less than last already processed one,
1625 * toss it right away, but check for sequence start case first 1627 * toss it right away, but check for sequence start case first
1626 */ 1628 */
1627 if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) { 1629 if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
1628 mp->seq = newseq; /* the first packet: required for 1630 mp->seq = newseq; /* the first packet: required for
1629 * rfc1990 non-compliant clients -- 1631 * rfc1990 non-compliant clients --
1630 * prevents constant packet toss */ 1632 * prevents constant packet toss */
@@ -1634,7 +1636,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1634 spin_unlock_irqrestore(&mp->lock, flags); 1636 spin_unlock_irqrestore(&mp->lock, flags);
1635 return; 1637 return;
1636 } 1638 }
1637 1639
1638 /* find the minimum received sequence number over all links */ 1640 /* find the minimum received sequence number over all links */
1639 is->last_link_seqno = minseq = newseq; 1641 is->last_link_seqno = minseq = newseq;
1640 for (lpq = net_dev->queue;;) { 1642 for (lpq = net_dev->queue;;) {
@@ -1655,22 +1657,31 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1655 * packets */ 1657 * packets */
1656 newfrag = skb; 1658 newfrag = skb;
1657 1659
1658 /* if this new fragment is before the first one, then enqueue it now. */ 1660 /* Insert new fragment into the proper sequence slot. */
1659 if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) { 1661 skb_queue_walk(&mp->frags, frag) {
1660 newfrag->next = frag; 1662 if (MP_SEQ(frag) == newseq) {
1661 mp->frags = frag = newfrag; 1663 isdn_ppp_mp_free_skb(mp, newfrag);
1662 newfrag = NULL; 1664 newfrag = NULL;
1663 } 1665 break;
1666 }
1667 if (MP_LT(newseq, MP_SEQ(frag))) {
1668 __skb_queue_before(&mp->frags, frag, newfrag);
1669 newfrag = NULL;
1670 break;
1671 }
1672 }
1673 if (newfrag)
1674 __skb_queue_tail(&mp->frags, newfrag);
1664 1675
1665 start = MP_FLAGS(frag) & MP_BEGIN_FRAG && 1676 frag = skb_peek(&mp->frags);
1666 MP_SEQ(frag) == mp->seq ? frag : NULL; 1677 start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) &&
1678 (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
1679 if (!start)
1680 goto check_overflow;
1667 1681
1668 /* 1682 /* main fragment traversing loop
1669 * main fragment traversing loop
1670 * 1683 *
1671 * try to accomplish several tasks: 1684 * try to accomplish several tasks:
1672 * - insert new fragment into the proper sequence slot (once that's done
1673 * newfrag will be set to NULL)
1674 * - reassemble any complete fragment sequence (non-null 'start' 1685 * - reassemble any complete fragment sequence (non-null 'start'
1675 * indicates there is a continguous sequence present) 1686 * indicates there is a continguous sequence present)
1676 * - discard any incomplete sequences that are below minseq -- due 1687 * - discard any incomplete sequences that are below minseq -- due
@@ -1679,71 +1690,46 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1679 * come to complete such sequence and it should be discarded 1690 * come to complete such sequence and it should be discarded
1680 * 1691 *
1681 * loop completes when we accomplished the following tasks: 1692 * loop completes when we accomplished the following tasks:
1682 * - new fragment is inserted in the proper sequence ('newfrag' is
1683 * set to NULL)
1684 * - we hit a gap in the sequence, so no reassembly/processing is 1693 * - we hit a gap in the sequence, so no reassembly/processing is
1685 * possible ('start' would be set to NULL) 1694 * possible ('start' would be set to NULL)
1686 * 1695 *
1687 * algorithm for this code is derived from code in the book 1696 * algorithm for this code is derived from code in the book
1688 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley) 1697 * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
1689 */ 1698 */
1690 while (start != NULL || newfrag != NULL) { 1699 skb_queue_walk_safe(&mp->frags, frag, nextf) {
1691 1700 thisseq = MP_SEQ(frag);
1692 thisseq = MP_SEQ(frag); 1701
1693 nextf = frag->next; 1702 /* check for misplaced start */
1694 1703 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
1695 /* drop any duplicate fragments */ 1704 printk(KERN_WARNING"isdn_mppp(seq %d): new "
1696 if (newfrag != NULL && thisseq == newseq) { 1705 "BEGIN flag with no prior END", thisseq);
1697 isdn_ppp_mp_free_skb(mp, newfrag); 1706 stats->seqerrs++;
1698 newfrag = NULL; 1707 stats->frame_drops++;
1699 } 1708 isdn_ppp_mp_discard(mp, start, frag);
1700 1709 start = frag;
1701 /* insert new fragment before next element if possible. */ 1710 } else if (MP_LE(thisseq, minseq)) {
1702 if (newfrag != NULL && (nextf == NULL || 1711 if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
1703 MP_LT(newseq, MP_SEQ(nextf)))) {
1704 newfrag->next = nextf;
1705 frag->next = nextf = newfrag;
1706 newfrag = NULL;
1707 }
1708
1709 if (start != NULL) {
1710 /* check for misplaced start */
1711 if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
1712 printk(KERN_WARNING"isdn_mppp(seq %d): new "
1713 "BEGIN flag with no prior END", thisseq);
1714 stats->seqerrs++;
1715 stats->frame_drops++;
1716 start = isdn_ppp_mp_discard(mp, start,frag);
1717 nextf = frag->next;
1718 }
1719 } else if (MP_LE(thisseq, minseq)) {
1720 if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
1721 start = frag; 1712 start = frag;
1722 else { 1713 else {
1723 if (MP_FLAGS(frag) & MP_END_FRAG) 1714 if (MP_FLAGS(frag) & MP_END_FRAG)
1724 stats->frame_drops++; 1715 stats->frame_drops++;
1725 if( mp->frags == frag ) 1716 __skb_unlink(skb, &mp->frags);
1726 mp->frags = nextf;
1727 isdn_ppp_mp_free_skb(mp, frag); 1717 isdn_ppp_mp_free_skb(mp, frag);
1728 frag = nextf;
1729 continue; 1718 continue;
1730 } 1719 }
1731 } 1720 }
1732 1721
1733 /* if start is non-null and we have end fragment, then 1722 /* if we have end fragment, then we have full reassembly
1734 * we have full reassembly sequence -- reassemble 1723 * sequence -- reassemble and process packet now
1735 * and process packet now
1736 */ 1724 */
1737 if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) { 1725 if (MP_FLAGS(frag) & MP_END_FRAG) {
1738 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK; 1726 minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
1739 /* Reassemble the packet then dispatch it */ 1727 /* Reassemble the packet then dispatch it */
1740 isdn_ppp_mp_reassembly(net_dev, lp, start, nextf); 1728 isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq);
1741
1742 start = NULL;
1743 frag = NULL;
1744 1729
1745 mp->frags = nextf; 1730 start = NULL;
1746 } 1731 frag = NULL;
1732 }
1747 1733
1748 /* check if need to update start pointer: if we just 1734 /* check if need to update start pointer: if we just
1749 * reassembled the packet and sequence is contiguous 1735 * reassembled the packet and sequence is contiguous
@@ -1754,26 +1740,25 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1754 * below low watermark and set start to the next frag or 1740 * below low watermark and set start to the next frag or
1755 * clear start ptr. 1741 * clear start ptr.
1756 */ 1742 */
1757 if (nextf != NULL && 1743 if (nextf != (struct sk_buff *)&mp->frags &&
1758 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) { 1744 ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
1759 /* if we just reassembled and the next one is here, 1745 /* if we just reassembled and the next one is here,
1760 * then start another reassembly. */ 1746 * then start another reassembly.
1761 1747 */
1762 if (frag == NULL) { 1748 if (frag == NULL) {
1763 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG) 1749 if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
1764 start = nextf; 1750 start = nextf;
1765 else 1751 else {
1766 { 1752 printk(KERN_WARNING"isdn_mppp(seq %d):"
1767 printk(KERN_WARNING"isdn_mppp(seq %d):" 1753 " END flag with no following "
1768 " END flag with no following " 1754 "BEGIN", thisseq);
1769 "BEGIN", thisseq);
1770 stats->seqerrs++; 1755 stats->seqerrs++;
1771 } 1756 }
1772 } 1757 }
1773 1758 } else {
1774 } else { 1759 if (nextf != (struct sk_buff *)&mp->frags &&
1775 if ( nextf != NULL && frag != NULL && 1760 frag != NULL &&
1776 MP_LT(thisseq, minseq)) { 1761 MP_LT(thisseq, minseq)) {
1777 /* we've got a break in the sequence 1762 /* we've got a break in the sequence
1778 * and we not at the end yet 1763 * and we not at the end yet
1779 * and we did not just reassembled 1764 * and we did not just reassembled
@@ -1782,41 +1767,39 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
1782 * discard all the frames below low watermark 1767 * discard all the frames below low watermark
1783 * and start over */ 1768 * and start over */
1784 stats->frame_drops++; 1769 stats->frame_drops++;
1785 mp->frags = isdn_ppp_mp_discard(mp,start,nextf); 1770 isdn_ppp_mp_discard(mp, start, nextf);
1786 } 1771 }
1787 /* break in the sequence, no reassembly */ 1772 /* break in the sequence, no reassembly */
1788 start = NULL; 1773 start = NULL;
1789 } 1774 }
1790 1775 if (!start)
1791 frag = nextf; 1776 break;
1792 } /* while -- main loop */ 1777 }
1793 1778
1794 if (mp->frags == NULL) 1779check_overflow:
1795 mp->frags = frag;
1796
1797 /* rather straighforward way to deal with (not very) possible 1780 /* rather straighforward way to deal with (not very) possible
1798 * queue overflow */ 1781 * queue overflow
1782 */
1799 if (mp->frames > MP_MAX_QUEUE_LEN) { 1783 if (mp->frames > MP_MAX_QUEUE_LEN) {
1800 stats->overflows++; 1784 stats->overflows++;
1801 while (mp->frames > MP_MAX_QUEUE_LEN) { 1785 skb_queue_walk_safe(&mp->frags, frag, nextf) {
1802 frag = mp->frags->next; 1786 if (mp->frames <= MP_MAX_QUEUE_LEN)
1803 isdn_ppp_mp_free_skb(mp, mp->frags); 1787 break;
1804 mp->frags = frag; 1788 __skb_unlink(frag, &mp->frags);
1789 isdn_ppp_mp_free_skb(mp, frag);
1805 } 1790 }
1806 } 1791 }
1807 spin_unlock_irqrestore(&mp->lock, flags); 1792 spin_unlock_irqrestore(&mp->lock, flags);
1808} 1793}
1809 1794
1810static void isdn_ppp_mp_cleanup( isdn_net_local * lp ) 1795static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
1811{ 1796{
1812 struct sk_buff * frag = lp->netdev->pb->frags; 1797 struct sk_buff *skb, *tmp;
1813 struct sk_buff * nextfrag; 1798
1814 while( frag ) { 1799 skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) {
1815 nextfrag = frag->next; 1800 __skb_unlink(skb, &lp->netdev->pb->frags);
1816 isdn_ppp_mp_free_skb(lp->netdev->pb, frag); 1801 isdn_ppp_mp_free_skb(lp->netdev->pb, skb);
1817 frag = nextfrag; 1802 }
1818 }
1819 lp->netdev->pb->frags = NULL;
1820} 1803}
1821 1804
1822static u32 isdn_ppp_mp_get_seq( int short_seq, 1805static u32 isdn_ppp_mp_get_seq( int short_seq,
@@ -1853,72 +1836,115 @@ static u32 isdn_ppp_mp_get_seq( int short_seq,
1853 return seq; 1836 return seq;
1854} 1837}
1855 1838
1856struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp, 1839static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
1857 struct sk_buff * from, struct sk_buff * to ) 1840 struct sk_buff *to)
1858{ 1841{
1859 if( from ) 1842 if (from) {
1860 while (from != to) { 1843 struct sk_buff *skb, *tmp;
1861 struct sk_buff * next = from->next; 1844 int freeing = 0;
1862 isdn_ppp_mp_free_skb(mp, from); 1845
1863 from = next; 1846 skb_queue_walk_safe(&mp->frags, skb, tmp) {
1847 if (skb == to)
1848 break;
1849 if (skb == from)
1850 freeing = 1;
1851 if (!freeing)
1852 continue;
1853 __skb_unlink(skb, &mp->frags);
1854 isdn_ppp_mp_free_skb(mp, skb);
1864 } 1855 }
1865 return from; 1856 }
1866} 1857}
1867 1858
1868void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp, 1859static unsigned int calc_tot_len(struct sk_buff_head *queue,
1869 struct sk_buff * from, struct sk_buff * to ) 1860 struct sk_buff *from, struct sk_buff *to)
1870{ 1861{
1871 ippp_bundle * mp = net_dev->pb; 1862 unsigned int tot_len = 0;
1872 int proto; 1863 struct sk_buff *skb;
1873 struct sk_buff * skb; 1864 int found_start = 0;
1865
1866 skb_queue_walk(queue, skb) {
1867 if (skb == from)
1868 found_start = 1;
1869 if (!found_start)
1870 continue;
1871 tot_len += skb->len - MP_HEADER_LEN;
1872 if (skb == to)
1873 break;
1874 }
1875 return tot_len;
1876}
1877
1878/* Reassemble packet using fragments in the reassembly queue from
1879 * 'from' until 'to', inclusive.
1880 */
1881static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
1882 struct sk_buff *from, struct sk_buff *to,
1883 u32 lastseq)
1884{
1885 ippp_bundle *mp = net_dev->pb;
1874 unsigned int tot_len; 1886 unsigned int tot_len;
1887 struct sk_buff *skb;
1888 int proto;
1875 1889
1876 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) { 1890 if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
1877 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n", 1891 printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
1878 __func__, lp->ppp_slot); 1892 __func__, lp->ppp_slot);
1879 return; 1893 return;
1880 } 1894 }
1881 if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) { 1895
1882 if( ippp_table[lp->ppp_slot]->debug & 0x40 ) 1896 tot_len = calc_tot_len(&mp->frags, from, to);
1897
1898 if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
1899 if (ippp_table[lp->ppp_slot]->debug & 0x40)
1883 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, " 1900 printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
1884 "len %d\n", MP_SEQ(from), from->len ); 1901 "len %d\n", MP_SEQ(from), from->len);
1885 skb = from; 1902 skb = from;
1886 skb_pull(skb, MP_HEADER_LEN); 1903 skb_pull(skb, MP_HEADER_LEN);
1904 __skb_unlink(skb, &mp->frags);
1887 mp->frames--; 1905 mp->frames--;
1888 } else { 1906 } else {
1889 struct sk_buff * frag; 1907 struct sk_buff *walk, *tmp;
1890 int n; 1908 int found_start = 0;
1891 1909
1892 for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++) 1910 if (ippp_table[lp->ppp_slot]->debug & 0x40)
1893 tot_len += frag->len - MP_HEADER_LEN;
1894
1895 if( ippp_table[lp->ppp_slot]->debug & 0x40 )
1896 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d " 1911 printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
1897 "to %d, len %d\n", MP_SEQ(from), 1912 "to %d, len %d\n", MP_SEQ(from), lastseq,
1898 (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len ); 1913 tot_len);
1899 if( (skb = dev_alloc_skb(tot_len)) == NULL ) { 1914
1915 skb = dev_alloc_skb(tot_len);
1916 if (!skb)
1900 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff " 1917 printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
1901 "of size %d\n", tot_len); 1918 "of size %d\n", tot_len);
1902 isdn_ppp_mp_discard(mp, from, to); 1919
1903 return; 1920 found_start = 0;
1904 } 1921 skb_queue_walk_safe(&mp->frags, walk, tmp) {
1922 if (walk == from)
1923 found_start = 1;
1924 if (!found_start)
1925 continue;
1905 1926
1906 while( from != to ) { 1927 if (skb) {
1907 unsigned int len = from->len - MP_HEADER_LEN; 1928 unsigned int len = walk->len - MP_HEADER_LEN;
1929 skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
1930 skb_put(skb, len),
1931 len);
1932 }
1933 __skb_unlink(walk, &mp->frags);
1934 isdn_ppp_mp_free_skb(mp, walk);
1908 1935
1909 skb_copy_from_linear_data_offset(from, MP_HEADER_LEN, 1936 if (walk == to)
1910 skb_put(skb,len), 1937 break;
1911 len);
1912 frag = from->next;
1913 isdn_ppp_mp_free_skb(mp, from);
1914 from = frag;
1915 } 1938 }
1916 } 1939 }
1940 if (!skb)
1941 return;
1942
1917 proto = isdn_ppp_strip_proto(skb); 1943 proto = isdn_ppp_strip_proto(skb);
1918 isdn_ppp_push_higher(net_dev, lp, skb, proto); 1944 isdn_ppp_push_higher(net_dev, lp, skb, proto);
1919} 1945}
1920 1946
1921static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb) 1947static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
1922{ 1948{
1923 dev_kfree_skb(skb); 1949 dev_kfree_skb(skb);
1924 mp->frames--; 1950 mp->frames--;
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index b5fabc7019d8..e7462924b505 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -124,18 +124,6 @@ mISDN_read(struct file *filep, char *buf, size_t count, loff_t *off)
124 return ret; 124 return ret;
125} 125}
126 126
127static loff_t
128mISDN_llseek(struct file *filep, loff_t offset, int orig)
129{
130 return -ESPIPE;
131}
132
133static ssize_t
134mISDN_write(struct file *filep, const char *buf, size_t count, loff_t *off)
135{
136 return -EOPNOTSUPP;
137}
138
139static unsigned int 127static unsigned int
140mISDN_poll(struct file *filep, poll_table *wait) 128mISDN_poll(struct file *filep, poll_table *wait)
141{ 129{
@@ -157,8 +145,9 @@ mISDN_poll(struct file *filep, poll_table *wait)
157} 145}
158 146
159static void 147static void
160dev_expire_timer(struct mISDNtimer *timer) 148dev_expire_timer(unsigned long data)
161{ 149{
150 struct mISDNtimer *timer = (void *)data;
162 u_long flags; 151 u_long flags;
163 152
164 spin_lock_irqsave(&timer->dev->lock, flags); 153 spin_lock_irqsave(&timer->dev->lock, flags);
@@ -191,7 +180,7 @@ misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
191 spin_unlock_irqrestore(&dev->lock, flags); 180 spin_unlock_irqrestore(&dev->lock, flags);
192 timer->dev = dev; 181 timer->dev = dev;
193 timer->tl.data = (long)timer; 182 timer->tl.data = (long)timer;
194 timer->tl.function = (void *) dev_expire_timer; 183 timer->tl.function = dev_expire_timer;
195 init_timer(&timer->tl); 184 init_timer(&timer->tl);
196 timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000); 185 timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);
197 add_timer(&timer->tl); 186 add_timer(&timer->tl);
@@ -211,6 +200,9 @@ misdn_del_timer(struct mISDNtimerdev *dev, int id)
211 list_for_each_entry(timer, &dev->pending, list) { 200 list_for_each_entry(timer, &dev->pending, list) {
212 if (timer->id == id) { 201 if (timer->id == id) {
213 list_del_init(&timer->list); 202 list_del_init(&timer->list);
203 /* RED-PEN AK: race -- timer can be still running on
204 * other CPU. Needs reference count I think
205 */
214 del_timer(&timer->tl); 206 del_timer(&timer->tl);
215 ret = timer->id; 207 ret = timer->id;
216 kfree(timer); 208 kfree(timer);
@@ -268,9 +260,7 @@ mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
268} 260}
269 261
270static struct file_operations mISDN_fops = { 262static struct file_operations mISDN_fops = {
271 .llseek = mISDN_llseek,
272 .read = mISDN_read, 263 .read = mISDN_read,
273 .write = mISDN_write,
274 .poll = mISDN_poll, 264 .poll = mISDN_poll,
275 .ioctl = mISDN_ioctl, 265 .ioctl = mISDN_ioctl,
276 .open = mISDN_open, 266 .open = mISDN_open,
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index fdfb2b2cb734..a424869707a5 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -130,12 +130,12 @@ static const char filename[] = __FILE__;
130 130
131static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n"; 131static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n";
132#define TIMEOUT_MSG(lineno) \ 132#define TIMEOUT_MSG(lineno) \
133 printk(timeout_msg, filename,__FUNCTION__,(lineno)) 133 printk(timeout_msg, filename,__func__,(lineno))
134 134
135static const char invalid_pcb_msg[] = 135static const char invalid_pcb_msg[] =
136"*** invalid pcb length %d at %s:%s (line %d) ***\n"; 136"*** invalid pcb length %d at %s:%s (line %d) ***\n";
137#define INVALID_PCB_MSG(len) \ 137#define INVALID_PCB_MSG(len) \
138 printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__) 138 printk(invalid_pcb_msg, (len),filename,__func__,__LINE__)
139 139
140static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x..."; 140static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x...";
141 141
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 6011d6fabef0..85fa40a0a667 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -127,7 +127,6 @@ MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered mu
127 (CP)->tx_tail - (CP)->tx_head - 1) 127 (CP)->tx_tail - (CP)->tx_head - 1)
128 128
129#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ 129#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
130#define RX_OFFSET 2
131#define CP_INTERNAL_PHY 32 130#define CP_INTERNAL_PHY 32
132 131
133/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */ 132/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
@@ -552,14 +551,14 @@ rx_status_loop:
552 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n", 551 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
553 dev->name, rx_tail, status, len); 552 dev->name, rx_tail, status, len);
554 553
555 buflen = cp->rx_buf_sz + RX_OFFSET; 554 buflen = cp->rx_buf_sz + NET_IP_ALIGN;
556 new_skb = dev_alloc_skb (buflen); 555 new_skb = netdev_alloc_skb(dev, buflen);
557 if (!new_skb) { 556 if (!new_skb) {
558 dev->stats.rx_dropped++; 557 dev->stats.rx_dropped++;
559 goto rx_next; 558 goto rx_next;
560 } 559 }
561 560
562 skb_reserve(new_skb, RX_OFFSET); 561 skb_reserve(new_skb, NET_IP_ALIGN);
563 562
564 dma_unmap_single(&cp->pdev->dev, mapping, 563 dma_unmap_single(&cp->pdev->dev, mapping,
565 buflen, PCI_DMA_FROMDEVICE); 564 buflen, PCI_DMA_FROMDEVICE);
@@ -1051,19 +1050,20 @@ static void cp_init_hw (struct cp_private *cp)
1051 cpw8_f(Cfg9346, Cfg9346_Lock); 1050 cpw8_f(Cfg9346, Cfg9346_Lock);
1052} 1051}
1053 1052
1054static int cp_refill_rx (struct cp_private *cp) 1053static int cp_refill_rx(struct cp_private *cp)
1055{ 1054{
1055 struct net_device *dev = cp->dev;
1056 unsigned i; 1056 unsigned i;
1057 1057
1058 for (i = 0; i < CP_RX_RING_SIZE; i++) { 1058 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1059 struct sk_buff *skb; 1059 struct sk_buff *skb;
1060 dma_addr_t mapping; 1060 dma_addr_t mapping;
1061 1061
1062 skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET); 1062 skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN);
1063 if (!skb) 1063 if (!skb)
1064 goto err_out; 1064 goto err_out;
1065 1065
1066 skb_reserve(skb, RX_OFFSET); 1066 skb_reserve(skb, NET_IP_ALIGN);
1067 1067
1068 mapping = dma_map_single(&cp->pdev->dev, skb->data, 1068 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1069 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1069 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 8a5b0d293f75..32e66f0d4344 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -309,7 +309,7 @@ enum RTL8139_registers {
309 Cfg9346 = 0x50, 309 Cfg9346 = 0x50,
310 Config0 = 0x51, 310 Config0 = 0x51,
311 Config1 = 0x52, 311 Config1 = 0x52,
312 FlashReg = 0x54, 312 TimerInt = 0x54,
313 MediaStatus = 0x58, 313 MediaStatus = 0x58,
314 Config3 = 0x59, 314 Config3 = 0x59,
315 Config4 = 0x5A, /* absent on RTL-8139A */ 315 Config4 = 0x5A, /* absent on RTL-8139A */
@@ -325,6 +325,7 @@ enum RTL8139_registers {
325 FIFOTMS = 0x70, /* FIFO Control and test. */ 325 FIFOTMS = 0x70, /* FIFO Control and test. */
326 CSCR = 0x74, /* Chip Status and Configuration Register. */ 326 CSCR = 0x74, /* Chip Status and Configuration Register. */
327 PARA78 = 0x78, 327 PARA78 = 0x78,
328 FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */
328 PARA7c = 0x7c, /* Magic transceiver parameter register. */ 329 PARA7c = 0x7c, /* Magic transceiver parameter register. */
329 Config5 = 0xD8, /* absent on RTL-8139A */ 330 Config5 = 0xD8, /* absent on RTL-8139A */
330}; 331};
@@ -2009,9 +2010,9 @@ no_early_rx:
2009 /* Malloc up new buffer, compatible with net-2e. */ 2010 /* Malloc up new buffer, compatible with net-2e. */
2010 /* Omit the four octet CRC from the length. */ 2011 /* Omit the four octet CRC from the length. */
2011 2012
2012 skb = dev_alloc_skb (pkt_size + 2); 2013 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
2013 if (likely(skb)) { 2014 if (likely(skb)) {
2014 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 2015 skb_reserve (skb, NET_IP_ALIGN); /* 16 byte align the IP fields. */
2015#if RX_BUF_IDX == 3 2016#if RX_BUF_IDX == 3
2016 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2017 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
2017#else 2018#else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4a11296a9514..2d6a060d92e5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1813,7 +1813,7 @@ config FEC2
1813 1813
1814config FEC_MPC52xx 1814config FEC_MPC52xx
1815 tristate "MPC52xx FEC driver" 1815 tristate "MPC52xx FEC driver"
1816 depends on PPC_MERGE && PPC_MPC52xx && PPC_BESTCOMM_FEC 1816 depends on PPC_MPC52xx && PPC_BESTCOMM_FEC
1817 select CRC32 1817 select CRC32
1818 select PHYLIB 1818 select PHYLIB
1819 ---help--- 1819 ---help---
@@ -1840,6 +1840,17 @@ config NE_H8300
1840 Say Y here if you want to use the NE2000 compatible 1840 Say Y here if you want to use the NE2000 compatible
1841 controller on the Renesas H8/300 processor. 1841 controller on the Renesas H8/300 processor.
1842 1842
1843config ATL2
1844 tristate "Atheros L2 Fast Ethernet support"
1845 depends on PCI
1846 select CRC32
1847 select MII
1848 help
1849 This driver supports the Atheros L2 fast ethernet adapter.
1850
1851 To compile this driver as a module, choose M here. The module
1852 will be called atl2.
1853
1843source "drivers/net/fs_enet/Kconfig" 1854source "drivers/net/fs_enet/Kconfig"
1844 1855
1845endif # NET_ETHERNET 1856endif # NET_ETHERNET
@@ -1927,15 +1938,6 @@ config E1000
1927 To compile this driver as a module, choose M here. The module 1938 To compile this driver as a module, choose M here. The module
1928 will be called e1000. 1939 will be called e1000.
1929 1940
1930config E1000_DISABLE_PACKET_SPLIT
1931 bool "Disable Packet Split for PCI express adapters"
1932 depends on E1000
1933 help
1934 Say Y here if you want to use the legacy receive path for PCI express
1935 hardware.
1936
1937 If in doubt, say N.
1938
1939config E1000E 1941config E1000E
1940 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" 1942 tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
1941 depends on PCI && (!SPARC32 || BROKEN) 1943 depends on PCI && (!SPARC32 || BROKEN)
@@ -2046,6 +2048,7 @@ config R8169
2046 tristate "Realtek 8169 gigabit ethernet support" 2048 tristate "Realtek 8169 gigabit ethernet support"
2047 depends on PCI 2049 depends on PCI
2048 select CRC32 2050 select CRC32
2051 select MII
2049 ---help--- 2052 ---help---
2050 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. 2053 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
2051 2054
@@ -2262,7 +2265,7 @@ config UGETH_TX_ON_DEMAND
2262config MV643XX_ETH 2265config MV643XX_ETH
2263 tristate "Marvell Discovery (643XX) and Orion ethernet support" 2266 tristate "Marvell Discovery (643XX) and Orion ethernet support"
2264 depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || PLAT_ORION 2267 depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || PLAT_ORION
2265 select MII 2268 select PHYLIB
2266 help 2269 help
2267 This driver supports the gigabit ethernet MACs in the 2270 This driver supports the gigabit ethernet MACs in the
2268 Marvell Discovery PPC/MIPS chipset family (MV643XX) and 2271 Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -2302,6 +2305,18 @@ config ATL1E
2302 To compile this driver as a module, choose M here. The module 2305 To compile this driver as a module, choose M here. The module
2303 will be called atl1e. 2306 will be called atl1e.
2304 2307
2308config JME
2309 tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
2310 depends on PCI
2311 select CRC32
2312 select MII
2313 ---help---
2314 This driver supports the PCI-Express gigabit ethernet adapters
2315 based on JMicron JMC250 chipset.
2316
2317 To compile this driver as a module, choose M here. The module
2318 will be called jme.
2319
2305endif # NETDEV_1000 2320endif # NETDEV_1000
2306 2321
2307# 2322#
@@ -2377,10 +2392,18 @@ config EHEA
2377 To compile the driver as a module, choose M here. The module 2392 To compile the driver as a module, choose M here. The module
2378 will be called ehea. 2393 will be called ehea.
2379 2394
2395config ENIC
2396 tristate "E, the Cisco 10G Ethernet NIC"
2397 depends on PCI && INET
2398 select INET_LRO
2399 help
2400 This enables the support for the Cisco 10G Ethernet card.
2401
2380config IXGBE 2402config IXGBE
2381 tristate "Intel(R) 10GbE PCI Express adapters support" 2403 tristate "Intel(R) 10GbE PCI Express adapters support"
2382 depends on PCI && INET 2404 depends on PCI && INET
2383 select INET_LRO 2405 select INET_LRO
2406 select INTEL_IOATDMA
2384 ---help--- 2407 ---help---
2385 This driver supports Intel(R) 10GbE PCI Express family of 2408 This driver supports Intel(R) 10GbE PCI Express family of
2386 adapters. For more information on how to identify your adapter, go 2409 adapters. For more information on how to identify your adapter, go
@@ -2432,6 +2455,7 @@ config MYRI10GE
2432 select FW_LOADER 2455 select FW_LOADER
2433 select CRC32 2456 select CRC32
2434 select INET_LRO 2457 select INET_LRO
2458 select INTEL_IOATDMA
2435 ---help--- 2459 ---help---
2436 This driver supports Myricom Myri-10G Dual Protocol interface in 2460 This driver supports Myricom Myri-10G Dual Protocol interface in
2437 Ethernet mode. If the eeprom on your board is not recent enough, 2461 Ethernet mode. If the eeprom on your board is not recent enough,
@@ -2496,6 +2520,15 @@ config BNX2X
2496 To compile this driver as a module, choose M here: the module 2520 To compile this driver as a module, choose M here: the module
2497 will be called bnx2x. This is recommended. 2521 will be called bnx2x. This is recommended.
2498 2522
2523config QLGE
2524 tristate "QLogic QLGE 10Gb Ethernet Driver Support"
2525 depends on PCI
2526 help
2527 This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
2528
2529 To compile this driver as a module, choose M here: the module
2530 will be called qlge.
2531
2499source "drivers/net/sfc/Kconfig" 2532source "drivers/net/sfc/Kconfig"
2500 2533
2501endif # NETDEV_10000 2534endif # NETDEV_10000
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7629c9017215..fa2510b2e609 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -15,9 +15,12 @@ obj-$(CONFIG_EHEA) += ehea/
15obj-$(CONFIG_CAN) += can/ 15obj-$(CONFIG_CAN) += can/
16obj-$(CONFIG_BONDING) += bonding/ 16obj-$(CONFIG_BONDING) += bonding/
17obj-$(CONFIG_ATL1) += atlx/ 17obj-$(CONFIG_ATL1) += atlx/
18obj-$(CONFIG_ATL2) += atlx/
18obj-$(CONFIG_ATL1E) += atl1e/ 19obj-$(CONFIG_ATL1E) += atl1e/
19obj-$(CONFIG_GIANFAR) += gianfar_driver.o 20obj-$(CONFIG_GIANFAR) += gianfar_driver.o
20obj-$(CONFIG_TEHUTI) += tehuti.o 21obj-$(CONFIG_TEHUTI) += tehuti.o
22obj-$(CONFIG_ENIC) += enic/
23obj-$(CONFIG_JME) += jme.o
21 24
22gianfar_driver-objs := gianfar.o \ 25gianfar_driver-objs := gianfar.o \
23 gianfar_ethtool.o \ 26 gianfar_ethtool.o \
@@ -111,7 +114,7 @@ obj-$(CONFIG_EL2) += 3c503.o 8390p.o
111obj-$(CONFIG_NE2000) += ne.o 8390p.o 114obj-$(CONFIG_NE2000) += ne.o 8390p.o
112obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o 115obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
113obj-$(CONFIG_HPLAN) += hp.o 8390p.o 116obj-$(CONFIG_HPLAN) += hp.o 8390p.o
114obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o 117obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o
115obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o 118obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
116obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o 119obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
117obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o 120obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
@@ -128,6 +131,7 @@ obj-$(CONFIG_AX88796) += ax88796.o
128obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o 131obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
129obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o 132obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
130obj-$(CONFIG_QLA3XXX) += qla3xxx.o 133obj-$(CONFIG_QLA3XXX) += qla3xxx.o
134obj-$(CONFIG_QLGE) += qlge/
131 135
132obj-$(CONFIG_PPP) += ppp_generic.o 136obj-$(CONFIG_PPP) += ppp_generic.o
133obj-$(CONFIG_PPP_ASYNC) += ppp_async.o 137obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index bdc4c0bb56d9..a5b07691e466 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -442,24 +442,24 @@ static int arcnet_open(struct net_device *dev)
442 BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse " 442 BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse "
443 "DOS networking programs!\n"); 443 "DOS networking programs!\n");
444 444
445 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 445 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
446 if (ASTATUS() & RESETflag) { 446 if (ASTATUS() & RESETflag) {
447 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 447 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
448 ACOMMAND(CFLAGScmd | RESETclear); 448 ACOMMAND(CFLAGScmd | RESETclear);
449 } 449 }
450 450
451 451
452 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 452 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
453 /* make sure we're ready to receive IRQ's. */ 453 /* make sure we're ready to receive IRQ's. */
454 AINTMASK(0); 454 AINTMASK(0);
455 udelay(1); /* give it time to set the mask before 455 udelay(1); /* give it time to set the mask before
456 * we reset it again. (may not even be 456 * we reset it again. (may not even be
457 * necessary) 457 * necessary)
458 */ 458 */
459 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 459 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
460 lp->intmask = NORXflag | RECONflag; 460 lp->intmask = NORXflag | RECONflag;
461 AINTMASK(lp->intmask); 461 AINTMASK(lp->intmask);
462 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 462 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
463 463
464 netif_start_queue(dev); 464 netif_start_queue(dev);
465 465
@@ -670,14 +670,14 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
670 freeskb = 0; 670 freeskb = 0;
671 } 671 }
672 672
673 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 673 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
674 /* make sure we didn't ignore a TX IRQ while we were in here */ 674 /* make sure we didn't ignore a TX IRQ while we were in here */
675 AINTMASK(0); 675 AINTMASK(0);
676 676
677 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 677 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
678 lp->intmask |= TXFREEflag|EXCNAKflag; 678 lp->intmask |= TXFREEflag|EXCNAKflag;
679 AINTMASK(lp->intmask); 679 AINTMASK(lp->intmask);
680 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 680 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS());
681 681
682 spin_unlock_irqrestore(&lp->lock, flags); 682 spin_unlock_irqrestore(&lp->lock, flags);
683 if (freeskb) { 683 if (freeskb) {
@@ -798,7 +798,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
798 diagstatus = (status >> 8) & 0xFF; 798 diagstatus = (status >> 8) & 0xFF;
799 799
800 BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n", 800 BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n",
801 __FILE__,__LINE__,__FUNCTION__,status); 801 __FILE__,__LINE__,__func__,status);
802 didsomething = 0; 802 didsomething = 0;
803 803
804 /* 804 /*
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 8b51313b1300..70124a944e7d 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -238,15 +238,15 @@ static int com20020_reset(struct net_device *dev, int really_reset)
238 u_char inbyte; 238 u_char inbyte;
239 239
240 BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n", 240 BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
241 __FILE__,__LINE__,__FUNCTION__,dev,lp,dev->name); 241 __FILE__,__LINE__,__func__,dev,lp,dev->name);
242 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", 242 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
243 dev->name, ASTATUS()); 243 dev->name, ASTATUS());
244 244
245 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 245 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
246 lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2); 246 lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
247 /* power-up defaults */ 247 /* power-up defaults */
248 SETCONF; 248 SETCONF;
249 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 249 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
250 250
251 if (really_reset) { 251 if (really_reset) {
252 /* reset the card */ 252 /* reset the card */
@@ -254,22 +254,22 @@ static int com20020_reset(struct net_device *dev, int really_reset)
254 mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */ 254 mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */
255 } 255 }
256 /* clear flags & end reset */ 256 /* clear flags & end reset */
257 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 257 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
258 ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear); 258 ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
259 259
260 /* verify that the ARCnet signature byte is present */ 260 /* verify that the ARCnet signature byte is present */
261 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 261 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
262 262
263 com20020_copy_from_card(dev, 0, 0, &inbyte, 1); 263 com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
264 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 264 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
265 if (inbyte != TESTvalue) { 265 if (inbyte != TESTvalue) {
266 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 266 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
267 BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n"); 267 BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
268 return 1; 268 return 1;
269 } 269 }
270 /* enable extended (512-byte) packets */ 270 /* enable extended (512-byte) packets */
271 ACOMMAND(CONFIGcmd | EXTconf); 271 ACOMMAND(CONFIGcmd | EXTconf);
272 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 272 BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
273 273
274 /* done! return success. */ 274 /* done! return success. */
275 return 0; 275 return 0;
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 949e75358bf0..8cbc1b59bd62 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -397,7 +397,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
397 */ 397 */
398int atl1e_phy_commit(struct atl1e_hw *hw) 398int atl1e_phy_commit(struct atl1e_hw *hw)
399{ 399{
400 struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter; 400 struct atl1e_adapter *adapter = hw->adapter;
401 struct pci_dev *pdev = adapter->pdev; 401 struct pci_dev *pdev = adapter->pdev;
402 int ret_val; 402 int ret_val;
403 u16 phy_data; 403 u16 phy_data;
@@ -431,7 +431,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
431 431
432int atl1e_phy_init(struct atl1e_hw *hw) 432int atl1e_phy_init(struct atl1e_hw *hw)
433{ 433{
434 struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter; 434 struct atl1e_adapter *adapter = hw->adapter;
435 struct pci_dev *pdev = adapter->pdev; 435 struct pci_dev *pdev = adapter->pdev;
436 s32 ret_val; 436 s32 ret_val;
437 u16 phy_val; 437 u16 phy_val;
@@ -525,7 +525,7 @@ int atl1e_phy_init(struct atl1e_hw *hw)
525 */ 525 */
526int atl1e_reset_hw(struct atl1e_hw *hw) 526int atl1e_reset_hw(struct atl1e_hw *hw)
527{ 527{
528 struct atl1e_adapter *adapter = (struct atl1e_adapter *)hw->adapter; 528 struct atl1e_adapter *adapter = hw->adapter;
529 struct pci_dev *pdev = adapter->pdev; 529 struct pci_dev *pdev = adapter->pdev;
530 530
531 u32 idle_status_data = 0; 531 u32 idle_status_data = 0;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 7685b995ff9b..9b603528143d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2390,9 +2390,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2390 } 2390 }
2391 2391
2392 /* Init GPHY as early as possible due to power saving issue */ 2392 /* Init GPHY as early as possible due to power saving issue */
2393 spin_lock(&adapter->mdio_lock);
2394 atl1e_phy_init(&adapter->hw); 2393 atl1e_phy_init(&adapter->hw);
2395 spin_unlock(&adapter->mdio_lock);
2396 /* reset the controller to 2394 /* reset the controller to
2397 * put the device in a known good starting state */ 2395 * put the device in a known good starting state */
2398 err = atl1e_reset_hw(&adapter->hw); 2396 err = atl1e_reset_hw(&adapter->hw);
diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile
index ca45553a040d..e4f6022ca552 100644
--- a/drivers/net/atlx/Makefile
+++ b/drivers/net/atlx/Makefile
@@ -1 +1,3 @@
1obj-$(CONFIG_ATL1) += atl1.o 1obj-$(CONFIG_ATL1) += atl1.o
2obj-$(CONFIG_ATL2) += atl2.o
3
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
new file mode 100644
index 000000000000..5ab9c7631002
--- /dev/null
+++ b/drivers/net/atlx/atl2.c
@@ -0,0 +1,3129 @@
1/*
2 * Copyright(c) 2006 - 2007 Atheros Corporation. All rights reserved.
3 * Copyright(c) 2007 - 2008 Chris Snook <csnook@redhat.com>
4 *
5 * Derived from Intel e1000 driver
6 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <asm/atomic.h>
24#include <linux/crc32.h>
25#include <linux/dma-mapping.h>
26#include <linux/etherdevice.h>
27#include <linux/ethtool.h>
28#include <linux/hardirq.h>
29#include <linux/if_vlan.h>
30#include <linux/in.h>
31#include <linux/interrupt.h>
32#include <linux/ip.h>
33#include <linux/irqflags.h>
34#include <linux/irqreturn.h>
35#include <linux/mii.h>
36#include <linux/net.h>
37#include <linux/netdevice.h>
38#include <linux/pci.h>
39#include <linux/pci_ids.h>
40#include <linux/pm.h>
41#include <linux/skbuff.h>
42#include <linux/spinlock.h>
43#include <linux/string.h>
44#include <linux/tcp.h>
45#include <linux/timer.h>
46#include <linux/types.h>
47#include <linux/workqueue.h>
48
49#include "atl2.h"
50
51#define ATL2_DRV_VERSION "2.2.3"
52
53static char atl2_driver_name[] = "atl2";
54static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
55static char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
56static char atl2_driver_version[] = ATL2_DRV_VERSION;
57
58MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
59MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
60MODULE_LICENSE("GPL");
61MODULE_VERSION(ATL2_DRV_VERSION);
62
63/*
64 * atl2_pci_tbl - PCI Device ID Table
65 */
66static struct pci_device_id atl2_pci_tbl[] = {
67 {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
68 /* required last entry */
69 {0,}
70};
71MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
72
73static void atl2_set_ethtool_ops(struct net_device *netdev);
74
75static void atl2_check_options(struct atl2_adapter *adapter);
76
77/*
78 * atl2_sw_init - Initialize general software structures (struct atl2_adapter)
79 * @adapter: board private structure to initialize
80 *
81 * atl2_sw_init initializes the Adapter private data structure.
82 * Fields are initialized based on PCI device information and
83 * OS network device settings (MTU size).
84 */
85static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
86{
87 struct atl2_hw *hw = &adapter->hw;
88 struct pci_dev *pdev = adapter->pdev;
89
90 /* PCI config space info */
91 hw->vendor_id = pdev->vendor;
92 hw->device_id = pdev->device;
93 hw->subsystem_vendor_id = pdev->subsystem_vendor;
94 hw->subsystem_id = pdev->subsystem_device;
95
96 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
97 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
98
99 adapter->wol = 0;
100 adapter->ict = 50000; /* ~100ms */
101 adapter->link_speed = SPEED_0; /* hardware init */
102 adapter->link_duplex = FULL_DUPLEX;
103
104 hw->phy_configured = false;
105 hw->preamble_len = 7;
106 hw->ipgt = 0x60;
107 hw->min_ifg = 0x50;
108 hw->ipgr1 = 0x40;
109 hw->ipgr2 = 0x60;
110 hw->retry_buf = 2;
111 hw->max_retry = 0xf;
112 hw->lcol = 0x37;
113 hw->jam_ipg = 7;
114 hw->fc_rxd_hi = 0;
115 hw->fc_rxd_lo = 0;
116 hw->max_frame_size = adapter->netdev->mtu;
117
118 spin_lock_init(&adapter->stats_lock);
119 spin_lock_init(&adapter->tx_lock);
120
121 set_bit(__ATL2_DOWN, &adapter->flags);
122
123 return 0;
124}
125
126/*
127 * atl2_set_multi - Multicast and Promiscuous mode set
128 * @netdev: network interface device structure
129 *
130 * The set_multi entry point is called whenever the multicast address
131 * list or the network interface flags are updated. This routine is
132 * responsible for configuring the hardware for proper multicast,
133 * promiscuous mode, and all-multi behavior.
134 */
135static void atl2_set_multi(struct net_device *netdev)
136{
137 struct atl2_adapter *adapter = netdev_priv(netdev);
138 struct atl2_hw *hw = &adapter->hw;
139 struct dev_mc_list *mc_ptr;
140 u32 rctl;
141 u32 hash_value;
142
143 /* Check for Promiscuous and All Multicast modes */
144 rctl = ATL2_READ_REG(hw, REG_MAC_CTRL);
145
146 if (netdev->flags & IFF_PROMISC) {
147 rctl |= MAC_CTRL_PROMIS_EN;
148 } else if (netdev->flags & IFF_ALLMULTI) {
149 rctl |= MAC_CTRL_MC_ALL_EN;
150 rctl &= ~MAC_CTRL_PROMIS_EN;
151 } else
152 rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
153
154 ATL2_WRITE_REG(hw, REG_MAC_CTRL, rctl);
155
156 /* clear the old settings from the multicast hash table */
157 ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
158 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
159
160 /* comoute mc addresses' hash value ,and put it into hash table */
161 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
162 hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
163 atl2_hash_set(hw, hash_value);
164 }
165}
166
167static void init_ring_ptrs(struct atl2_adapter *adapter)
168{
169 /* Read / Write Ptr Initialize: */
170 adapter->txd_write_ptr = 0;
171 atomic_set(&adapter->txd_read_ptr, 0);
172
173 adapter->rxd_read_ptr = 0;
174 adapter->rxd_write_ptr = 0;
175
176 atomic_set(&adapter->txs_write_ptr, 0);
177 adapter->txs_next_clear = 0;
178}
179
180/*
181 * atl2_configure - Configure Transmit&Receive Unit after Reset
182 * @adapter: board private structure
183 *
184 * Configure the Tx /Rx unit of the MAC after a reset.
185 */
186static int atl2_configure(struct atl2_adapter *adapter)
187{
188 struct atl2_hw *hw = &adapter->hw;
189 u32 value;
190
191 /* clear interrupt status */
192 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0xffffffff);
193
194 /* set MAC Address */
195 value = (((u32)hw->mac_addr[2]) << 24) |
196 (((u32)hw->mac_addr[3]) << 16) |
197 (((u32)hw->mac_addr[4]) << 8) |
198 (((u32)hw->mac_addr[5]));
199 ATL2_WRITE_REG(hw, REG_MAC_STA_ADDR, value);
200 value = (((u32)hw->mac_addr[0]) << 8) |
201 (((u32)hw->mac_addr[1]));
202 ATL2_WRITE_REG(hw, (REG_MAC_STA_ADDR+4), value);
203
204 /* HI base address */
205 ATL2_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI,
206 (u32)((adapter->ring_dma & 0xffffffff00000000ULL) >> 32));
207
208 /* LO base address */
209 ATL2_WRITE_REG(hw, REG_TXD_BASE_ADDR_LO,
210 (u32)(adapter->txd_dma & 0x00000000ffffffffULL));
211 ATL2_WRITE_REG(hw, REG_TXS_BASE_ADDR_LO,
212 (u32)(adapter->txs_dma & 0x00000000ffffffffULL));
213 ATL2_WRITE_REG(hw, REG_RXD_BASE_ADDR_LO,
214 (u32)(adapter->rxd_dma & 0x00000000ffffffffULL));
215
216 /* element count */
217 ATL2_WRITE_REGW(hw, REG_TXD_MEM_SIZE, (u16)(adapter->txd_ring_size/4));
218 ATL2_WRITE_REGW(hw, REG_TXS_MEM_SIZE, (u16)adapter->txs_ring_size);
219 ATL2_WRITE_REGW(hw, REG_RXD_BUF_NUM, (u16)adapter->rxd_ring_size);
220
221 /* config Internal SRAM */
222/*
223 ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_tx_end);
224 ATL2_WRITE_REGW(hw, REG_SRAM_TXRAM_END, sram_rx_end);
225*/
226
227 /* config IPG/IFG */
228 value = (((u32)hw->ipgt & MAC_IPG_IFG_IPGT_MASK) <<
229 MAC_IPG_IFG_IPGT_SHIFT) |
230 (((u32)hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) <<
231 MAC_IPG_IFG_MIFG_SHIFT) |
232 (((u32)hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) <<
233 MAC_IPG_IFG_IPGR1_SHIFT)|
234 (((u32)hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) <<
235 MAC_IPG_IFG_IPGR2_SHIFT);
236 ATL2_WRITE_REG(hw, REG_MAC_IPG_IFG, value);
237
238 /* config Half-Duplex Control */
239 value = ((u32)hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
240 (((u32)hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) <<
241 MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
242 MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
243 (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
244 (((u32)hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) <<
245 MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
246 ATL2_WRITE_REG(hw, REG_MAC_HALF_DUPLX_CTRL, value);
247
248 /* set Interrupt Moderator Timer */
249 ATL2_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, adapter->imt);
250 ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_ITIMER_EN);
251
252 /* set Interrupt Clear Timer */
253 ATL2_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, adapter->ict);
254
255 /* set MTU */
256 ATL2_WRITE_REG(hw, REG_MTU, adapter->netdev->mtu +
257 ENET_HEADER_SIZE + VLAN_SIZE + ETHERNET_FCS_SIZE);
258
259 /* 1590 */
260 ATL2_WRITE_REG(hw, REG_TX_CUT_THRESH, 0x177);
261
262 /* flow control */
263 ATL2_WRITE_REGW(hw, REG_PAUSE_ON_TH, hw->fc_rxd_hi);
264 ATL2_WRITE_REGW(hw, REG_PAUSE_OFF_TH, hw->fc_rxd_lo);
265
266 /* Init mailbox */
267 ATL2_WRITE_REGW(hw, REG_MB_TXD_WR_IDX, (u16)adapter->txd_write_ptr);
268 ATL2_WRITE_REGW(hw, REG_MB_RXD_RD_IDX, (u16)adapter->rxd_read_ptr);
269
270 /* enable DMA read/write */
271 ATL2_WRITE_REGB(hw, REG_DMAR, DMAR_EN);
272 ATL2_WRITE_REGB(hw, REG_DMAW, DMAW_EN);
273
274 value = ATL2_READ_REG(&adapter->hw, REG_ISR);
275 if ((value & ISR_PHY_LINKDOWN) != 0)
276 value = 1; /* config failed */
277 else
278 value = 0;
279
280 /* clear all interrupt status */
281 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0x3fffffff);
282 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
283 return value;
284}
285
286/*
287 * atl2_setup_ring_resources - allocate Tx / RX descriptor resources
288 * @adapter: board private structure
289 *
290 * Return 0 on success, negative on failure
291 */
292static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
293{
294 struct pci_dev *pdev = adapter->pdev;
295 int size;
296 u8 offset = 0;
297
298 /* real ring DMA buffer */
299 adapter->ring_size = size =
300 adapter->txd_ring_size * 1 + 7 + /* dword align */
301 adapter->txs_ring_size * 4 + 7 + /* dword align */
302 adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */
303
304 adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
305 &adapter->ring_dma);
306 if (!adapter->ring_vir_addr)
307 return -ENOMEM;
308 memset(adapter->ring_vir_addr, 0, adapter->ring_size);
309
310 /* Init TXD Ring */
311 adapter->txd_dma = adapter->ring_dma ;
312 offset = (adapter->txd_dma & 0x7) ? (8 - (adapter->txd_dma & 0x7)) : 0;
313 adapter->txd_dma += offset;
314 adapter->txd_ring = (struct tx_pkt_header *) (adapter->ring_vir_addr +
315 offset);
316
317 /* Init TXS Ring */
318 adapter->txs_dma = adapter->txd_dma + adapter->txd_ring_size;
319 offset = (adapter->txs_dma & 0x7) ? (8 - (adapter->txs_dma & 0x7)) : 0;
320 adapter->txs_dma += offset;
321 adapter->txs_ring = (struct tx_pkt_status *)
322 (((u8 *)adapter->txd_ring) + (adapter->txd_ring_size + offset));
323
324 /* Init RXD Ring */
325 adapter->rxd_dma = adapter->txs_dma + adapter->txs_ring_size * 4;
326 offset = (adapter->rxd_dma & 127) ?
327 (128 - (adapter->rxd_dma & 127)) : 0;
328 if (offset > 7)
329 offset -= 8;
330 else
331 offset += (128 - 8);
332
333 adapter->rxd_dma += offset;
334 adapter->rxd_ring = (struct rx_desc *) (((u8 *)adapter->txs_ring) +
335 (adapter->txs_ring_size * 4 + offset));
336
337/*
338 * Read / Write Ptr Initialize:
339 * init_ring_ptrs(adapter);
340 */
341 return 0;
342}
343
344/*
345 * atl2_irq_enable - Enable default interrupt generation settings
346 * @adapter: board private structure
347 */
348static inline void atl2_irq_enable(struct atl2_adapter *adapter)
349{
350 ATL2_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK);
351 ATL2_WRITE_FLUSH(&adapter->hw);
352}
353
354/*
355 * atl2_irq_disable - Mask off interrupt generation on the NIC
356 * @adapter: board private structure
357 */
358static inline void atl2_irq_disable(struct atl2_adapter *adapter)
359{
360 ATL2_WRITE_REG(&adapter->hw, REG_IMR, 0);
361 ATL2_WRITE_FLUSH(&adapter->hw);
362 synchronize_irq(adapter->pdev->irq);
363}
364
365#ifdef NETIF_F_HW_VLAN_TX
366static void atl2_vlan_rx_register(struct net_device *netdev,
367 struct vlan_group *grp)
368{
369 struct atl2_adapter *adapter = netdev_priv(netdev);
370 u32 ctrl;
371
372 atl2_irq_disable(adapter);
373 adapter->vlgrp = grp;
374
375 if (grp) {
376 /* enable VLAN tag insert/strip */
377 ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
378 ctrl |= MAC_CTRL_RMV_VLAN;
379 ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
380 } else {
381 /* disable VLAN tag insert/strip */
382 ctrl = ATL2_READ_REG(&adapter->hw, REG_MAC_CTRL);
383 ctrl &= ~MAC_CTRL_RMV_VLAN;
384 ATL2_WRITE_REG(&adapter->hw, REG_MAC_CTRL, ctrl);
385 }
386
387 atl2_irq_enable(adapter);
388}
389
390static void atl2_restore_vlan(struct atl2_adapter *adapter)
391{
392 atl2_vlan_rx_register(adapter->netdev, adapter->vlgrp);
393}
394#endif
395
396static void atl2_intr_rx(struct atl2_adapter *adapter)
397{
398 struct net_device *netdev = adapter->netdev;
399 struct rx_desc *rxd;
400 struct sk_buff *skb;
401
402 do {
403 rxd = adapter->rxd_ring+adapter->rxd_write_ptr;
404 if (!rxd->status.update)
405 break; /* end of tx */
406
407 /* clear this flag at once */
408 rxd->status.update = 0;
409
410 if (rxd->status.ok && rxd->status.pkt_size >= 60) {
411 int rx_size = (int)(rxd->status.pkt_size - 4);
412 /* alloc new buffer */
413 skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN);
414 if (NULL == skb) {
415 printk(KERN_WARNING
416 "%s: Mem squeeze, deferring packet.\n",
417 netdev->name);
418 /*
419 * Check that some rx space is free. If not,
420 * free one and mark stats->rx_dropped++.
421 */
422 adapter->net_stats.rx_dropped++;
423 break;
424 }
425 skb_reserve(skb, NET_IP_ALIGN);
426 skb->dev = netdev;
427 memcpy(skb->data, rxd->packet, rx_size);
428 skb_put(skb, rx_size);
429 skb->protocol = eth_type_trans(skb, netdev);
430#ifdef NETIF_F_HW_VLAN_TX
431 if (adapter->vlgrp && (rxd->status.vlan)) {
432 u16 vlan_tag = (rxd->status.vtag>>4) |
433 ((rxd->status.vtag&7) << 13) |
434 ((rxd->status.vtag&8) << 9);
435 vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
436 } else
437#endif
438 netif_rx(skb);
439 adapter->net_stats.rx_bytes += rx_size;
440 adapter->net_stats.rx_packets++;
441 netdev->last_rx = jiffies;
442 } else {
443 adapter->net_stats.rx_errors++;
444
445 if (rxd->status.ok && rxd->status.pkt_size <= 60)
446 adapter->net_stats.rx_length_errors++;
447 if (rxd->status.mcast)
448 adapter->net_stats.multicast++;
449 if (rxd->status.crc)
450 adapter->net_stats.rx_crc_errors++;
451 if (rxd->status.align)
452 adapter->net_stats.rx_frame_errors++;
453 }
454
455 /* advance write ptr */
456 if (++adapter->rxd_write_ptr == adapter->rxd_ring_size)
457 adapter->rxd_write_ptr = 0;
458 } while (1);
459
460 /* update mailbox? */
461 adapter->rxd_read_ptr = adapter->rxd_write_ptr;
462 ATL2_WRITE_REGW(&adapter->hw, REG_MB_RXD_RD_IDX, adapter->rxd_read_ptr);
463}
464
465static void atl2_intr_tx(struct atl2_adapter *adapter)
466{
467 u32 txd_read_ptr;
468 u32 txs_write_ptr;
469 struct tx_pkt_status *txs;
470 struct tx_pkt_header *txph;
471 int free_hole = 0;
472
473 do {
474 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
475 txs = adapter->txs_ring + txs_write_ptr;
476 if (!txs->update)
477 break; /* tx stop here */
478
479 free_hole = 1;
480 txs->update = 0;
481
482 if (++txs_write_ptr == adapter->txs_ring_size)
483 txs_write_ptr = 0;
484 atomic_set(&adapter->txs_write_ptr, (int)txs_write_ptr);
485
486 txd_read_ptr = (u32) atomic_read(&adapter->txd_read_ptr);
487 txph = (struct tx_pkt_header *)
488 (((u8 *)adapter->txd_ring) + txd_read_ptr);
489
490 if (txph->pkt_size != txs->pkt_size) {
491 struct tx_pkt_status *old_txs = txs;
492 printk(KERN_WARNING
493 "%s: txs packet size not consistent with txd"
494 " txd_:0x%08x, txs_:0x%08x!\n",
495 adapter->netdev->name,
496 *(u32 *)txph, *(u32 *)txs);
497 printk(KERN_WARNING
498 "txd read ptr: 0x%x\n",
499 txd_read_ptr);
500 txs = adapter->txs_ring + txs_write_ptr;
501 printk(KERN_WARNING
502 "txs-behind:0x%08x\n",
503 *(u32 *)txs);
504 if (txs_write_ptr < 2) {
505 txs = adapter->txs_ring +
506 (adapter->txs_ring_size +
507 txs_write_ptr - 2);
508 } else {
509 txs = adapter->txs_ring + (txs_write_ptr - 2);
510 }
511 printk(KERN_WARNING
512 "txs-before:0x%08x\n",
513 *(u32 *)txs);
514 txs = old_txs;
515 }
516
517 /* 4for TPH */
518 txd_read_ptr += (((u32)(txph->pkt_size) + 7) & ~3);
519 if (txd_read_ptr >= adapter->txd_ring_size)
520 txd_read_ptr -= adapter->txd_ring_size;
521
522 atomic_set(&adapter->txd_read_ptr, (int)txd_read_ptr);
523
524 /* tx statistics: */
525 if (txs->ok) {
526 adapter->net_stats.tx_bytes += txs->pkt_size;
527 adapter->net_stats.tx_packets++;
528 }
529 else
530 adapter->net_stats.tx_errors++;
531
532 if (txs->defer)
533 adapter->net_stats.collisions++;
534 if (txs->abort_col)
535 adapter->net_stats.tx_aborted_errors++;
536 if (txs->late_col)
537 adapter->net_stats.tx_window_errors++;
538 if (txs->underun)
539 adapter->net_stats.tx_fifo_errors++;
540 } while (1);
541
542 if (free_hole) {
543 if (netif_queue_stopped(adapter->netdev) &&
544 netif_carrier_ok(adapter->netdev))
545 netif_wake_queue(adapter->netdev);
546 }
547}
548
549static void atl2_check_for_link(struct atl2_adapter *adapter)
550{
551 struct net_device *netdev = adapter->netdev;
552 u16 phy_data = 0;
553
554 spin_lock(&adapter->stats_lock);
555 atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
556 atl2_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
557 spin_unlock(&adapter->stats_lock);
558
559 /* notify upper layer link down ASAP */
560 if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
561 if (netif_carrier_ok(netdev)) { /* old link state: Up */
562 printk(KERN_INFO "%s: %s NIC Link is Down\n",
563 atl2_driver_name, netdev->name);
564 adapter->link_speed = SPEED_0;
565 netif_carrier_off(netdev);
566 netif_stop_queue(netdev);
567 }
568 }
569 schedule_work(&adapter->link_chg_task);
570}
571
572static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
573{
574 u16 phy_data;
575 spin_lock(&adapter->stats_lock);
576 atl2_read_phy_reg(&adapter->hw, 19, &phy_data);
577 spin_unlock(&adapter->stats_lock);
578}
579
580/*
581 * atl2_intr - Interrupt Handler
582 * @irq: interrupt number
583 * @data: pointer to a network interface device structure
584 * @pt_regs: CPU registers structure
585 */
586static irqreturn_t atl2_intr(int irq, void *data)
587{
588 struct atl2_adapter *adapter = netdev_priv(data);
589 struct atl2_hw *hw = &adapter->hw;
590 u32 status;
591
592 status = ATL2_READ_REG(hw, REG_ISR);
593 if (0 == status)
594 return IRQ_NONE;
595
596 /* link event */
597 if (status & ISR_PHY)
598 atl2_clear_phy_int(adapter);
599
600 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
601 ATL2_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
602
603 /* check if PCIE PHY Link down */
604 if (status & ISR_PHY_LINKDOWN) {
605 if (netif_running(adapter->netdev)) { /* reset MAC */
606 ATL2_WRITE_REG(hw, REG_ISR, 0);
607 ATL2_WRITE_REG(hw, REG_IMR, 0);
608 ATL2_WRITE_FLUSH(hw);
609 schedule_work(&adapter->reset_task);
610 return IRQ_HANDLED;
611 }
612 }
613
614 /* check if DMA read/write error? */
615 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
616 ATL2_WRITE_REG(hw, REG_ISR, 0);
617 ATL2_WRITE_REG(hw, REG_IMR, 0);
618 ATL2_WRITE_FLUSH(hw);
619 schedule_work(&adapter->reset_task);
620 return IRQ_HANDLED;
621 }
622
623 /* link event */
624 if (status & (ISR_PHY | ISR_MANUAL)) {
625 adapter->net_stats.tx_carrier_errors++;
626 atl2_check_for_link(adapter);
627 }
628
629 /* transmit event */
630 if (status & ISR_TX_EVENT)
631 atl2_intr_tx(adapter);
632
633 /* rx exception */
634 if (status & ISR_RX_EVENT)
635 atl2_intr_rx(adapter);
636
637 /* re-enable Interrupt */
638 ATL2_WRITE_REG(&adapter->hw, REG_ISR, 0);
639 return IRQ_HANDLED;
640}
641
642static int atl2_request_irq(struct atl2_adapter *adapter)
643{
644 struct net_device *netdev = adapter->netdev;
645 int flags, err = 0;
646
647 flags = IRQF_SHARED;
648#ifdef CONFIG_PCI_MSI
649 adapter->have_msi = true;
650 err = pci_enable_msi(adapter->pdev);
651 if (err)
652 adapter->have_msi = false;
653
654 if (adapter->have_msi)
655 flags &= ~IRQF_SHARED;
656#endif
657
658 return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name,
659 netdev);
660}
661
662/*
663 * atl2_free_ring_resources - Free Tx / RX descriptor Resources
664 * @adapter: board private structure
665 *
666 * Free all transmit software resources
667 */
668static void atl2_free_ring_resources(struct atl2_adapter *adapter)
669{
670 struct pci_dev *pdev = adapter->pdev;
671 pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
672 adapter->ring_dma);
673}
674
675/*
676 * atl2_open - Called when a network interface is made active
677 * @netdev: network interface device structure
678 *
679 * Returns 0 on success, negative value on failure
680 *
681 * The open entry point is called when a network interface is made
682 * active by the system (IFF_UP). At this point all resources needed
683 * for transmit and receive operations are allocated, the interrupt
684 * handler is registered with the OS, the watchdog timer is started,
685 * and the stack is notified that the interface is ready.
686 */
687static int atl2_open(struct net_device *netdev)
688{
689 struct atl2_adapter *adapter = netdev_priv(netdev);
690 int err;
691 u32 val;
692
693 /* disallow open during test */
694 if (test_bit(__ATL2_TESTING, &adapter->flags))
695 return -EBUSY;
696
697 /* allocate transmit descriptors */
698 err = atl2_setup_ring_resources(adapter);
699 if (err)
700 return err;
701
702 err = atl2_init_hw(&adapter->hw);
703 if (err) {
704 err = -EIO;
705 goto err_init_hw;
706 }
707
708 /* hardware has been reset, we need to reload some things */
709 atl2_set_multi(netdev);
710 init_ring_ptrs(adapter);
711
712#ifdef NETIF_F_HW_VLAN_TX
713 atl2_restore_vlan(adapter);
714#endif
715
716 if (atl2_configure(adapter)) {
717 err = -EIO;
718 goto err_config;
719 }
720
721 err = atl2_request_irq(adapter);
722 if (err)
723 goto err_req_irq;
724
725 clear_bit(__ATL2_DOWN, &adapter->flags);
726
727 mod_timer(&adapter->watchdog_timer, jiffies + 4*HZ);
728
729 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
730 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
731 val | MASTER_CTRL_MANUAL_INT);
732
733 atl2_irq_enable(adapter);
734
735 return 0;
736
737err_init_hw:
738err_req_irq:
739err_config:
740 atl2_free_ring_resources(adapter);
741 atl2_reset_hw(&adapter->hw);
742
743 return err;
744}
745
746static void atl2_down(struct atl2_adapter *adapter)
747{
748 struct net_device *netdev = adapter->netdev;
749
750 /* signal that we're down so the interrupt handler does not
751 * reschedule our watchdog timer */
752 set_bit(__ATL2_DOWN, &adapter->flags);
753
754#ifdef NETIF_F_LLTX
755 netif_stop_queue(netdev);
756#else
757 netif_tx_disable(netdev);
758#endif
759
760 /* reset MAC to disable all RX/TX */
761 atl2_reset_hw(&adapter->hw);
762 msleep(1);
763
764 atl2_irq_disable(adapter);
765
766 del_timer_sync(&adapter->watchdog_timer);
767 del_timer_sync(&adapter->phy_config_timer);
768 clear_bit(0, &adapter->cfg_phy);
769
770 netif_carrier_off(netdev);
771 adapter->link_speed = SPEED_0;
772 adapter->link_duplex = -1;
773}
774
775static void atl2_free_irq(struct atl2_adapter *adapter)
776{
777 struct net_device *netdev = adapter->netdev;
778
779 free_irq(adapter->pdev->irq, netdev);
780
781#ifdef CONFIG_PCI_MSI
782 if (adapter->have_msi)
783 pci_disable_msi(adapter->pdev);
784#endif
785}
786
787/*
788 * atl2_close - Disables a network interface
789 * @netdev: network interface device structure
790 *
791 * Returns 0, this is not allowed to fail
792 *
793 * The close entry point is called when an interface is de-activated
794 * by the OS. The hardware is still under the drivers control, but
795 * needs to be disabled. A global MAC reset is issued to stop the
796 * hardware, and all transmit and receive resources are freed.
797 */
798static int atl2_close(struct net_device *netdev)
799{
800 struct atl2_adapter *adapter = netdev_priv(netdev);
801
802 WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
803
804 atl2_down(adapter);
805 atl2_free_irq(adapter);
806 atl2_free_ring_resources(adapter);
807
808 return 0;
809}
810
811static inline int TxsFreeUnit(struct atl2_adapter *adapter)
812{
813 u32 txs_write_ptr = (u32) atomic_read(&adapter->txs_write_ptr);
814
815 return (adapter->txs_next_clear >= txs_write_ptr) ?
816 (int) (adapter->txs_ring_size - adapter->txs_next_clear +
817 txs_write_ptr - 1) :
818 (int) (txs_write_ptr - adapter->txs_next_clear - 1);
819}
820
821static inline int TxdFreeBytes(struct atl2_adapter *adapter)
822{
823 u32 txd_read_ptr = (u32)atomic_read(&adapter->txd_read_ptr);
824
825 return (adapter->txd_write_ptr >= txd_read_ptr) ?
826 (int) (adapter->txd_ring_size - adapter->txd_write_ptr +
827 txd_read_ptr - 1) :
828 (int) (txd_read_ptr - adapter->txd_write_ptr - 1);
829}
830
/*
 * atl2_xmit_frame - copy-based transmit path
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * The hardware uses a byte-granular TX ring: a 4-byte tx_pkt_header is
 * written into the ring followed by a memcpy of the packet data (no
 * per-skb DMA mapping), then the new write index is posted to the
 * REG_MB_TXD_WR_IDX mailbox in dword units. Because the data is
 * copied, the skb is freed before returning NETDEV_TX_OK.
 */
static int atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;
	struct tx_pkt_header *txph;
	u32 offset, copy_len;
	int txs_unused;
	int txbuf_unused;

	/* interface going down: drop silently, do not requeue */
	if (test_bit(__ATL2_DOWN, &adapter->flags)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

#ifdef NETIF_F_LLTX
	/* lockless-TX mode: try-lock only, collisions are requeued */
	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
#else
	spin_lock_irqsave(&adapter->tx_lock, flags);
#endif
	txs_unused = TxsFreeUnit(adapter);
	txbuf_unused = TxdFreeBytes(adapter);

	/* need room for the 4-byte header, the payload, up to 4 bytes of
	 * alignment padding, and one free TX status slot */
	if (skb->len + sizeof(struct tx_pkt_header) + 4 > txbuf_unused ||
		txs_unused < 1) {
		/* not enough resources */
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	offset = adapter->txd_write_ptr;

	/* the packet header lives directly in the ring, ahead of the data */
	txph = (struct tx_pkt_header *) (((u8 *)adapter->txd_ring) + offset);

	*(u32 *)txph = 0;
	txph->pkt_size = skb->len;

	offset += 4;
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;
	copy_len = adapter->txd_ring_size - offset;
	if (copy_len >= skb->len) {
		/* contiguous copy; advance offset rounded up to a dword */
		memcpy(((u8 *)adapter->txd_ring) + offset, skb->data, skb->len);
		offset += ((u32)(skb->len + 3) & ~3);
	} else {
		/* payload wraps past the end of the ring: split the copy */
		memcpy(((u8 *)adapter->txd_ring)+offset, skb->data, copy_len);
		memcpy((u8 *)adapter->txd_ring, skb->data+copy_len,
			skb->len-copy_len);
		offset = ((u32)(skb->len-copy_len + 3) & ~3);
	}
#ifdef NETIF_F_HW_VLAN_TX
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		/* shuffle the tag fields into the hardware's bit layout */
		vlan_tag = (vlan_tag << 4) |
			(vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		txph->ins_vlan = 1;
		txph->vlan = vlan_tag;
	}
#endif
	if (offset >= adapter->txd_ring_size)
		offset -= adapter->txd_ring_size;
	adapter->txd_write_ptr = offset;

	/* clear txs before send */
	adapter->txs_ring[adapter->txs_next_clear].update = 0;
	if (++adapter->txs_next_clear == adapter->txs_ring_size)
		adapter->txs_next_clear = 0;

	/* post the new write index (in dwords) to the hardware mailbox */
	ATL2_WRITE_REGW(&adapter->hw, REG_MB_TXD_WR_IDX,
		(adapter->txd_write_ptr >> 2));

	spin_unlock_irqrestore(&adapter->tx_lock, flags);

	netdev->trans_start = jiffies;
	/* data already copied into the ring; skb no longer needed */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
919
920/*
921 * atl2_get_stats - Get System Network Statistics
922 * @netdev: network interface device structure
923 *
924 * Returns the address of the device statistics structure.
925 * The statistics are actually updated from the timer callback.
926 */
927static struct net_device_stats *atl2_get_stats(struct net_device *netdev)
928{
929 struct atl2_adapter *adapter = netdev_priv(netdev);
930 return &adapter->net_stats;
931}
932
933/*
934 * atl2_change_mtu - Change the Maximum Transfer Unit
935 * @netdev: network interface device structure
936 * @new_mtu: new value for maximum frame size
937 *
938 * Returns 0 on success, negative on failure
939 */
940static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
941{
942 struct atl2_adapter *adapter = netdev_priv(netdev);
943 struct atl2_hw *hw = &adapter->hw;
944
945 if ((new_mtu < 40) || (new_mtu > (ETH_DATA_LEN + VLAN_SIZE)))
946 return -EINVAL;
947
948 /* set MTU */
949 if (hw->max_frame_size != new_mtu) {
950 netdev->mtu = new_mtu;
951 ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ENET_HEADER_SIZE +
952 VLAN_SIZE + ETHERNET_FCS_SIZE);
953 }
954
955 return 0;
956}
957
/*
 * atl2_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to a struct sockaddr holding the new address
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, or
 * -EBUSY when the interface is running (address can only be changed
 * while the interface is down).
 */
static int atl2_set_mac(struct net_device *netdev, void *p)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* refuse to change the address while the interface is up */
	if (netif_running(netdev))
		return -EBUSY;

	/* update both the netdev copy and the hw-layer copy */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);

	/* program the new address into the hardware */
	atl2_set_mac_addr(&adapter->hw);

	return 0;
}
983
/*
 * atl2_mii_ioctl - MII/PHY register access for the SIOCxMII ioctls
 * @netdev: network interface device structure
 * @ifr: ifreq carrying a struct mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * Returns 0 on success, -EPERM without CAP_NET_ADMIN, -EFAULT for an
 * out-of-range register on write, -EIO on a PHY access failure, or
 * -EOPNOTSUPP for any other command.
 */
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);
	unsigned long flags;

	switch (cmd) {
	case SIOCGMIIPHY:
		/* single internal PHY at address 0 */
		data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* stats_lock serializes PHY accesses in this driver */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (atl2_read_phy_reg(&adapter->hw,
			data->reg_num & 0x1F, &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* only 32 MII registers exist (5-bit register number) */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (atl2_write_phy_reg(&adapter->hw, data->reg_num,
			data->val_in)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
1029
1030/*
1031 * atl2_ioctl -
1032 * @netdev:
1033 * @ifreq:
1034 * @cmd:
1035 */
1036static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1037{
1038 switch (cmd) {
1039 case SIOCGMIIPHY:
1040 case SIOCGMIIREG:
1041 case SIOCSMIIREG:
1042 return atl2_mii_ioctl(netdev, ifr, cmd);
1043#ifdef ETHTOOL_OPS_COMPAT
1044 case SIOCETHTOOL:
1045 return ethtool_ioctl(ifr);
1046#endif
1047 default:
1048 return -EOPNOTSUPP;
1049 }
1050}
1051
1052/*
1053 * atl2_tx_timeout - Respond to a Tx Hang
1054 * @netdev: network interface device structure
1055 */
1056static void atl2_tx_timeout(struct net_device *netdev)
1057{
1058 struct atl2_adapter *adapter = netdev_priv(netdev);
1059
1060 /* Do the reset outside of interrupt context */
1061 schedule_work(&adapter->reset_task);
1062}
1063
/*
 * atl2_watchdog - Timer Call-back
 * @data: pointer to the adapter cast into an unsigned long
 *
 * Folds the hardware RX overflow counters into net_stats every four
 * seconds while the interface is up; stops rearming itself once the
 * __ATL2_DOWN flag is set.
 */
static void atl2_watchdog(unsigned long data)
{
	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
	u32 drop_rxd, drop_rxs;
	unsigned long flags;

	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
		spin_lock_irqsave(&adapter->stats_lock, flags);
		/* NOTE(review): counters are accumulated here; presumably
		 * the registers clear on read — confirm against the
		 * datasheet */
		drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
		drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
		adapter->net_stats.rx_over_errors += (drop_rxd+drop_rxs);
		spin_unlock_irqrestore(&adapter->stats_lock, flags);

		/* Reset the timer */
		mod_timer(&adapter->watchdog_timer, jiffies + 4 * HZ);
	}
}
1085
/*
 * atl2_phy_config - Timer Call-back
 * @data: pointer to the adapter cast into an unsigned long
 *
 * Restarts PHY autonegotiation with the currently configured
 * advertisement; scheduled from atl2_check_link when the negotiated
 * link does not match the requested media type.
 */
static void atl2_phy_config(unsigned long data)
{
	struct atl2_adapter *adapter = (struct atl2_adapter *) data;
	struct atl2_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->stats_lock, flags);
	atl2_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl2_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN |
		MII_CR_RESTART_AUTO_NEG);
	spin_unlock_irqrestore(&adapter->stats_lock, flags);
	/* allow atl2_check_link to schedule this work again */
	clear_bit(0, &adapter->cfg_phy);
}
1103
/*
 * atl2_up - bring the interface up after open or reset
 * @adapter: board private structure
 *
 * Re-initializes the hardware, reloads multicast/VLAN state and the
 * ring pointers, clears the DOWN flag, fires a manual interrupt and
 * enables interrupts.
 *
 * Returns 0 on success, -EIO on hardware init/configure failure.
 */
static int atl2_up(struct atl2_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	u32 val;

	/* hardware has been reset, we need to reload some things */

	err = atl2_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		return err;
	}

	atl2_set_multi(netdev);
	init_ring_ptrs(adapter);

#ifdef NETIF_F_HW_VLAN_TX
	atl2_restore_vlan(adapter);
#endif

	if (atl2_configure(adapter)) {
		err = -EIO;
		goto err_up;
	}

	clear_bit(__ATL2_DOWN, &adapter->flags);

	/* trigger a manual interrupt so pending work is processed right
	 * away */
	val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
	ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, val |
		MASTER_CTRL_MANUAL_INT);

	atl2_irq_enable(adapter);

err_up:
	return err;
}
1141
/*
 * atl2_reinit_locked - full down/up cycle guarded by the RESETTING bit
 * @adapter: board private structure
 *
 * Must run in process context (it sleeps); the __ATL2_RESETTING bit
 * serializes concurrent reset attempts.
 */
static void atl2_reinit_locked(struct atl2_adapter *adapter)
{
	WARN_ON(in_interrupt());
	/* spin (politely) until we are the only resetter */
	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
		msleep(1);
	atl2_down(adapter);
	atl2_up(adapter);
	clear_bit(__ATL2_RESETTING, &adapter->flags);
}
1151
1152static void atl2_reset_task(struct work_struct *work)
1153{
1154 struct atl2_adapter *adapter;
1155 adapter = container_of(work, struct atl2_adapter, reset_task);
1156
1157 atl2_reinit_locked(adapter);
1158}
1159
/*
 * atl2_setup_mac_ctrl - program REG_MAC_CTRL from the current state
 * @adapter: board private structure
 *
 * Builds the MAC control word (TX/RX enable, duplex, flow control,
 * CRC/pad, preamble length, VLAN stripping, RX filter mode, and the
 * half-duplex retry buffer) and writes it to the hardware.
 */
static void atl2_setup_mac_ctrl(struct atl2_adapter *adapter)
{
	u32 value;
	struct atl2_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	/* Config MAC CTRL Register */
	value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;

	/* duplex */
	if (FULL_DUPLEX == adapter->link_duplex)
		value |= MAC_CTRL_DUPLX;

	/* flow control */
	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);

	/* PAD & CRC */
	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);

	/* preamble length */
	value |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) <<
		MAC_CTRL_PRMLEN_SHIFT);

	/* vlan: strip tags in hardware when a VLAN group is registered */
	if (adapter->vlgrp)
		value |= MAC_CTRL_RMV_VLAN;

	/* filter mode: broadcast always on, promisc/allmulti per flags */
	value |= MAC_CTRL_BC_EN;
	if (netdev->flags & IFF_PROMISC)
		value |= MAC_CTRL_PROMIS_EN;
	else if (netdev->flags & IFF_ALLMULTI)
		value |= MAC_CTRL_MC_ALL_EN;

	/* half retry buffer */
	value |= (((u32)(adapter->hw.retry_buf &
		MAC_CTRL_HALF_LEFT_BUF_MASK)) << MAC_CTRL_HALF_LEFT_BUF_SHIFT);

	ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
}
1200
/*
 * atl2_check_link - reconcile PHY link state with driver state
 * @adapter: board private structure
 *
 * Reads the PHY link status and either: tears down the carrier on
 * link loss; accepts the negotiated speed/duplex when it matches the
 * configured media type and brings the carrier up; or, on a mismatch,
 * drops the carrier and arms phy_config_timer to renegotiate.
 *
 * Returns 0, or the error from atl2_get_speed_and_duplex.
 */
static int atl2_check_link(struct atl2_adapter *adapter)
{
	struct atl2_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int ret_val;
	u16 speed, duplex, phy_data;
	int reconfig = 0;

	/* MII_BMSR must be read twice: the link-status bit is latched */
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	if (!(phy_data&BMSR_LSTATUS)) { /* link down */
		if (netif_carrier_ok(netdev)) { /* old link state: Up */
			u32 value;
			/* disable rx */
			value = ATL2_READ_REG(hw, REG_MAC_CTRL);
			value &= ~MAC_CTRL_RX_EN;
			ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);
			adapter->link_speed = SPEED_0;
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		return 0;
	}

	/* Link Up */
	ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
	if (ret_val)
		return ret_val;
	/* does the negotiated result match the configured media type? */
	switch (hw->MediaType) {
	case MEDIA_TYPE_100M_FULL:
		if (speed != SPEED_100 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_100M_HALF:
		if (speed != SPEED_100 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_FULL:
		if (speed != SPEED_10 || duplex != FULL_DUPLEX)
			reconfig = 1;
		break;
	case MEDIA_TYPE_10M_HALF:
		if (speed != SPEED_10 || duplex != HALF_DUPLEX)
			reconfig = 1;
		break;
	}
	/* link result is our setting */
	if (reconfig == 0) {
		if (adapter->link_speed != speed ||
			adapter->link_duplex != duplex) {
			adapter->link_speed = speed;
			adapter->link_duplex = duplex;
			atl2_setup_mac_ctrl(adapter);
			printk(KERN_INFO "%s: %s NIC Link is Up<%d Mbps %s>\n",
				atl2_driver_name, netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
					"Full Duplex" : "Half Duplex");
		}

		if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		return 0;
	}

	/* change original link status */
	if (netif_carrier_ok(netdev)) {
		u32 value;
		/* disable rx */
		value = ATL2_READ_REG(hw, REG_MAC_CTRL);
		value &= ~MAC_CTRL_RX_EN;
		ATL2_WRITE_REG(hw, REG_MAC_CTRL, value);

		adapter->link_speed = SPEED_0;
		netif_carrier_off(netdev);
		netif_stop_queue(netdev);
	}

	/* auto-neg, insert timer to re-config phy
	 * (if interval smaller than 5 seconds, something strange) */
	if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
		/* cfg_phy bit 0 makes sure only one reconfig is pending */
		if (!test_and_set_bit(0, &adapter->cfg_phy))
			mod_timer(&adapter->phy_config_timer, jiffies + 5 * HZ);
	}

	return 0;
}
1291
/*
 * atl2_link_chg_task - deal with link change event Out of interrupt context
 * @work: work_struct embedded in the adapter (link_chg_task)
 *
 * Worker scheduled on a link-change interrupt; calls atl2_check_link
 * under stats_lock, which serializes PHY access in this driver.
 */
static void atl2_link_chg_task(struct work_struct *work)
{
	struct atl2_adapter *adapter;
	unsigned long flags;

	adapter = container_of(work, struct atl2_adapter, link_chg_task);

	spin_lock_irqsave(&adapter->stats_lock, flags);
	atl2_check_link(adapter);
	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
1307
1308static void atl2_setup_pcicmd(struct pci_dev *pdev)
1309{
1310 u16 cmd;
1311
1312 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
1313
1314 if (cmd & PCI_COMMAND_INTX_DISABLE)
1315 cmd &= ~PCI_COMMAND_INTX_DISABLE;
1316 if (cmd & PCI_COMMAND_IO)
1317 cmd &= ~PCI_COMMAND_IO;
1318 if (0 == (cmd & PCI_COMMAND_MEMORY))
1319 cmd |= PCI_COMMAND_MEMORY;
1320 if (0 == (cmd & PCI_COMMAND_MASTER))
1321 cmd |= PCI_COMMAND_MASTER;
1322 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1323
1324 /*
1325 * some motherboards BIOS(PXE/EFI) driver may set PME
1326 * while they transfer control to OS (Windows/Linux)
1327 * so we should clear this bit before NIC work normally
1328 */
1329 pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0);
1330}
1331
/*
 * atl2_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl2_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl2_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur. Error paths unwind in reverse order via
 * the goto labels at the bottom.
 */
static int __devinit atl2_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl2_adapter *adapter;
	static int cards_found;
	unsigned long mmio_start;
	int mmio_len;
	int err;

	/* NOTE(review): resetting the static counter on every probe means
	 * every adapter is assigned bd_number 0 — confirm this is intended */
	cards_found = 0;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/*
	 * atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
	 * until the kernel has the proper infrastructure to support 64-bit DMA
	 * on these devices.
	 */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) &&
		pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
		goto err_dma;
	}

	/* Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl2_driver_name */
	err = pci_request_regions(pdev, atl2_driver_name);
	if (err)
		goto err_pci_reg;

	/* Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings */
	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct atl2_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;

	/* map BAR 0 (the register window) */
	mmio_start = pci_resource_start(pdev, 0x0);
	mmio_len = pci_resource_len(pdev, 0x0);

	adapter->hw.mem_rang = (u32)mmio_len;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	atl2_setup_pcicmd(pdev);

	/* wire up the netdev operations (pre-net_device_ops style) */
	netdev->open = &atl2_open;
	netdev->stop = &atl2_close;
	netdev->hard_start_xmit = &atl2_xmit_frame;
	netdev->get_stats = &atl2_get_stats;
	netdev->set_multicast_list = &atl2_set_multi;
	netdev->set_mac_address = &atl2_set_mac;
	netdev->change_mtu = &atl2_change_mtu;
	netdev->do_ioctl = &atl2_ioctl;
	atl2_set_ethtool_ops(netdev);

#ifdef HAVE_TX_TIMEOUT
	netdev->tx_timeout = &atl2_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
#endif
#ifdef NETIF_F_HW_VLAN_TX
	netdev->vlan_rx_register = atl2_vlan_rx_register;
#endif
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	adapter->bd_number = cards_found;
	adapter->pci_using_64 = false;

	/* setup the private structure */
	err = atl2_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;

#ifdef NETIF_F_HW_VLAN_TX
	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
#endif

#ifdef NETIF_F_LLTX
	netdev->features |= NETIF_F_LLTX;
#endif

	/* Init PHY as early as possible due to power saving issue */
	atl2_phy_init(&adapter->hw);

	/* reset the controller to
	 * put the device in a known good starting state */

	if (atl2_reset_hw(&adapter->hw)) {
		err = -EIO;
		goto err_reset;
	}

	/* copy the MAC address out of the EEPROM */
	atl2_read_mac_addr(&adapter->hw);
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
/* FIXME: do we still need this? */
#ifdef ETHTOOL_GPERMADDR
	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
#else
	if (!is_valid_ether_addr(netdev->dev_addr)) {
#endif
		err = -EIO;
		goto err_eeprom;
	}

	atl2_check_options(adapter);

	/* periodic statistics collection */
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &atl2_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	/* deferred PHY renegotiation (armed by atl2_check_link) */
	init_timer(&adapter->phy_config_timer);
	adapter->phy_config_timer.function = &atl2_phy_config;
	adapter->phy_config_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, atl2_reset_task);
	INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);

	strcpy(netdev->name, "eth%d"); /* ?? */
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* assume we have no link for now */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	cards_found++;

	return 0;

err_reset:
err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
1510
/*
 * atl2_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl2_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory. Teardown mirrors atl2_probe in reverse order.
 */
/* FIXME: write the original MAC address back in case it was changed from a
 * BIOS-set value, as in atl1 -- CHS */
static void __devexit atl2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl2_adapter *adapter = netdev_priv(netdev);

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__ATL2_DOWN, &adapter->flags);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_config_timer);

	flush_scheduled_work();

	unregister_netdev(netdev);

	/* put the PHY into power-save before releasing resources */
	atl2_force_ps(&adapter->hw);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
1547
/*
 * atl2_suspend - PCI suspend hook (also used by atl2_shutdown)
 * @pdev: PCI device information struct
 * @state: target power state
 *
 * Stops the interface, then arms wake-on-LAN according to
 * adapter->wol: magic-packet wake when the link is up, link-change
 * wake when the link is down, or fully disables WOL otherwise, before
 * handing the device to the requested power state.
 *
 * Returns 0, or the error from pci_save_state when CONFIG_PM is set.
 */
static int atl2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;
	u16 speed, duplex;
	u32 ctrl = 0;
	u32 wufc = adapter->wol;

#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__ATL2_RESETTING, &adapter->flags));
		atl2_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	/* BMSR read twice: the link-status bit is latched */
	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	atl2_read_phy_reg(hw, MII_BMSR, (u16 *)&ctrl);
	if (ctrl & BMSR_LSTATUS)
		wufc &= ~ATLX_WUFC_LNKC;	/* link up: no link-change wake */

	/* link up and some wake event requested: magic-packet WOL path */
	if (0 != (ctrl & BMSR_LSTATUS) && 0 != wufc) {
		u32 ret_val;
		/* get current link speed & duplex */
		ret_val = atl2_get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			printk(KERN_DEBUG
				"%s: get speed&duplex error while suspend\n",
				atl2_driver_name);
			goto wol_dis;
		}

		ctrl = 0;

		/* turn on magic packet wol */
		if (wufc & ATLX_WUFC_MAG)
			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);

		/* ignore Link Chg event when Link is up */
		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);

		/* Config MAC CTRL Register: keep RX alive so the magic
		 * packet can be received */
		ctrl = MAC_CTRL_RX_EN | MAC_CTRL_MACLP_CLK_PHY;
		if (FULL_DUPLEX == adapter->link_duplex)
			ctrl |= MAC_CTRL_DUPLX;
		ctrl |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
		ctrl |= (((u32)adapter->hw.preamble_len &
			MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
		ctrl |= (((u32)(adapter->hw.retry_buf &
			MAC_CTRL_HALF_LEFT_BUF_MASK)) <<
			MAC_CTRL_HALF_LEFT_BUF_SHIFT);
		if (wufc & ATLX_WUFC_MAG) {
			/* magic packet maybe Broadcast&multicast&Unicast */
			ctrl |= MAC_CTRL_BC_EN;
		}

		ATL2_WRITE_REG(hw, REG_MAC_CTRL, ctrl);

		/* pcie patch */
		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);

		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
		goto suspend_exit;
	}

	/* link down with link-change wake requested */
	if (0 == (ctrl&BMSR_LSTATUS) && 0 != (wufc&ATLX_WUFC_LNKC)) {
		/* link is down, so only LINK CHG WOL event enable */
		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
		ATL2_WRITE_REG(hw, REG_WOL_CTRL, ctrl);
		ATL2_WRITE_REG(hw, REG_MAC_CTRL, 0);

		/* pcie patch */
		ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
		ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
		ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
		ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
		ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);

		hw->phy_configured = false; /* re-init PHY when resume */

		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);

		goto suspend_exit;
	}

wol_dis:
	/* WOL disabled */
	ATL2_WRITE_REG(hw, REG_WOL_CTRL, 0);

	/* pcie patch */
	ctrl = ATL2_READ_REG(hw, REG_PCIE_PHYMISC);
	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
	ATL2_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
	ctrl = ATL2_READ_REG(hw, REG_PCIE_DLL_TX_CTRL1);
	ctrl |= PCIE_DLL_TX_CTRL1_SEL_NOR_CLK;
	ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, ctrl);

	/* no wake source: put the PHY into power save */
	atl2_force_ps(hw);
	hw->phy_configured = false; /* re-init PHY when resume */

	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);

suspend_exit:
	if (netif_running(netdev))
		atl2_free_irq(adapter);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1676
1677#ifdef CONFIG_PM
1678static int atl2_resume(struct pci_dev *pdev)
1679{
1680 struct net_device *netdev = pci_get_drvdata(pdev);
1681 struct atl2_adapter *adapter = netdev_priv(netdev);
1682 u32 err;
1683
1684 pci_set_power_state(pdev, PCI_D0);
1685 pci_restore_state(pdev);
1686
1687 err = pci_enable_device(pdev);
1688 if (err) {
1689 printk(KERN_ERR
1690 "atl2: Cannot enable PCI device from suspend\n");
1691 return err;
1692 }
1693
1694 pci_set_master(pdev);
1695
1696 ATL2_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */
1697
1698 pci_enable_wake(pdev, PCI_D3hot, 0);
1699 pci_enable_wake(pdev, PCI_D3cold, 0);
1700
1701 ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
1702
1703 err = atl2_request_irq(adapter);
1704 if (netif_running(netdev) && err)
1705 return err;
1706
1707 atl2_reset_hw(&adapter->hw);
1708
1709 if (netif_running(netdev))
1710 atl2_up(adapter);
1711
1712 netif_device_attach(netdev);
1713
1714 return 0;
1715}
1716#endif
1717
/* PCI shutdown hook: reuse the suspend path so WOL gets armed */
static void atl2_shutdown(struct pci_dev *pdev)
{
	atl2_suspend(pdev, PMSG_SUSPEND);
}
1722
/* PCI driver glue: probe/remove plus power-management entry points */
static struct pci_driver atl2_driver = {
	.name = atl2_driver_name,
	.id_table = atl2_pci_tbl,
	.probe = atl2_probe,
	.remove = __devexit_p(atl2_remove),
	/* Power Management Hooks */
	.suspend = atl2_suspend,
#ifdef CONFIG_PM
	.resume = atl2_resume,
#endif
	.shutdown = atl2_shutdown,
};
1735
/*
 * atl2_init_module - Driver Registration Routine
 *
 * atl2_init_module is the first routine called when the driver is
 * loaded. All it does is print the banner and register with the PCI
 * subsystem.
 */
static int __init atl2_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n", atl2_driver_string,
		atl2_driver_version);
	printk(KERN_INFO "%s\n", atl2_copyright);
	return pci_register_driver(&atl2_driver);
}
module_init(atl2_init_module);
1750
/*
 * atl2_exit_module - Driver Exit Cleanup Routine
 *
 * atl2_exit_module is called just before the driver is removed
 * from memory; unregistering triggers atl2_remove for each device.
 */
static void __exit atl2_exit_module(void)
{
	pci_unregister_driver(&atl2_driver);
}
module_exit(atl2_exit_module);
1762
1763static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1764{
1765 struct atl2_adapter *adapter = hw->back;
1766 pci_read_config_word(adapter->pdev, reg, value);
1767}
1768
1769static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value)
1770{
1771 struct atl2_adapter *adapter = hw->back;
1772 pci_write_config_word(adapter->pdev, reg, *value);
1773}
1774
/*
 * atl2_get_settings - ethtool get_settings hook
 * @netdev: network interface device structure
 * @ecmd: ethtool command buffer to fill in
 *
 * Reports the fixed 10/100 TP capabilities, the currently advertised
 * modes, and the active link speed/duplex (or -1/-1 when there is no
 * link). Always returns 0.
 */
static int atl2_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;

	ecmd->supported = (SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half |
		SUPPORTED_100baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP);
	ecmd->advertising = ADVERTISED_TP;

	ecmd->advertising |= ADVERTISED_Autoneg;
	ecmd->advertising |= hw->autoneg_advertised;

	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	ecmd->transceiver = XCVR_INTERNAL;

	if (adapter->link_speed != SPEED_0) {
		ecmd->speed = adapter->link_speed;
		if (adapter->link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		/* no link: ethtool convention for "unknown" */
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_ENABLE;
	return 0;
}
1810
/*
 * atl2_set_settings - ethtool set_settings hook
 * @netdev: network interface device structure
 * @ecmd: requested settings
 *
 * Only autonegotiated configurations are accepted; the advertised
 * mask must be either all four 10/100 modes or exactly one of them.
 * On success the link is reset (down/up if running, otherwise a bare
 * hardware reset). Serialized against other resets via the
 * __ATL2_RESETTING bit.
 *
 * Returns 0 on success, -EINVAL for unsupported configurations.
 */
static int atl2_set_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;

	while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
#define MY_ADV_MASK	(ADVERTISE_10_HALF | \
			 ADVERTISE_10_FULL | \
			 ADVERTISE_100_HALF| \
			 ADVERTISE_100_FULL)

		/* map the advertised mask onto a media type: all four
		 * modes -> auto-sense, exactly one mode -> forced */
		if ((ecmd->advertising & MY_ADV_MASK) == MY_ADV_MASK) {
			hw->MediaType = MEDIA_TYPE_AUTO_SENSOR;
			hw->autoneg_advertised = MY_ADV_MASK;
		} else if ((ecmd->advertising & MY_ADV_MASK) ==
				ADVERTISE_100_FULL) {
			hw->MediaType = MEDIA_TYPE_100M_FULL;
			hw->autoneg_advertised = ADVERTISE_100_FULL;
		} else if ((ecmd->advertising & MY_ADV_MASK) ==
				ADVERTISE_100_HALF) {
			hw->MediaType = MEDIA_TYPE_100M_HALF;
			hw->autoneg_advertised = ADVERTISE_100_HALF;
		} else if ((ecmd->advertising & MY_ADV_MASK) ==
				ADVERTISE_10_FULL) {
			hw->MediaType = MEDIA_TYPE_10M_FULL;
			hw->autoneg_advertised = ADVERTISE_10_FULL;
		} else if ((ecmd->advertising & MY_ADV_MASK) ==
				ADVERTISE_10_HALF) {
			hw->MediaType = MEDIA_TYPE_10M_HALF;
			hw->autoneg_advertised = ADVERTISE_10_HALF;
		} else {
			clear_bit(__ATL2_RESETTING, &adapter->flags);
			return -EINVAL;
		}
		ecmd->advertising = hw->autoneg_advertised |
			ADVERTISED_TP | ADVERTISED_Autoneg;
	} else {
		/* forcing without autoneg is not supported */
		clear_bit(__ATL2_RESETTING, &adapter->flags);
		return -EINVAL;
	}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		atl2_down(adapter);
		atl2_up(adapter);
	} else
		atl2_reset_hw(&adapter->hw);

	clear_bit(__ATL2_RESETTING, &adapter->flags);
	return 0;
}
1866
1867static u32 atl2_get_tx_csum(struct net_device *netdev)
1868{
1869 return (netdev->features & NETIF_F_HW_CSUM) != 0;
1870}
1871
/* ethtool: message level is not implemented; always report 0 */
static u32 atl2_get_msglevel(struct net_device *netdev)
{
	return 0;
}
1876
/*
 * It's sane for this to be empty, but we might want to take advantage of this.
 * (Deliberate no-op: the driver keeps no message-level state.)
 */
static void atl2_set_msglevel(struct net_device *netdev, u32 data)
{
}
1883
1884static int atl2_get_regs_len(struct net_device *netdev)
1885{
1886#define ATL2_REGS_LEN 42
1887 return sizeof(u32) * ATL2_REGS_LEN;
1888}
1889
/*
 * atl2_get_regs - ethtool register dump
 * @netdev: network interface device structure
 * @regs: ethtool regs header (version is filled in here)
 * @p: caller buffer of ATL2_REGS_LEN u32 slots
 *
 * Snapshots a fixed list of MAC/PCIe registers plus the PHY BMCR and
 * BMSR into the buffer.
 */
static void atl2_get_regs(struct net_device *netdev,
	struct ethtool_regs *regs, void *p)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);
	struct atl2_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;

	memset(p, 0, sizeof(u32) * ATL2_REGS_LEN);

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	regs_buff[0] = ATL2_READ_REG(hw, REG_VPD_CAP);
	regs_buff[1] = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
	regs_buff[2] = ATL2_READ_REG(hw, REG_SPI_FLASH_CONFIG);
	regs_buff[3] = ATL2_READ_REG(hw, REG_TWSI_CTRL);
	regs_buff[4] = ATL2_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL);
	regs_buff[5] = ATL2_READ_REG(hw, REG_MASTER_CTRL);
	regs_buff[6] = ATL2_READ_REG(hw, REG_MANUAL_TIMER_INIT);
	regs_buff[7] = ATL2_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT);
	regs_buff[8] = ATL2_READ_REG(hw, REG_PHY_ENABLE);
	regs_buff[9] = ATL2_READ_REG(hw, REG_CMBDISDMA_TIMER);
	regs_buff[10] = ATL2_READ_REG(hw, REG_IDLE_STATUS);
	regs_buff[11] = ATL2_READ_REG(hw, REG_MDIO_CTRL);
	regs_buff[12] = ATL2_READ_REG(hw, REG_SERDES_LOCK);
	regs_buff[13] = ATL2_READ_REG(hw, REG_MAC_CTRL);
	regs_buff[14] = ATL2_READ_REG(hw, REG_MAC_IPG_IFG);
	regs_buff[15] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
	regs_buff[16] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR+4);
	regs_buff[17] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE);
	regs_buff[18] = ATL2_READ_REG(hw, REG_RX_HASH_TABLE+4);
	regs_buff[19] = ATL2_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL);
	regs_buff[20] = ATL2_READ_REG(hw, REG_MTU);
	regs_buff[21] = ATL2_READ_REG(hw, REG_WOL_CTRL);
	regs_buff[22] = ATL2_READ_REG(hw, REG_SRAM_TXRAM_END);
	regs_buff[23] = ATL2_READ_REG(hw, REG_DESC_BASE_ADDR_HI);
	regs_buff[24] = ATL2_READ_REG(hw, REG_TXD_BASE_ADDR_LO);
	regs_buff[25] = ATL2_READ_REG(hw, REG_TXD_MEM_SIZE);
	regs_buff[26] = ATL2_READ_REG(hw, REG_TXS_BASE_ADDR_LO);
	regs_buff[27] = ATL2_READ_REG(hw, REG_TXS_MEM_SIZE);
	regs_buff[28] = ATL2_READ_REG(hw, REG_RXD_BASE_ADDR_LO);
	regs_buff[29] = ATL2_READ_REG(hw, REG_RXD_BUF_NUM);
	regs_buff[30] = ATL2_READ_REG(hw, REG_DMAR);
	regs_buff[31] = ATL2_READ_REG(hw, REG_TX_CUT_THRESH);
	regs_buff[32] = ATL2_READ_REG(hw, REG_DMAW);
	regs_buff[33] = ATL2_READ_REG(hw, REG_PAUSE_ON_TH);
	regs_buff[34] = ATL2_READ_REG(hw, REG_PAUSE_OFF_TH);
	regs_buff[35] = ATL2_READ_REG(hw, REG_MB_TXD_WR_IDX);
	regs_buff[36] = ATL2_READ_REG(hw, REG_MB_RXD_RD_IDX);
	/* NOTE(review): regs_buff[37] is never written (stays 0 from the
	 * memset) — confirm whether a register was dropped here */
	regs_buff[38] = ATL2_READ_REG(hw, REG_ISR);
	regs_buff[39] = ATL2_READ_REG(hw, REG_IMR);

	atl2_read_phy_reg(hw, MII_BMCR, &phy_data);
	regs_buff[40] = (u32)phy_data;
	atl2_read_phy_reg(hw, MII_BMSR, &phy_data);
	regs_buff[41] = (u32)phy_data;
}
1947
1948static int atl2_get_eeprom_len(struct net_device *netdev)
1949{
1950 struct atl2_adapter *adapter = netdev_priv(netdev);
1951
1952 if (!atl2_check_eeprom_exist(&adapter->hw))
1953 return 512;
1954 else
1955 return 0;
1956}
1957
1958static int atl2_get_eeprom(struct net_device *netdev,
1959 struct ethtool_eeprom *eeprom, u8 *bytes)
1960{
1961 struct atl2_adapter *adapter = netdev_priv(netdev);
1962 struct atl2_hw *hw = &adapter->hw;
1963 u32 *eeprom_buff;
1964 int first_dword, last_dword;
1965 int ret_val = 0;
1966 int i;
1967
1968 if (eeprom->len == 0)
1969 return -EINVAL;
1970
1971 if (atl2_check_eeprom_exist(hw))
1972 return -EINVAL;
1973
1974 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
1975
1976 first_dword = eeprom->offset >> 2;
1977 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
1978
1979 eeprom_buff = kmalloc(sizeof(u32) * (last_dword - first_dword + 1),
1980 GFP_KERNEL);
1981 if (!eeprom_buff)
1982 return -ENOMEM;
1983
1984 for (i = first_dword; i < last_dword; i++) {
1985 if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword])))
1986 return -EIO;
1987 }
1988
1989 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
1990 eeprom->len);
1991 kfree(eeprom_buff);
1992
1993 return ret_val;
1994}
1995
1996static int atl2_set_eeprom(struct net_device *netdev,
1997 struct ethtool_eeprom *eeprom, u8 *bytes)
1998{
1999 struct atl2_adapter *adapter = netdev_priv(netdev);
2000 struct atl2_hw *hw = &adapter->hw;
2001 u32 *eeprom_buff;
2002 u32 *ptr;
2003 int max_len, first_dword, last_dword, ret_val = 0;
2004 int i;
2005
2006 if (eeprom->len == 0)
2007 return -EOPNOTSUPP;
2008
2009 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
2010 return -EFAULT;
2011
2012 max_len = 512;
2013
2014 first_dword = eeprom->offset >> 2;
2015 last_dword = (eeprom->offset + eeprom->len - 1) >> 2;
2016 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
2017 if (!eeprom_buff)
2018 return -ENOMEM;
2019
2020 ptr = (u32 *)eeprom_buff;
2021
2022 if (eeprom->offset & 3) {
2023 /* need read/modify/write of first changed EEPROM word */
2024 /* only the second byte of the word is being modified */
2025 if (!atl2_read_eeprom(hw, first_dword*4, &(eeprom_buff[0])))
2026 return -EIO;
2027 ptr++;
2028 }
2029 if (((eeprom->offset + eeprom->len) & 3)) {
2030 /*
2031 * need read/modify/write of last changed EEPROM word
2032 * only the first byte of the word is being modified
2033 */
2034 if (!atl2_read_eeprom(hw, last_dword * 4,
2035 &(eeprom_buff[last_dword - first_dword])))
2036 return -EIO;
2037 }
2038
2039 /* Device's eeprom is always little-endian, word addressable */
2040 memcpy(ptr, bytes, eeprom->len);
2041
2042 for (i = 0; i < last_dword - first_dword + 1; i++) {
2043 if (!atl2_write_eeprom(hw, ((first_dword+i)*4), eeprom_buff[i]))
2044 return -EIO;
2045 }
2046
2047 kfree(eeprom_buff);
2048 return ret_val;
2049}
2050
2051static void atl2_get_drvinfo(struct net_device *netdev,
2052 struct ethtool_drvinfo *drvinfo)
2053{
2054 struct atl2_adapter *adapter = netdev_priv(netdev);
2055
2056 strncpy(drvinfo->driver, atl2_driver_name, 32);
2057 strncpy(drvinfo->version, atl2_driver_version, 32);
2058 strncpy(drvinfo->fw_version, "L2", 32);
2059 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
2060 drvinfo->n_stats = 0;
2061 drvinfo->testinfo_len = 0;
2062 drvinfo->regdump_len = atl2_get_regs_len(netdev);
2063 drvinfo->eedump_len = atl2_get_eeprom_len(netdev);
2064}
2065
2066static void atl2_get_wol(struct net_device *netdev,
2067 struct ethtool_wolinfo *wol)
2068{
2069 struct atl2_adapter *adapter = netdev_priv(netdev);
2070
2071 wol->supported = WAKE_MAGIC;
2072 wol->wolopts = 0;
2073
2074 if (adapter->wol & ATLX_WUFC_EX)
2075 wol->wolopts |= WAKE_UCAST;
2076 if (adapter->wol & ATLX_WUFC_MC)
2077 wol->wolopts |= WAKE_MCAST;
2078 if (adapter->wol & ATLX_WUFC_BC)
2079 wol->wolopts |= WAKE_BCAST;
2080 if (adapter->wol & ATLX_WUFC_MAG)
2081 wol->wolopts |= WAKE_MAGIC;
2082 if (adapter->wol & ATLX_WUFC_LNKC)
2083 wol->wolopts |= WAKE_PHY;
2084}
2085
2086static int atl2_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2087{
2088 struct atl2_adapter *adapter = netdev_priv(netdev);
2089
2090 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
2091 return -EOPNOTSUPP;
2092
2093 if (wol->wolopts & (WAKE_MCAST|WAKE_BCAST|WAKE_MCAST))
2094 return -EOPNOTSUPP;
2095
2096 /* these settings will always override what we currently have */
2097 adapter->wol = 0;
2098
2099 if (wol->wolopts & WAKE_MAGIC)
2100 adapter->wol |= ATLX_WUFC_MAG;
2101 if (wol->wolopts & WAKE_PHY)
2102 adapter->wol |= ATLX_WUFC_LNKC;
2103
2104 return 0;
2105}
2106
/*
 * atl2_nway_reset - ethtool autonegotiation restart hook
 *
 * Restarts autonegotiation by reinitializing the interface, but only
 * when it is actually up; always reports success.
 */
static int atl2_nway_reset(struct net_device *netdev)
{
	struct atl2_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		atl2_reinit_locked(adapter);

	return 0;
}
2114
/* ethtool entry points implemented by this driver.
 * NOTE(review): most drivers declare this table const; left non-const
 * here to avoid changing the type handed to SET_ETHTOOL_OPS(). */
static struct ethtool_ops atl2_ethtool_ops = {
	.get_settings = atl2_get_settings,
	.set_settings = atl2_set_settings,
	.get_drvinfo = atl2_get_drvinfo,
	.get_regs_len = atl2_get_regs_len,
	.get_regs = atl2_get_regs,
	.get_wol = atl2_get_wol,
	.set_wol = atl2_set_wol,
	.get_msglevel = atl2_get_msglevel,
	.set_msglevel = atl2_set_msglevel,
	.nway_reset = atl2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = atl2_get_eeprom_len,
	.get_eeprom = atl2_get_eeprom,
	.set_eeprom = atl2_set_eeprom,
	.get_tx_csum = atl2_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef NETIF_F_TSO
	/* only advertise TSO query support on kernels that define it */
	.get_tso = ethtool_op_get_tso,
#endif
};
2137
/* Install the driver's ethtool operations table on @netdev. */
static void atl2_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
}
2142
/* Byte-order helpers used when reassembling the MAC address from the
 * EEPROM/flash records in get_permanent_address():
 *   LBYTESWAP - swap the bytes within each 16-bit half of a 32-bit value
 *   LONGSWAP  - reverse all four bytes of a 32-bit value
 *   SHORTSWAP - reverse the two bytes of a 16-bit value
 * NOTE(review): each macro evaluates its argument more than once, so
 * never pass an expression with side effects. */
#define LBYTESWAP(a) ((((a) & 0x00ff00ff) << 8) | \
	(((a) & 0xff00ff00) >> 8))
#define LONGSWAP(a) ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
#define SHORTSWAP(a) (((a) << 8) | ((a) >> 8))
2147
/*
 * Reset the transmit and receive units; mask and clear all interrupts.
 *
 * hw - Struct containing variables accessed by shared code
 * return : 0 on success, otherwise the last non-zero idle-status value
 *          (identifies the module(s) that never went idle)
 */
static s32 atl2_reset_hw(struct atl2_hw *hw)
{
	u32 icr;
	u16 pci_cfg_cmd_word;
	int i;

	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly:
	 * make sure I/O space, memory space and bus mastering are all
	 * enabled before touching the device. */
	atl2_read_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
	if ((pci_cfg_cmd_word &
		(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) !=
		(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER)) {
		pci_cfg_cmd_word |=
			(CMD_IO_SPACE|CMD_MEMORY_SPACE|CMD_BUS_MASTER);
		atl2_write_pci_cfg(hw, PCI_REG_COMMAND, &pci_cfg_cmd_word);
	}

	/* Clear Interrupt mask to stop board from generating
	 * interrupts & Clear any pending interrupt events
	 */
	/* FIXME: these two writes are disabled in the vendor code */
	/* ATL2_WRITE_REG(hw, REG_IMR, 0); */
	/* ATL2_WRITE_REG(hw, REG_ISR, 0xffffffff); */

	/* Issue Soft Reset to the MAC. This will reset the chip's
	 * transmit, receive, DMA. It will not effect
	 * the current PCI configuration. The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	ATL2_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_SOFT_RST);
	wmb();
	msleep(1); /* delay about 1ms */

	/* Wait up to ~10ms for every module to report idle (status 0). */
	for (i = 0; i < 10; i++) {
		icr = ATL2_READ_REG(hw, REG_IDLE_STATUS);
		if (!icr)
			break;
		msleep(1); /* delay 1 ms */
		cpu_relax();
	}

	if (icr)
		return icr;	/* some unit never went idle */

	return 0;
}
2200
/* SPI bus timing parameters programmed into SPI_FLASH_CTRL -- presumably
 * in SPI clock units; confirm against the L2 datasheet. */
#define CUSTOM_SPI_CS_SETUP 2
#define CUSTOM_SPI_CLK_HI 2
#define CUSTOM_SPI_CLK_LO 2
#define CUSTOM_SPI_CS_HOLD 2
#define CUSTOM_SPI_CS_HI 3

/* Per-vendor SPI flash opcode sets; the row is selected by
 * hw->flash_vendor (0 = Atmel, 1 = SST, 2 = ST -- see the FlashVendor
 * module parameter) and programmed in atl2_init_flash_opcode(). */
static struct atl2_spi_flash_dev flash_table[] =
{
/* MFR WRSR READ PROGRAM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
{"Atmel", 0x0, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62 },
{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60 },
{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7 },
};
2214
/*
 * atl2_spi_read - read one 32-bit word from the SPI flash
 * @hw:   device state
 * @addr: flash byte address to read
 * @buf:  output; receives the word on success
 *
 * Programs the custom bus timings plus the READ instruction (0x1),
 * kicks off the transfer, and polls up to ~10ms for the START bit to
 * self-clear.  Returns true on success, false on timeout.
 */
static bool atl2_spi_read(struct atl2_hw *hw, u32 addr, u32 *buf)
{
	int i;
	u32 value;

	ATL2_WRITE_REG(hw, REG_SPI_DATA, 0);
	ATL2_WRITE_REG(hw, REG_SPI_ADDR, addr);

	/* assemble timing fields and the instruction index (0x1 = READ) */
	value = SPI_FLASH_CTRL_WAIT_READY |
		(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
		SPI_FLASH_CTRL_CS_SETUP_SHIFT |
		(CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) <<
		SPI_FLASH_CTRL_CLK_HI_SHIFT |
		(CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) <<
		SPI_FLASH_CTRL_CLK_LO_SHIFT |
		(CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) <<
		SPI_FLASH_CTRL_CS_HOLD_SHIFT |
		(CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) <<
		SPI_FLASH_CTRL_CS_HI_SHIFT |
		(0x1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT;

	/* write the configuration first, then set START to fire it off */
	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);

	value |= SPI_FLASH_CTRL_START;

	ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);

	/* hardware clears START when the transfer completes */
	for (i = 0; i < 10; i++) {
		msleep(1);
		value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
		if (!(value & SPI_FLASH_CTRL_START))
			break;
	}

	if (value & SPI_FLASH_CTRL_START)
		return false;	/* timed out */

	*buf = ATL2_READ_REG(hw, REG_SPI_DATA);

	return true;
}
2256
/*
 * get_permanent_address
 * return 0 if get valid mac address,
 *
 * Tries three sources in order: the VPD EEPROM, the SPI flash, and
 * finally whatever the BIOS left in the MAC station-address registers.
 * The EEPROM/flash data is a stream of 32-bit records: a word whose low
 * byte is 0x5A carries a register offset in its upper 16 bits, and the
 * next word holds the value for that register; parsing stops at the
 * first word that is neither.
 */
static int get_permanent_address(struct atl2_hw *hw)
{
	u32 Addr[2];
	u32 i, Control;
	u16 Register;
	u8 EthAddr[NODE_ADDRESS_SIZE];
	bool KeyValid;

	/* nothing to do if a valid permanent address is already cached */
	if (is_valid_ether_addr(hw->perm_mac_addr))
		return 0;

	Addr[0] = 0;
	Addr[1] = 0;

	if (!atl2_check_eeprom_exist(hw)) { /* eeprom exists */
		Register = 0;
		KeyValid = false;

		/* Read out all EEPROM content starting at offset 0x100,
		 * collecting the two MAC station-address words. */
		i = 0;
		while (1) {
			if (atl2_read_eeprom(hw, i + 0x100, &Control)) {
				if (KeyValid) {
					if (Register == REG_MAC_STA_ADDR)
						Addr[0] = Control;
					else if (Register ==
						(REG_MAC_STA_ADDR + 4))
						Addr[1] = Control;
					KeyValid = false;
				} else if ((Control & 0xff) == 0x5A) {
					KeyValid = true;
					Register = (u16) (Control >> 16);
				} else {
					/* assume data end while encount an invalid KEYWORD */
					break;
				}
			} else {
				break; /* read error */
			}
			i += 4;
		}

		/* NOTE(review): these casts assume a little-endian CPU and
		 * bypass strict aliasing -- verify before reusing. */
		*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
		*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);

		if (is_valid_ether_addr(EthAddr)) {
			memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
			return 0;
		}
		return 1;
	}

	/* see if SPI flash exists? records live at flash offset 0x1f000 */
	Addr[0] = 0;
	Addr[1] = 0;
	Register = 0;
	KeyValid = false;
	i = 0;
	while (1) {
		if (atl2_spi_read(hw, i + 0x1f000, &Control)) {
			if (KeyValid) {
				if (Register == REG_MAC_STA_ADDR)
					Addr[0] = Control;
				else if (Register == (REG_MAC_STA_ADDR + 4))
					Addr[1] = Control;
				KeyValid = false;
			} else if ((Control & 0xff) == 0x5A) {
				KeyValid = true;
				Register = (u16) (Control >> 16);
			} else {
				break; /* data end */
			}
		} else {
			break; /* read error */
		}
		i += 4;
	}

	*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
	*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *)&Addr[1]);
	if (is_valid_ether_addr(EthAddr)) {
		memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
		return 0;
	}
	/* maybe MAC-address is from BIOS */
	Addr[0] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR);
	Addr[1] = ATL2_READ_REG(hw, REG_MAC_STA_ADDR + 4);
	*(u32 *) &EthAddr[2] = LONGSWAP(Addr[0]);
	*(u16 *) &EthAddr[0] = SHORTSWAP(*(u16 *) &Addr[1]);

	if (is_valid_ether_addr(EthAddr)) {
		memcpy(hw->perm_mac_addr, EthAddr, NODE_ADDRESS_SIZE);
		return 0;
	}

	return 1;	/* no valid address anywhere */
}
2358
2359/*
2360 * Reads the adapter's MAC address from the EEPROM
2361 *
2362 * hw - Struct containing variables accessed by shared code
2363 */
2364static s32 atl2_read_mac_addr(struct atl2_hw *hw)
2365{
2366 u16 i;
2367
2368 if (get_permanent_address(hw)) {
2369 /* for test */
2370 /* FIXME: shouldn't we use random_ether_addr() here? */
2371 hw->perm_mac_addr[0] = 0x00;
2372 hw->perm_mac_addr[1] = 0x13;
2373 hw->perm_mac_addr[2] = 0x74;
2374 hw->perm_mac_addr[3] = 0x00;
2375 hw->perm_mac_addr[4] = 0x5c;
2376 hw->perm_mac_addr[5] = 0x38;
2377 }
2378
2379 for (i = 0; i < NODE_ADDRESS_SIZE; i++)
2380 hw->mac_addr[i] = hw->perm_mac_addr[i];
2381
2382 return 0;
2383}
2384
2385/*
2386 * Hashes an address to determine its location in the multicast table
2387 *
2388 * hw - Struct containing variables accessed by shared code
2389 * mc_addr - the multicast address to hash
2390 *
2391 * atl2_hash_mc_addr
2392 * purpose
2393 * set hash value for a multicast address
2394 * hash calcu processing :
2395 * 1. calcu 32bit CRC for multicast address
2396 * 2. reverse crc with MSB to LSB
2397 */
2398static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr)
2399{
2400 u32 crc32, value;
2401 int i;
2402
2403 value = 0;
2404 crc32 = ether_crc_le(6, mc_addr);
2405
2406 for (i = 0; i < 32; i++)
2407 value |= (((crc32 >> i) & 1) << (31 - i));
2408
2409 return value;
2410}
2411
2412/*
2413 * Sets the bit in the multicast table corresponding to the hash value.
2414 *
2415 * hw - Struct containing variables accessed by shared code
2416 * hash_value - Multicast address hash value
2417 */
2418static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value)
2419{
2420 u32 hash_bit, hash_reg;
2421 u32 mta;
2422
2423 /* The HASH Table is a register array of 2 32-bit registers.
2424 * It is treated like an array of 64 bits. We want to set
2425 * bit BitArray[hash_value]. So we figure out what register
2426 * the bit is in, read it, OR in the new bit, then write
2427 * back the new value. The register is determined by the
2428 * upper 7 bits of the hash value and the bit within that
2429 * register are determined by the lower 5 bits of the value.
2430 */
2431 hash_reg = (hash_value >> 31) & 0x1;
2432 hash_bit = (hash_value >> 26) & 0x1F;
2433
2434 mta = ATL2_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
2435
2436 mta |= (1 << hash_bit);
2437
2438 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
2439}
2440
2441/*
2442 * atl2_init_pcie - init PCIE module
2443 */
2444static void atl2_init_pcie(struct atl2_hw *hw)
2445{
2446 u32 value;
2447 value = LTSSM_TEST_MODE_DEF;
2448 ATL2_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value);
2449
2450 value = PCIE_DLL_TX_CTRL1_DEF;
2451 ATL2_WRITE_REG(hw, REG_PCIE_DLL_TX_CTRL1, value);
2452}
2453
2454static void atl2_init_flash_opcode(struct atl2_hw *hw)
2455{
2456 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
2457 hw->flash_vendor = 0; /* ATMEL */
2458
2459 /* Init OP table */
2460 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_PROGRAM,
2461 flash_table[hw->flash_vendor].cmdPROGRAM);
2462 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_SC_ERASE,
2463 flash_table[hw->flash_vendor].cmdSECTOR_ERASE);
2464 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_CHIP_ERASE,
2465 flash_table[hw->flash_vendor].cmdCHIP_ERASE);
2466 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDID,
2467 flash_table[hw->flash_vendor].cmdRDID);
2468 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WREN,
2469 flash_table[hw->flash_vendor].cmdWREN);
2470 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_RDSR,
2471 flash_table[hw->flash_vendor].cmdRDSR);
2472 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_WRSR,
2473 flash_table[hw->flash_vendor].cmdWRSR);
2474 ATL2_WRITE_REGB(hw, REG_SPI_FLASH_OP_READ,
2475 flash_table[hw->flash_vendor].cmdREAD);
2476}
2477
2478/********************************************************************
2479* Performs basic configuration of the adapter.
2480*
2481* hw - Struct containing variables accessed by shared code
2482* Assumes that the controller has previously been reset and is in a
2483* post-reset uninitialized state. Initializes multicast table,
2484* and Calls routines to setup link
2485* Leaves the transmit and receive units disabled and uninitialized.
2486********************************************************************/
2487static s32 atl2_init_hw(struct atl2_hw *hw)
2488{
2489 u32 ret_val = 0;
2490
2491 atl2_init_pcie(hw);
2492
2493 /* Zero out the Multicast HASH table */
2494 /* clear the old settings from the multicast hash table */
2495 ATL2_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
2496 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
2497
2498 atl2_init_flash_opcode(hw);
2499
2500 ret_val = atl2_phy_init(hw);
2501
2502 return ret_val;
2503}
2504
2505/*
2506 * Detects the current speed and duplex settings of the hardware.
2507 *
2508 * hw - Struct containing variables accessed by shared code
2509 * speed - Speed of the connection
2510 * duplex - Duplex setting of the connection
2511 */
2512static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
2513 u16 *duplex)
2514{
2515 s32 ret_val;
2516 u16 phy_data;
2517
2518 /* Read PHY Specific Status Register (17) */
2519 ret_val = atl2_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
2520 if (ret_val)
2521 return ret_val;
2522
2523 if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
2524 return ATLX_ERR_PHY_RES;
2525
2526 switch (phy_data & MII_ATLX_PSSR_SPEED) {
2527 case MII_ATLX_PSSR_100MBS:
2528 *speed = SPEED_100;
2529 break;
2530 case MII_ATLX_PSSR_10MBS:
2531 *speed = SPEED_10;
2532 break;
2533 default:
2534 return ATLX_ERR_PHY_SPEED;
2535 break;
2536 }
2537
2538 if (phy_data & MII_ATLX_PSSR_DPLX)
2539 *duplex = FULL_DUPLEX;
2540 else
2541 *duplex = HALF_DUPLEX;
2542
2543 return 0;
2544}
2545
/*
 * Reads the value from a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to read
 * phy_data - output; receives the 16-bit register value on success
 *
 * Kicks off an MDIO cycle and polls (up to MDIO_WAIT_TIMES * 2us) for
 * the controller to clear the START/BUSY bits.  Returns 0 on success
 * or ATLX_ERR_PHY on timeout.
 */
static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	/* MDIO_RW is set here but not in atl2_write_phy_reg() --
	 * presumably it selects a read cycle; confirm with the datasheet */
	val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
		MDIO_START |
		MDIO_SUP_PREAMBLE |
		MDIO_RW |
		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);

	wmb();

	/* poll for completion: hardware clears START/BUSY when done */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;
		wmb();
	}
	if (!(val & (MDIO_START | MDIO_BUSY))) {
		/* the data comes back in the low 16 bits of MDIO_CTRL */
		*phy_data = (u16)val;
		return 0;
	}

	return ATLX_ERR_PHY;
}
2579
/*
 * Writes a value to a PHY register
 * hw - Struct containing variables accessed by shared code
 * reg_addr - address of the PHY register to write
 * data - data to write to the PHY
 *
 * Starts an MDIO write cycle and polls (up to MDIO_WAIT_TIMES * 2us)
 * for the controller to clear the START/BUSY bits.  Returns 0 on
 * success or ATLX_ERR_PHY on timeout.
 */
static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data)
{
	int i;
	u32 val;

	/* unlike the read path, MDIO_RW is left clear here */
	val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
		(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
		MDIO_SUP_PREAMBLE |
		MDIO_START |
		MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
	ATL2_WRITE_REG(hw, REG_MDIO_CTRL, val);

	wmb();

	/* poll for completion: hardware clears START/BUSY when done */
	for (i = 0; i < MDIO_WAIT_TIMES; i++) {
		udelay(2);
		val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY)))
			break;

		wmb();
	}

	if (!(val & (MDIO_START | MDIO_BUSY)))
		return 0;

	return ATLX_ERR_PHY;
}
2614
2615/*
2616 * Configures PHY autoneg and flow control advertisement settings
2617 *
2618 * hw - Struct containing variables accessed by shared code
2619 */
2620static s32 atl2_phy_setup_autoneg_adv(struct atl2_hw *hw)
2621{
2622 s32 ret_val;
2623 s16 mii_autoneg_adv_reg;
2624
2625 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
2626 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
2627
2628 /* Need to parse autoneg_advertised and set up
2629 * the appropriate PHY registers. First we will parse for
2630 * autoneg_advertised software override. Since we can advertise
2631 * a plethora of combinations, we need to check each bit
2632 * individually.
2633 */
2634
2635 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
2636 * Advertisement Register (Address 4) and the 1000 mb speed bits in
2637 * the 1000Base-T Control Register (Address 9). */
2638 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
2639
2640 /* Need to parse MediaType and setup the
2641 * appropriate PHY registers. */
2642 switch (hw->MediaType) {
2643 case MEDIA_TYPE_AUTO_SENSOR:
2644 mii_autoneg_adv_reg |=
2645 (MII_AR_10T_HD_CAPS |
2646 MII_AR_10T_FD_CAPS |
2647 MII_AR_100TX_HD_CAPS|
2648 MII_AR_100TX_FD_CAPS);
2649 hw->autoneg_advertised =
2650 ADVERTISE_10_HALF |
2651 ADVERTISE_10_FULL |
2652 ADVERTISE_100_HALF|
2653 ADVERTISE_100_FULL;
2654 break;
2655 case MEDIA_TYPE_100M_FULL:
2656 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
2657 hw->autoneg_advertised = ADVERTISE_100_FULL;
2658 break;
2659 case MEDIA_TYPE_100M_HALF:
2660 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
2661 hw->autoneg_advertised = ADVERTISE_100_HALF;
2662 break;
2663 case MEDIA_TYPE_10M_FULL:
2664 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
2665 hw->autoneg_advertised = ADVERTISE_10_FULL;
2666 break;
2667 default:
2668 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
2669 hw->autoneg_advertised = ADVERTISE_10_HALF;
2670 break;
2671 }
2672
2673 /* flow control fixed to enable all */
2674 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
2675
2676 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
2677
2678 ret_val = atl2_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
2679
2680 if (ret_val)
2681 return ret_val;
2682
2683 return 0;
2684}
2685
/*
 * Resets the PHY and make all config validate
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Sets bit 15 and 12 of the MII Control regiser (for F001 bug)
 *
 * Note the deliberate error handling: if the MDIO write fails, we wait
 * up to 25ms for the PCIe serdes link to recover and only propagate
 * the error if it never does -- a transiently-down link is tolerated.
 */
static s32 atl2_phy_commit(struct atl2_hw *hw)
{
	s32 ret_val;
	u16 phy_data;

	/* reset + enable/restart autonegotiation in a single write */
	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
	ret_val = atl2_write_phy_reg(hw, MII_BMCR, phy_data);
	if (ret_val) {
		u32 val;
		int i;
		/* pcie serdes link may be down ! */
		for (i = 0; i < 25; i++) {
			msleep(1);
			val = ATL2_READ_REG(hw, REG_MDIO_CTRL);
			if (!(val & (MDIO_START | MDIO_BUSY)))
				break;
		}

		if (0 != (val & (MDIO_START | MDIO_BUSY))) {
			printk(KERN_ERR "atl2: PCIe link down for at least 25ms !\n");
			return ret_val;
		}
	}
	return 0;
}
2718
/* Bring the PHY out of power saving, enable link-change interrupts,
 * program the autoneg advertisement and (re)start autonegotiation.
 * Idempotent: a no-op once hw->phy_configured is set. */
static s32 atl2_phy_init(struct atl2_hw *hw)
{
	s32 ret_val;
	u16 phy_val;

	if (hw->phy_configured)
		return 0;

	/* Enable PHY */
	ATL2_WRITE_REGW(hw, REG_PHY_ENABLE, 1);
	ATL2_WRITE_FLUSH(hw);
	msleep(1);

	/* check if the PHY is in powersaving mode: read vendor debug
	 * register 0 through the MII_DBG_ADDR/MII_DBG_DATA window */
	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);

	/* 024E / 124E 0r 0274 / 1274 ? -- bit 12 appears to be the
	 * power-saving flag; clear it if set */
	if (phy_val & 0x1000) {
		phy_val &= ~0x1000;
		atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val);
	}

	msleep(1);

	/* Enable PHY LinkChange Interrupt (vendor register 18) */
	ret_val = atl2_write_phy_reg(hw, 18, 0xC00);
	if (ret_val)
		return ret_val;

	/* setup AutoNeg parameters */
	ret_val = atl2_phy_setup_autoneg_adv(hw);
	if (ret_val)
		return ret_val;

	/* SW.Reset & En-Auto-Neg to restart Auto-Neg */
	ret_val = atl2_phy_commit(hw);
	if (ret_val)
		return ret_val;

	hw->phy_configured = true;

	return ret_val;
}
2763
2764static void atl2_set_mac_addr(struct atl2_hw *hw)
2765{
2766 u32 value;
2767 /* 00-0B-6A-F6-00-DC
2768 * 0: 6AF600DC 1: 000B
2769 * low dword */
2770 value = (((u32)hw->mac_addr[2]) << 24) |
2771 (((u32)hw->mac_addr[3]) << 16) |
2772 (((u32)hw->mac_addr[4]) << 8) |
2773 (((u32)hw->mac_addr[5]));
2774 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
2775 /* hight dword */
2776 value = (((u32)hw->mac_addr[0]) << 8) |
2777 (((u32)hw->mac_addr[1]));
2778 ATL2_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
2779}
2780
2781/*
2782 * check_eeprom_exist
2783 * return 0 if eeprom exist
2784 */
2785static int atl2_check_eeprom_exist(struct atl2_hw *hw)
2786{
2787 u32 value;
2788
2789 value = ATL2_READ_REG(hw, REG_SPI_FLASH_CTRL);
2790 if (value & SPI_FLASH_CTRL_EN_VPD) {
2791 value &= ~SPI_FLASH_CTRL_EN_VPD;
2792 ATL2_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value);
2793 }
2794 value = ATL2_READ_REGW(hw, REG_PCIE_CAP_LIST);
2795 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
2796}
2797
/* FIXME: This doesn't look right. -- CHS */
/* Stub: reports success without touching the hardware, so ethtool
 * set_eeprom appears to work but persists nothing.  Left as-is pending
 * a real implementation. */
static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value)
{
	return true;
}
2803
/*
 * atl2_read_eeprom - read one 32-bit word from the VPD EEPROM
 * @hw:     device state
 * @Offset: byte offset, must be 4-byte aligned
 * @pValue: output; receives the word on success
 *
 * Starts a VPD read and polls up to ~20ms for the completion flag.
 * Returns true on success, false on misalignment or timeout.
 */
static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue)
{
	int i;
	u32 Control;

	if (Offset & 0x3)
		return false; /* address do not align */

	ATL2_WRITE_REG(hw, REG_VPD_DATA, 0);
	/* program the word address and kick off the VPD read */
	Control = (Offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
	ATL2_WRITE_REG(hw, REG_VPD_CAP, Control);

	/* hardware sets VPD_CAP_VPD_FLAG when the data is ready */
	for (i = 0; i < 10; i++) {
		msleep(2);
		Control = ATL2_READ_REG(hw, REG_VPD_CAP);
		if (Control & VPD_CAP_VPD_FLAG)
			break;
	}

	if (Control & VPD_CAP_VPD_FLAG) {
		*pValue = ATL2_READ_REG(hw, REG_VPD_DATA);
		return true;
	}
	return false; /* timeout */
}
2829
/* Force the PHY into power-saving mode via its vendor debug registers.
 * NOTE(review): the register/value pairs (bit 0x1000 in debug reg 0,
 * 0x3000 in reg 2, 0 in reg 3) come from vendor code -- confirm against
 * the PHY datasheet before changing. */
static void atl2_force_ps(struct atl2_hw *hw)
{
	u16 phy_val;

	/* set the power-saving bit in debug register 0 (read-modify-write) */
	atl2_write_phy_reg(hw, MII_DBG_ADDR, 0);
	atl2_read_phy_reg(hw, MII_DBG_DATA, &phy_val);
	atl2_write_phy_reg(hw, MII_DBG_DATA, phy_val | 0x1000);

	atl2_write_phy_reg(hw, MII_DBG_ADDR, 2);
	atl2_write_phy_reg(hw, MII_DBG_DATA, 0x3000);
	atl2_write_phy_reg(hw, MII_DBG_ADDR, 3);
	atl2_write_phy_reg(hw, MII_DBG_DATA, 0);
}
2843
/* This is the only thing that needs to be changed to adjust the
 * maximum number of ports that the driver can manage.
 */
#define ATL2_MAX_NIC 4

#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1

/* All parameters are treated the same, as an integer array of values.
 * This macro just reduces the need to repeat the same declaration code
 * over and over (plus this helps to avoid typo bugs).
 */
#define ATL2_PARAM_INIT {[0 ... ATL2_MAX_NIC] = OPTION_UNSET}
#ifndef module_param_array
/* Module Parameters are always initialized to -1, so that the driver
 * can tell the difference between no user specified value or the
 * user asking for the default value.
 * The true default values are loaded in when atl2_check_options is called.
 *
 * This is a GCC extension to ANSI C.
 * See the item "Labeled Elements in Initializers" in the section
 * "Extensions to the C Language Family" of the GCC documentation.
 */
/* NOTE(review): declaring the array const while exposing it via
 * MODULE_PARM looks wrong (module load writes into it); confirm on
 * kernels old enough to take this branch. */
#define ATL2_PARAM(X, desc) \
	static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
	MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
	MODULE_PARM_DESC(X, desc);
#else
/* Modern kernels: a per-board array parameter plus num_<X>, the count
 * of values the user actually supplied on the command line. */
#define ATL2_PARAM(X, desc) \
	static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
	static int num_##X = 0; \
	module_param_array_named(X, X, int, &num_##X, 0); \
	MODULE_PARM_DESC(X, desc);
#endif
2880
/*
 * Transmit Memory Size (TxMemSize), in kilobytes
 * Valid Range: 4-64 (KB)
 * Default Value: 8 (KB)
 * (atl2_check_options() multiplies the value by 1024)
 */
#define ATL2_MIN_TX_MEMSIZE 4 /* 4KB */
#define ATL2_MAX_TX_MEMSIZE 64 /* 64KB */
#define ATL2_DEFAULT_TX_MEMSIZE 8 /* 8KB */
ATL2_PARAM(TxMemSize, "Bytes of Transmit Memory");

/*
 * Receive Memory Block Count (RxMemBlock)
 * Valid Range: 16-512
 * Default Value: 64
 */
#define ATL2_MIN_RXD_COUNT 16
#define ATL2_MAX_RXD_COUNT 512
#define ATL2_DEFAULT_RXD_COUNT 64
ATL2_PARAM(RxMemBlock, "Number of receive memory block");

/*
 * User Specified MediaType Override
 *
 * Valid Range: 0-5
 * - 0 - auto-negotiate at all supported speeds
 * - 1 - only link at 1000Mbps Full Duplex
 * - 2 - only link at 100Mbps Full Duplex
 * - 3 - only link at 100Mbps Half Duplex
 * - 4 - only link at 10Mbps Full Duplex
 * - 5 - only link at 10Mbps Half Duplex
 * Default Value: 0
 *
 * NOTE(review): this is a 10/100 device, so the 1000Mbps entry above is
 * suspect -- confirm against the MEDIA_TYPE_* enum in the header.
 */
ATL2_PARAM(MediaType, "MediaType Select");

/*
 * Interrupt Moderate Timer (IntModTimer) in units of 2048 ns (~2 us)
 * Valid Range: 50-65000
 * Default Value: 100 (~200us)
 */
#define INT_MOD_DEFAULT_CNT 100 /* 200us */
#define INT_MOD_MAX_CNT 65000
#define INT_MOD_MIN_CNT 50
ATL2_PARAM(IntModTimer, "Interrupt Moderator Timer");

/*
 * FlashVendor (index into flash_table)
 * Valid Range: 0-2
 * 0 - Atmel
 * 1 - SST
 * 2 - ST
 */
ATL2_PARAM(FlashVendor, "SPI Flash Vendor");

#define AUTONEG_ADV_DEFAULT 0x2F
#define AUTONEG_ADV_MASK 0x2F
#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL

/* bounds and default for the FlashVendor parameter */
#define FLASH_VENDOR_DEFAULT 0
#define FLASH_VENDOR_MIN 0
#define FLASH_VENDOR_MAX 2
2941
/* Describes one module parameter for atl2_validate_option(): its kind,
 * printable name, error-message fragment, default value, and either a
 * numeric range or a list of legal values. */
struct atl2_option {
	enum { enable_option, range_option, list_option } type;
	char *name;	/* human-readable parameter name for messages */
	char *err;	/* message fragment printed on invalid input */
	int def;	/* default used for OPTION_UNSET or bad input */
	union {
		struct { /* range_option info */
			int min;
			int max;
		} r;
		struct { /* list_option info */
			int nr;
			struct atl2_opt_list { int i; char *str; } *p;
		} l;
	} arg;
};
2958
2959static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
2960{
2961 int i;
2962 struct atl2_opt_list *ent;
2963
2964 if (*value == OPTION_UNSET) {
2965 *value = opt->def;
2966 return 0;
2967 }
2968
2969 switch (opt->type) {
2970 case enable_option:
2971 switch (*value) {
2972 case OPTION_ENABLED:
2973 printk(KERN_INFO "%s Enabled\n", opt->name);
2974 return 0;
2975 break;
2976 case OPTION_DISABLED:
2977 printk(KERN_INFO "%s Disabled\n", opt->name);
2978 return 0;
2979 break;
2980 }
2981 break;
2982 case range_option:
2983 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
2984 printk(KERN_INFO "%s set to %i\n", opt->name, *value);
2985 return 0;
2986 }
2987 break;
2988 case list_option:
2989 for (i = 0; i < opt->arg.l.nr; i++) {
2990 ent = &opt->arg.l.p[i];
2991 if (*value == ent->i) {
2992 if (ent->str[0] != '\0')
2993 printk(KERN_INFO "%s\n", ent->str);
2994 return 0;
2995 }
2996 }
2997 break;
2998 default:
2999 BUG();
3000 }
3001
3002 printk(KERN_INFO "Invalid %s specified (%i) %s\n",
3003 opt->name, *value, opt->err);
3004 *value = opt->def;
3005 return -1;
3006}
3007
/*
 * atl2_check_options - Range Checking for Command Line Parameters
 * @adapter: board private structure
 *
 * This routine checks all command line parameters for valid user
 * input. If an invalid value is given, or if no user specified
 * value exists, a default value is used. The final value is stored
 * in a variable in the adapter structure.
 *
 * NOTE(review): the braces of each "if (num_X > bd) { ... } else ..."
 * live inside #ifdef module_param_array blocks, so the function only
 * parses with a consistent preprocessor branch -- edit with care.
 */
static void __devinit atl2_check_options(struct atl2_adapter *adapter)
{
	int val;
	struct atl2_option opt;
	int bd = adapter->bd_number;
	if (bd >= ATL2_MAX_NIC) {
		printk(KERN_NOTICE "Warning: no configuration for board #%i\n",
			bd);
		printk(KERN_NOTICE "Using defaults for all values\n");
#ifndef module_param_array
		bd = ATL2_MAX_NIC;
#endif
	}

	/* Bytes of Transmit Memory (value is in KB, stored in bytes) */
	opt.type = range_option;
	opt.name = "Bytes of Transmit Memory";
	opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_TX_MEMSIZE);
	opt.def = ATL2_DEFAULT_TX_MEMSIZE;
	opt.arg.r.min = ATL2_MIN_TX_MEMSIZE;
	opt.arg.r.max = ATL2_MAX_TX_MEMSIZE;
#ifdef module_param_array
	if (num_TxMemSize > bd) {
#endif
		val = TxMemSize[bd];
		atl2_validate_option(&val, &opt);
		adapter->txd_ring_size = ((u32) val) * 1024;
#ifdef module_param_array
	} else
		adapter->txd_ring_size = ((u32)opt.def) * 1024;
#endif
	/* txs ring size: derived from txd size, capped at 160 */
	adapter->txs_ring_size = adapter->txd_ring_size / 128;
	if (adapter->txs_ring_size > 160)
		adapter->txs_ring_size = 160;

	/* Receive Memory Block Count */
	opt.type = range_option;
	opt.name = "Number of receive memory block";
	opt.err = "using default of " __MODULE_STRING(ATL2_DEFAULT_RXD_COUNT);
	opt.def = ATL2_DEFAULT_RXD_COUNT;
	opt.arg.r.min = ATL2_MIN_RXD_COUNT;
	opt.arg.r.max = ATL2_MAX_RXD_COUNT;
#ifdef module_param_array
	if (num_RxMemBlock > bd) {
#endif
		val = RxMemBlock[bd];
		atl2_validate_option(&val, &opt);
		adapter->rxd_ring_size = (u32)val;
		/* FIXME */
		/* ((u16)val)&~1; */ /* even number */
#ifdef module_param_array
	} else
		adapter->rxd_ring_size = (u32)opt.def;
#endif
	/* init RXD Flow control value: high mark at 7/8 of the ring,
	 * low mark at 1/12 of the ring but never below MIN/8 */
	adapter->hw.fc_rxd_hi = (adapter->rxd_ring_size / 8) * 7;
	adapter->hw.fc_rxd_lo = (ATL2_MIN_RXD_COUNT / 8) >
		(adapter->rxd_ring_size / 12) ? (ATL2_MIN_RXD_COUNT / 8) :
		(adapter->rxd_ring_size / 12);

	/* Interrupt Moderate Timer */
	opt.type = range_option;
	opt.name = "Interrupt Moderate Timer";
	opt.err = "using default of " __MODULE_STRING(INT_MOD_DEFAULT_CNT);
	opt.def = INT_MOD_DEFAULT_CNT;
	opt.arg.r.min = INT_MOD_MIN_CNT;
	opt.arg.r.max = INT_MOD_MAX_CNT;
#ifdef module_param_array
	if (num_IntModTimer > bd) {
#endif
		val = IntModTimer[bd];
		atl2_validate_option(&val, &opt);
		adapter->imt = (u16) val;
#ifdef module_param_array
	} else
		adapter->imt = (u16)(opt.def);
#endif
	/* Flash Vendor (index into flash_table) */
	opt.type = range_option;
	opt.name = "SPI Flash Vendor";
	opt.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT);
	opt.def = FLASH_VENDOR_DEFAULT;
	opt.arg.r.min = FLASH_VENDOR_MIN;
	opt.arg.r.max = FLASH_VENDOR_MAX;
#ifdef module_param_array
	if (num_FlashVendor > bd) {
#endif
		val = FlashVendor[bd];
		atl2_validate_option(&val, &opt);
		adapter->hw.flash_vendor = (u8) val;
#ifdef module_param_array
	} else
		adapter->hw.flash_vendor = (u8)(opt.def);
#endif
	/* MediaType (speed/duplex override) */
	opt.type = range_option;
	opt.name = "Speed/Duplex Selection";
	opt.err = "using default of " __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR);
	opt.def = MEDIA_TYPE_AUTO_SENSOR;
	opt.arg.r.min = MEDIA_TYPE_AUTO_SENSOR;
	opt.arg.r.max = MEDIA_TYPE_10M_HALF;
#ifdef module_param_array
	if (num_MediaType > bd) {
#endif
		val = MediaType[bd];
		atl2_validate_option(&val, &opt);
		adapter->hw.MediaType = (u16) val;
#ifdef module_param_array
	} else
		adapter->hw.MediaType = (u16)(opt.def);
#endif
}
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
new file mode 100644
index 000000000000..6e1f28ff227b
--- /dev/null
+++ b/drivers/net/atlx/atl2.h
@@ -0,0 +1,530 @@
1/* atl2.h -- atl2 driver definitions
2 *
3 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
4 * Copyright(c) 2006 xiong huang <xiong.huang@atheros.com>
5 * Copyright(c) 2007 Chris Snook <csnook@redhat.com>
6 *
7 * Derived from Intel e1000 driver
8 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; either version 2 of the License, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * more details.
19 *
20 * You should have received a copy of the GNU General Public License along with
21 * this program; if not, write to the Free Software Foundation, Inc., 59
22 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25#ifndef _ATL2_H_
26#define _ATL2_H_
27
28#include <asm/atomic.h>
29#include <linux/netdevice.h>
30
31#ifndef _ATL2_HW_H_
32#define _ATL2_HW_H_
33
34#ifndef _ATL2_OSDEP_H_
35#define _ATL2_OSDEP_H_
36
37#include <linux/pci.h>
38#include <linux/delay.h>
39#include <linux/interrupt.h>
40#include <linux/if_ether.h>
41
42#include "atlx.h"
43
44#ifdef ETHTOOL_OPS_COMPAT
45extern int ethtool_ioctl(struct ifreq *ifr);
46#endif
47
48#define PCI_COMMAND_REGISTER PCI_COMMAND
49#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
50#define ETH_ADDR_LEN ETH_ALEN
51
52#define ATL2_WRITE_REG(a, reg, value) (iowrite32((value), \
53 ((a)->hw_addr + (reg))))
54
55#define ATL2_WRITE_FLUSH(a) (ioread32((a)->hw_addr))
56
57#define ATL2_READ_REG(a, reg) (ioread32((a)->hw_addr + (reg)))
58
59#define ATL2_WRITE_REGB(a, reg, value) (iowrite8((value), \
60 ((a)->hw_addr + (reg))))
61
62#define ATL2_READ_REGB(a, reg) (ioread8((a)->hw_addr + (reg)))
63
64#define ATL2_WRITE_REGW(a, reg, value) (iowrite16((value), \
65 ((a)->hw_addr + (reg))))
66
67#define ATL2_READ_REGW(a, reg) (ioread16((a)->hw_addr + (reg)))
68
69#define ATL2_WRITE_REG_ARRAY(a, reg, offset, value) \
70 (iowrite32((value), (((a)->hw_addr + (reg)) + ((offset) << 2))))
71
72#define ATL2_READ_REG_ARRAY(a, reg, offset) \
73 (ioread32(((a)->hw_addr + (reg)) + ((offset) << 2)))
74
75#endif /* _ATL2_OSDEP_H_ */
76
77struct atl2_adapter;
78struct atl2_hw;
79
80/* function prototype */
81static s32 atl2_reset_hw(struct atl2_hw *hw);
82static s32 atl2_read_mac_addr(struct atl2_hw *hw);
83static s32 atl2_init_hw(struct atl2_hw *hw);
84static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
85 u16 *duplex);
86static u32 atl2_hash_mc_addr(struct atl2_hw *hw, u8 *mc_addr);
87static void atl2_hash_set(struct atl2_hw *hw, u32 hash_value);
88static s32 atl2_read_phy_reg(struct atl2_hw *hw, u16 reg_addr, u16 *phy_data);
89static s32 atl2_write_phy_reg(struct atl2_hw *hw, u32 reg_addr, u16 phy_data);
90static void atl2_read_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
91static void atl2_write_pci_cfg(struct atl2_hw *hw, u32 reg, u16 *value);
92static void atl2_set_mac_addr(struct atl2_hw *hw);
93static bool atl2_read_eeprom(struct atl2_hw *hw, u32 Offset, u32 *pValue);
94static bool atl2_write_eeprom(struct atl2_hw *hw, u32 offset, u32 value);
95static s32 atl2_phy_init(struct atl2_hw *hw);
96static int atl2_check_eeprom_exist(struct atl2_hw *hw);
97static void atl2_force_ps(struct atl2_hw *hw);
98
99/* register definition */
100
101/* Block IDLE Status Register */
102#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC is non-IDLE */
103#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC is non-IDLE */
104#define IDLE_STATUS_DMAR 8 /* 1: DMAR is non-IDLE */
105#define IDLE_STATUS_DMAW 4 /* 1: DMAW is non-IDLE */
106
107/* MDIO Control Register */
108#define MDIO_WAIT_TIMES 10
109
110/* MAC Control Register */
111#define MAC_CTRL_DBG_TX_BKPRESURE 0x100000 /* 1: TX max backoff */
112#define MAC_CTRL_MACLP_CLK_PHY 0x8000000 /* 1: 25MHz from phy */
113#define MAC_CTRL_HALF_LEFT_BUF_SHIFT 28
114#define MAC_CTRL_HALF_LEFT_BUF_MASK 0xF /* MAC retry buf x32B */
115
116/* Internal SRAM Partition Register */
117#define REG_SRAM_TXRAM_END 0x1500 /* Internal tail address of TXRAM
118 * default: 2byte*1024 */
119#define REG_SRAM_RXRAM_END 0x1502 /* Internal tail address of RXRAM
120 * default: 2byte*1024 */
121
122/* Descriptor Control register */
123#define REG_TXD_BASE_ADDR_LO 0x1544 /* The base address of the Transmit
124 * Data Mem low 32-bit(dword align) */
125#define REG_TXD_MEM_SIZE 0x1548 /* Transmit Data Memory size(by
126 * double word , max 256KB) */
127#define REG_TXS_BASE_ADDR_LO 0x154C /* The base address of the Transmit
128 * Status Memory low 32-bit(dword word
129 * align) */
130#define REG_TXS_MEM_SIZE 0x1550 /* double word unit, max 4*2047
131 * bytes. */
132#define REG_RXD_BASE_ADDR_LO 0x1554 /* The base address of the Transmit
133 * Status Memory low 32-bit(unit 8
134 * bytes) */
135#define REG_RXD_BUF_NUM 0x1558 /* Receive Data & Status Memory buffer
136 * number (unit 1536bytes, max
137 * 1536*2047) */
138
139/* DMAR Control Register */
140#define REG_DMAR 0x1580
141#define DMAR_EN 0x1 /* 1: Enable DMAR */
142
143/* TX Cur-Through (early tx threshold) Control Register */
144#define REG_TX_CUT_THRESH 0x1590 /* TxMac begin transmit packet
145 * threshold(unit word) */
146
147/* DMAW Control Register */
148#define REG_DMAW 0x15A0
149#define DMAW_EN 0x1
150
151/* Flow control register */
152#define REG_PAUSE_ON_TH 0x15A8 /* RXD high watermark of overflow
153 * threshold configuration register */
154#define REG_PAUSE_OFF_TH 0x15AA /* RXD lower watermark of overflow
155 * threshold configuration register */
156
157/* Mailbox Register */
158#define REG_MB_TXD_WR_IDX 0x15f0 /* double word align */
159#define REG_MB_RXD_RD_IDX 0x15F4 /* RXD Read index (unit: 1536byets) */
160
161/* Interrupt Status Register */
162#define ISR_TIMER 1 /* Interrupt when Timer counts down to zero */
163#define ISR_MANUAL 2 /* Software manual interrupt, for debug. Set
164 * when SW_MAN_INT_EN is set in Table 51
165 * Selene Master Control Register
166 * (Offset 0x1400). */
167#define ISR_RXF_OV 4 /* RXF overflow interrupt */
168#define ISR_TXF_UR 8 /* TXF underrun interrupt */
169#define ISR_TXS_OV 0x10 /* Internal transmit status buffer full
170 * interrupt */
171#define ISR_RXS_OV 0x20 /* Internal receive status buffer full
172 * interrupt */
173#define ISR_LINK_CHG 0x40 /* Link Status Change Interrupt */
174#define ISR_HOST_TXD_UR 0x80
175#define ISR_HOST_RXD_OV 0x100 /* Host rx data memory full , one pulse */
176#define ISR_DMAR_TO_RST 0x200 /* DMAR op timeout interrupt. SW should
177 * do Reset */
178#define ISR_DMAW_TO_RST 0x400
179#define ISR_PHY 0x800 /* phy interrupt */
180#define ISR_TS_UPDATE 0x10000 /* interrupt after new tx pkt status written
181 * to host */
182#define ISR_RS_UPDATE 0x20000 /* interrupt ater new rx pkt status written
183 * to host. */
184#define ISR_TX_EARLY 0x40000 /* interrupt when txmac begin transmit one
185 * packet */
186
187#define ISR_TX_EVENT (ISR_TXF_UR | ISR_TXS_OV | ISR_HOST_TXD_UR |\
188 ISR_TS_UPDATE | ISR_TX_EARLY)
189#define ISR_RX_EVENT (ISR_RXF_OV | ISR_RXS_OV | ISR_HOST_RXD_OV |\
190 ISR_RS_UPDATE)
191
192#define IMR_NORMAL_MASK (\
193 /*ISR_LINK_CHG |*/\
194 ISR_MANUAL |\
195 ISR_DMAR_TO_RST |\
196 ISR_DMAW_TO_RST |\
197 ISR_PHY |\
198 ISR_PHY_LINKDOWN |\
199 ISR_TS_UPDATE |\
200 ISR_RS_UPDATE)
201
202/* Receive MAC Statistics Registers */
203#define REG_STS_RX_PAUSE 0x1700 /* Num pause packets received */
204#define REG_STS_RXD_OV 0x1704 /* Num frames dropped due to RX
205 * FIFO overflow */
206#define REG_STS_RXS_OV 0x1708 /* Num frames dropped due to RX
207 * Status Buffer Overflow */
208#define REG_STS_RX_FILTER 0x170C /* Num packets dropped due to
209 * address filtering */
210
211/* MII definitions */
212
213/* PHY Common Register */
214#define MII_SMARTSPEED 0x14
215#define MII_DBG_ADDR 0x1D
216#define MII_DBG_DATA 0x1E
217
218/* PCI Command Register Bit Definitions */
219#define PCI_REG_COMMAND 0x04
220#define CMD_IO_SPACE 0x0001
221#define CMD_MEMORY_SPACE 0x0002
222#define CMD_BUS_MASTER 0x0004
223
224#define MEDIA_TYPE_100M_FULL 1
225#define MEDIA_TYPE_100M_HALF 2
226#define MEDIA_TYPE_10M_FULL 3
227#define MEDIA_TYPE_10M_HALF 4
228
229#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x000F /* Everything */
230
231/* The size (in bytes) of a ethernet packet */
232#define ENET_HEADER_SIZE 14
233#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
234#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
235#define ETHERNET_FCS_SIZE 4
236#define MAX_JUMBO_FRAME_SIZE 0x2000
237#define VLAN_SIZE 4
238
239struct tx_pkt_header {
240 unsigned pkt_size:11;
241 unsigned:4; /* reserved */
242 unsigned ins_vlan:1; /* txmac should insert vlan */
243 unsigned short vlan; /* vlan tag */
244};
245/* FIXME: replace above bitfields with MASK/SHIFT defines below */
246#define TX_PKT_HEADER_SIZE_MASK 0x7FF
247#define TX_PKT_HEADER_SIZE_SHIFT 0
248#define TX_PKT_HEADER_INS_VLAN_MASK 0x1
249#define TX_PKT_HEADER_INS_VLAN_SHIFT 15
250#define TX_PKT_HEADER_VLAN_TAG_MASK 0xFFFF
251#define TX_PKT_HEADER_VLAN_TAG_SHIFT 16
252
253struct tx_pkt_status {
254 unsigned pkt_size:11;
255 unsigned:5; /* reserved */
256 unsigned ok:1; /* current packet transmitted without error */
257 unsigned bcast:1; /* broadcast packet */
258 unsigned mcast:1; /* multicast packet */
259 unsigned pause:1; /* transmiited a pause frame */
260 unsigned ctrl:1;
261 unsigned defer:1; /* current packet is xmitted with defer */
262 unsigned exc_defer:1;
263 unsigned single_col:1;
264 unsigned multi_col:1;
265 unsigned late_col:1;
266 unsigned abort_col:1;
267 unsigned underun:1; /* current packet is aborted
268 * due to txram underrun */
269 unsigned:3; /* reserved */
270 unsigned update:1; /* always 1'b1 in tx_status_buf */
271};
272/* FIXME: replace above bitfields with MASK/SHIFT defines below */
273#define TX_PKT_STATUS_SIZE_MASK 0x7FF
274#define TX_PKT_STATUS_SIZE_SHIFT 0
275#define TX_PKT_STATUS_OK_MASK 0x1
276#define TX_PKT_STATUS_OK_SHIFT 16
277#define TX_PKT_STATUS_BCAST_MASK 0x1
278#define TX_PKT_STATUS_BCAST_SHIFT 17
279#define TX_PKT_STATUS_MCAST_MASK 0x1
280#define TX_PKT_STATUS_MCAST_SHIFT 18
281#define TX_PKT_STATUS_PAUSE_MASK 0x1
282#define TX_PKT_STATUS_PAUSE_SHIFT 19
283#define TX_PKT_STATUS_CTRL_MASK 0x1
284#define TX_PKT_STATUS_CTRL_SHIFT 20
285#define TX_PKT_STATUS_DEFER_MASK 0x1
286#define TX_PKT_STATUS_DEFER_SHIFT 21
287#define TX_PKT_STATUS_EXC_DEFER_MASK 0x1
288#define TX_PKT_STATUS_EXC_DEFER_SHIFT 22
289#define TX_PKT_STATUS_SINGLE_COL_MASK 0x1
290#define TX_PKT_STATUS_SINGLE_COL_SHIFT 23
291#define TX_PKT_STATUS_MULTI_COL_MASK 0x1
292#define TX_PKT_STATUS_MULTI_COL_SHIFT 24
293#define TX_PKT_STATUS_LATE_COL_MASK 0x1
294#define TX_PKT_STATUS_LATE_COL_SHIFT 25
295#define TX_PKT_STATUS_ABORT_COL_MASK 0x1
296#define TX_PKT_STATUS_ABORT_COL_SHIFT 26
297#define TX_PKT_STATUS_UNDERRUN_MASK 0x1
298#define TX_PKT_STATUS_UNDERRUN_SHIFT 27
299#define TX_PKT_STATUS_UPDATE_MASK 0x1
300#define TX_PKT_STATUS_UPDATE_SHIFT 31
301
302struct rx_pkt_status {
303 unsigned pkt_size:11; /* packet size, max 2047 bytes */
304 unsigned:5; /* reserved */
305 unsigned ok:1; /* current packet received ok without error */
306 unsigned bcast:1; /* current packet is broadcast */
307 unsigned mcast:1; /* current packet is multicast */
308 unsigned pause:1;
309 unsigned ctrl:1;
310 unsigned crc:1; /* received a packet with crc error */
311 unsigned code:1; /* received a packet with code error */
312 unsigned runt:1; /* received a packet less than 64 bytes
313 * with good crc */
314 unsigned frag:1; /* received a packet less than 64 bytes
315 * with bad crc */
316 unsigned trunc:1; /* current frame truncated due to rxram full */
317 unsigned align:1; /* this packet is alignment error */
318 unsigned vlan:1; /* this packet has vlan */
319 unsigned:3; /* reserved */
320 unsigned update:1;
321 unsigned short vtag; /* vlan tag */
322 unsigned:16;
323};
324/* FIXME: replace above bitfields with MASK/SHIFT defines below */
325#define RX_PKT_STATUS_SIZE_MASK 0x7FF
326#define RX_PKT_STATUS_SIZE_SHIFT 0
327#define RX_PKT_STATUS_OK_MASK 0x1
328#define RX_PKT_STATUS_OK_SHIFT 16
329#define RX_PKT_STATUS_BCAST_MASK 0x1
330#define RX_PKT_STATUS_BCAST_SHIFT 17
331#define RX_PKT_STATUS_MCAST_MASK 0x1
332#define RX_PKT_STATUS_MCAST_SHIFT 18
333#define RX_PKT_STATUS_PAUSE_MASK 0x1
334#define RX_PKT_STATUS_PAUSE_SHIFT 19
335#define RX_PKT_STATUS_CTRL_MASK 0x1
336#define RX_PKT_STATUS_CTRL_SHIFT 20
337#define RX_PKT_STATUS_CRC_MASK 0x1
338#define RX_PKT_STATUS_CRC_SHIFT 21
339#define RX_PKT_STATUS_CODE_MASK 0x1
340#define RX_PKT_STATUS_CODE_SHIFT 22
341#define RX_PKT_STATUS_RUNT_MASK 0x1
342#define RX_PKT_STATUS_RUNT_SHIFT 23
343#define RX_PKT_STATUS_FRAG_MASK 0x1
344#define RX_PKT_STATUS_FRAG_SHIFT 24
345#define RX_PKT_STATUS_TRUNK_MASK 0x1
346#define RX_PKT_STATUS_TRUNK_SHIFT 25
347#define RX_PKT_STATUS_ALIGN_MASK 0x1
348#define RX_PKT_STATUS_ALIGN_SHIFT 26
349#define RX_PKT_STATUS_VLAN_MASK 0x1
350#define RX_PKT_STATUS_VLAN_SHIFT 27
351#define RX_PKT_STATUS_UPDATE_MASK 0x1
352#define RX_PKT_STATUS_UPDATE_SHIFT 31
353#define RX_PKT_STATUS_VLAN_TAG_MASK 0xFFFF
354#define RX_PKT_STATUS_VLAN_TAG_SHIFT 32
355
356struct rx_desc {
357 struct rx_pkt_status status;
358 unsigned char packet[1536-sizeof(struct rx_pkt_status)];
359};
360
361enum atl2_speed_duplex {
362 atl2_10_half = 0,
363 atl2_10_full = 1,
364 atl2_100_half = 2,
365 atl2_100_full = 3
366};
367
368struct atl2_spi_flash_dev {
369 const char *manu_name; /* manufacturer id */
370 /* op-code */
371 u8 cmdWRSR;
372 u8 cmdREAD;
373 u8 cmdPROGRAM;
374 u8 cmdWREN;
375 u8 cmdWRDI;
376 u8 cmdRDSR;
377 u8 cmdRDID;
378 u8 cmdSECTOR_ERASE;
379 u8 cmdCHIP_ERASE;
380};
381
382/* Structure containing variables used by the shared code (atl2_hw.c) */
383struct atl2_hw {
384 u8 __iomem *hw_addr;
385 void *back;
386
387 u8 preamble_len;
388 u8 max_retry; /* Retransmission maximum, afterwards the
389 * packet will be discarded. */
390 u8 jam_ipg; /* IPG to start JAM for collision based flow
391 * control in half-duplex mode. In unit of
392 * 8-bit time. */
393 u8 ipgt; /* Desired back to back inter-packet gap. The
394 * default is 96-bit time. */
395 u8 min_ifg; /* Minimum number of IFG to enforce in between
396 * RX frames. Frame gap below such IFP is
397 * dropped. */
398 u8 ipgr1; /* 64bit Carrier-Sense window */
399 u8 ipgr2; /* 96-bit IPG window */
400 u8 retry_buf; /* When half-duplex mode, should hold some
401 * bytes for mac retry . (8*4bytes unit) */
402
403 u16 fc_rxd_hi;
404 u16 fc_rxd_lo;
405 u16 lcol; /* Collision Window */
406 u16 max_frame_size;
407
408 u16 MediaType;
409 u16 autoneg_advertised;
410 u16 pci_cmd_word;
411
412 u16 mii_autoneg_adv_reg;
413
414 u32 mem_rang;
415 u32 txcw;
416 u32 mc_filter_type;
417 u32 num_mc_addrs;
418 u32 collision_delta;
419 u32 tx_packet_delta;
420 u16 phy_spd_default;
421
422 u16 device_id;
423 u16 vendor_id;
424 u16 subsystem_id;
425 u16 subsystem_vendor_id;
426 u8 revision_id;
427
428 /* spi flash */
429 u8 flash_vendor;
430
431 u8 dma_fairness;
432 u8 mac_addr[NODE_ADDRESS_SIZE];
433 u8 perm_mac_addr[NODE_ADDRESS_SIZE];
434
435 /* FIXME */
436 /* bool phy_preamble_sup; */
437 bool phy_configured;
438};
439
440#endif /* _ATL2_HW_H_ */
441
442struct atl2_ring_header {
443 /* pointer to the descriptor ring memory */
444 void *desc;
445 /* physical adress of the descriptor ring */
446 dma_addr_t dma;
447 /* length of descriptor ring in bytes */
448 unsigned int size;
449};
450
451/* board specific private data structure */
452struct atl2_adapter {
453 /* OS defined structs */
454 struct net_device *netdev;
455 struct pci_dev *pdev;
456 struct net_device_stats net_stats;
457#ifdef NETIF_F_HW_VLAN_TX
458 struct vlan_group *vlgrp;
459#endif
460 u32 wol;
461 u16 link_speed;
462 u16 link_duplex;
463
464 spinlock_t stats_lock;
465 spinlock_t tx_lock;
466
467 struct work_struct reset_task;
468 struct work_struct link_chg_task;
469 struct timer_list watchdog_timer;
470 struct timer_list phy_config_timer;
471
472 unsigned long cfg_phy;
473 bool mac_disabled;
474
475 /* All Descriptor memory */
476 dma_addr_t ring_dma;
477 void *ring_vir_addr;
478 int ring_size;
479
480 struct tx_pkt_header *txd_ring;
481 dma_addr_t txd_dma;
482
483 struct tx_pkt_status *txs_ring;
484 dma_addr_t txs_dma;
485
486 struct rx_desc *rxd_ring;
487 dma_addr_t rxd_dma;
488
489 u32 txd_ring_size; /* bytes per unit */
490 u32 txs_ring_size; /* dwords per unit */
491 u32 rxd_ring_size; /* 1536 bytes per unit */
492
493 /* read /write ptr: */
494 /* host */
495 u32 txd_write_ptr;
496 u32 txs_next_clear;
497 u32 rxd_read_ptr;
498
499 /* nic */
500 atomic_t txd_read_ptr;
501 atomic_t txs_write_ptr;
502 u32 rxd_write_ptr;
503
504 /* Interrupt Moderator timer ( 2us resolution) */
505 u16 imt;
506 /* Interrupt Clear timer (2us resolution) */
507 u16 ict;
508
509 unsigned long flags;
510 /* structs defined in atl2_hw.h */
511 u32 bd_number; /* board number */
512 bool pci_using_64;
513 bool have_msi;
514 struct atl2_hw hw;
515
516 u32 usr_cmd;
517 /* FIXME */
518 /* u32 regs_buff[ATL2_REGS_LEN]; */
519 u32 pci_state[16];
520
521 u32 *config_space;
522};
523
524enum atl2_state_t {
525 __ATL2_TESTING,
526 __ATL2_RESETTING,
527 __ATL2_DOWN
528};
529
530#endif /* _ATL2_H_ */
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 5ee1b0557a02..92c16c37ff23 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -653,6 +653,8 @@ static struct net_device * au1000_probe(int port_num)
653 653
654 aup = dev->priv; 654 aup = dev->priv;
655 655
656 spin_lock_init(&aup->lock);
657
656 /* Allocate the data buffers */ 658 /* Allocate the data buffers */
657 /* Snooping works fine with eth on all au1xxx */ 659 /* Snooping works fine with eth on all au1xxx */
658 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * 660 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
@@ -753,7 +755,6 @@ static struct net_device * au1000_probe(int port_num)
753 aup->tx_db_inuse[i] = pDB; 755 aup->tx_db_inuse[i] = pDB;
754 } 756 }
755 757
756 spin_lock_init(&aup->lock);
757 dev->base_addr = base; 758 dev->base_addr = base;
758 dev->irq = irq; 759 dev->irq = irq;
759 dev->open = au1000_open; 760 dev->open = au1000_open;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index a886a4b9f7e5..4207d6efddc0 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -153,7 +153,7 @@ static void ax_reset_8390(struct net_device *dev)
153 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 153 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
154 if (jiffies - reset_start_time > 2*HZ/100) { 154 if (jiffies - reset_start_time > 2*HZ/100) {
155 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 155 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
156 __FUNCTION__, dev->name); 156 __func__, dev->name);
157 break; 157 break;
158 } 158 }
159 } 159 }
@@ -173,7 +173,7 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
173 if (ei_status.dmaing) { 173 if (ei_status.dmaing) {
174 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 174 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
175 "[DMAstat:%d][irqlock:%d].\n", 175 "[DMAstat:%d][irqlock:%d].\n",
176 dev->name, __FUNCTION__, 176 dev->name, __func__,
177 ei_status.dmaing, ei_status.irqlock); 177 ei_status.dmaing, ei_status.irqlock);
178 return; 178 return;
179 } 179 }
@@ -215,7 +215,7 @@ static void ax_block_input(struct net_device *dev, int count,
215 dev_err(&ax->dev->dev, 215 dev_err(&ax->dev->dev,
216 "%s: DMAing conflict in %s " 216 "%s: DMAing conflict in %s "
217 "[DMAstat:%d][irqlock:%d].\n", 217 "[DMAstat:%d][irqlock:%d].\n",
218 dev->name, __FUNCTION__, 218 dev->name, __func__,
219 ei_status.dmaing, ei_status.irqlock); 219 ei_status.dmaing, ei_status.irqlock);
220 return; 220 return;
221 } 221 }
@@ -260,7 +260,7 @@ static void ax_block_output(struct net_device *dev, int count,
260 if (ei_status.dmaing) { 260 if (ei_status.dmaing) {
261 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 261 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
262 "[DMAstat:%d][irqlock:%d]\n", 262 "[DMAstat:%d][irqlock:%d]\n",
263 dev->name, __FUNCTION__, 263 dev->name, __func__,
264 ei_status.dmaing, ei_status.irqlock); 264 ei_status.dmaing, ei_status.irqlock);
265 return; 265 return;
266 } 266 }
@@ -396,7 +396,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
396{ 396{
397 if (phy_debug) 397 if (phy_debug)
398 pr_debug("%s: dev %p, %04x, %04x, %d\n", 398 pr_debug("%s: dev %p, %04x, %04x, %d\n",
399 __FUNCTION__, dev, phy_addr, reg, opc); 399 __func__, dev, phy_addr, reg, opc);
400 400
401 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */ 401 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */
402 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */ 402 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */
@@ -422,7 +422,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
422 spin_unlock_irqrestore(&ei_local->page_lock, flags); 422 spin_unlock_irqrestore(&ei_local->page_lock, flags);
423 423
424 if (phy_debug) 424 if (phy_debug)
425 pr_debug("%s: %04x.%04x => read %04x\n", __FUNCTION__, 425 pr_debug("%s: %04x.%04x => read %04x\n", __func__,
426 phy_addr, reg, result); 426 phy_addr, reg, result);
427 427
428 return result; 428 return result;
@@ -436,7 +436,7 @@ ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
436 unsigned long flags; 436 unsigned long flags;
437 437
438 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n", 438 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
439 __FUNCTION__, dev, phy_addr, reg, value); 439 __func__, dev, phy_addr, reg, value);
440 440
441 spin_lock_irqsave(&ei->page_lock, flags); 441 spin_lock_irqsave(&ei->page_lock, flags);
442 442
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 3db7db1828e7..df896e23e2c5 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -811,7 +811,7 @@ static void bfin_mac_enable(void)
811{ 811{
812 u32 opmode; 812 u32 opmode;
813 813
814 pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__); 814 pr_debug("%s: %s\n", DRV_NAME, __func__);
815 815
816 /* Set RX DMA */ 816 /* Set RX DMA */
817 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); 817 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -847,7 +847,7 @@ static void bfin_mac_enable(void)
847/* Our watchdog timed out. Called by the networking layer */ 847/* Our watchdog timed out. Called by the networking layer */
848static void bfin_mac_timeout(struct net_device *dev) 848static void bfin_mac_timeout(struct net_device *dev)
849{ 849{
850 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 850 pr_debug("%s: %s\n", dev->name, __func__);
851 851
852 bfin_mac_disable(); 852 bfin_mac_disable();
853 853
@@ -949,7 +949,7 @@ static int bfin_mac_open(struct net_device *dev)
949{ 949{
950 struct bfin_mac_local *lp = netdev_priv(dev); 950 struct bfin_mac_local *lp = netdev_priv(dev);
951 int retval; 951 int retval;
952 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 952 pr_debug("%s: %s\n", dev->name, __func__);
953 953
954 /* 954 /*
955 * Check that the address is valid. If its not, refuse 955 * Check that the address is valid. If its not, refuse
@@ -989,7 +989,7 @@ static int bfin_mac_open(struct net_device *dev)
989static int bfin_mac_close(struct net_device *dev) 989static int bfin_mac_close(struct net_device *dev)
990{ 990{
991 struct bfin_mac_local *lp = netdev_priv(dev); 991 struct bfin_mac_local *lp = netdev_priv(dev);
992 pr_debug("%s: %s\n", dev->name, __FUNCTION__); 992 pr_debug("%s: %s\n", dev->name, __func__);
993 993
994 netif_stop_queue(dev); 994 netif_stop_queue(dev);
995 netif_carrier_off(dev); 995 netif_carrier_off(dev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 2486a656f12d..883e0a724107 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -69,7 +69,7 @@ static char version[] __devinitdata =
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 70
71MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); 71MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 Driver"); 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
73MODULE_LICENSE("GPL"); 73MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION); 74MODULE_VERSION(DRV_MODULE_VERSION);
75 75
@@ -1127,7 +1127,7 @@ bnx2_init_all_rx_contexts(struct bnx2 *bp)
1127 } 1127 }
1128} 1128}
1129 1129
1130static int 1130static void
1131bnx2_set_mac_link(struct bnx2 *bp) 1131bnx2_set_mac_link(struct bnx2 *bp)
1132{ 1132{
1133 u32 val; 1133 u32 val;
@@ -1193,8 +1193,6 @@ bnx2_set_mac_link(struct bnx2 *bp)
1193 1193
1194 if (CHIP_NUM(bp) == CHIP_NUM_5709) 1194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1195 bnx2_init_all_rx_contexts(bp); 1195 bnx2_init_all_rx_contexts(bp);
1196
1197 return 0;
1198} 1196}
1199 1197
1200static void 1198static void
@@ -5600,7 +5598,7 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5600 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) { 5598 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5601 u32 bmcr; 5599 u32 bmcr;
5602 5600
5603 bp->current_interval = bp->timer_interval; 5601 bp->current_interval = BNX2_TIMER_INTERVAL;
5604 5602
5605 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 5603 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5606 5604
@@ -5629,7 +5627,7 @@ bnx2_5706_serdes_timer(struct bnx2 *bp)
5629 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; 5627 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
5630 } 5628 }
5631 } else 5629 } else
5632 bp->current_interval = bp->timer_interval; 5630 bp->current_interval = BNX2_TIMER_INTERVAL;
5633 5631
5634 if (check_link) { 5632 if (check_link) {
5635 u32 val; 5633 u32 val;
@@ -5674,11 +5672,11 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
5674 } else { 5672 } else {
5675 bnx2_disable_forced_2g5(bp); 5673 bnx2_disable_forced_2g5(bp);
5676 bp->serdes_an_pending = 2; 5674 bp->serdes_an_pending = 2;
5677 bp->current_interval = bp->timer_interval; 5675 bp->current_interval = BNX2_TIMER_INTERVAL;
5678 } 5676 }
5679 5677
5680 } else 5678 } else
5681 bp->current_interval = bp->timer_interval; 5679 bp->current_interval = BNX2_TIMER_INTERVAL;
5682 5680
5683 spin_unlock(&bp->phy_lock); 5681 spin_unlock(&bp->phy_lock);
5684} 5682}
@@ -7516,8 +7514,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7516 7514
7517 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS; 7515 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7518 7516
7519 bp->timer_interval = HZ; 7517 bp->current_interval = BNX2_TIMER_INTERVAL;
7520 bp->current_interval = HZ;
7521 7518
7522 bp->phy_addr = 1; 7519 bp->phy_addr = 1;
7523 7520
@@ -7607,7 +7604,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7607 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; 7604 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7608 7605
7609 init_timer(&bp->timer); 7606 init_timer(&bp->timer);
7610 bp->timer.expires = RUN_AT(bp->timer_interval); 7607 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7611 bp->timer.data = (unsigned long) bp; 7608 bp->timer.data = (unsigned long) bp;
7612 bp->timer.function = bnx2_timer; 7609 bp->timer.function = bnx2_timer;
7613 7610
@@ -7720,7 +7717,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7720 7717
7721 memcpy(dev->dev_addr, bp->mac_addr, 6); 7718 memcpy(dev->dev_addr, bp->mac_addr, 6);
7722 memcpy(dev->perm_addr, bp->mac_addr, 6); 7719 memcpy(dev->perm_addr, bp->mac_addr, 6);
7723 bp->name = board_info[ent->driver_data].name;
7724 7720
7725 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 7721 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
7726 if (CHIP_NUM(bp) == CHIP_NUM_5709) 7722 if (CHIP_NUM(bp) == CHIP_NUM_5709)
@@ -7747,7 +7743,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7747 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " 7743 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7748 "IRQ %d, node addr %s\n", 7744 "IRQ %d, node addr %s\n",
7749 dev->name, 7745 dev->name,
7750 bp->name, 7746 board_info[ent->driver_data].name,
7751 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 7747 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7752 ((CHIP_ID(bp) & 0x0ff0) >> 4), 7748 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7753 bnx2_bus_string(bp, str), 7749 bnx2_bus_string(bp, str),
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index dfacd31f7ed0..edc7774f2f21 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6654,6 +6654,8 @@ struct bnx2_napi {
6654 struct bnx2_tx_ring_info tx_ring; 6654 struct bnx2_tx_ring_info tx_ring;
6655}; 6655};
6656 6656
6657#define BNX2_TIMER_INTERVAL HZ
6658
6657struct bnx2 { 6659struct bnx2 {
6658 /* Fields used in the tx and intr/napi performance paths are grouped */ 6660 /* Fields used in the tx and intr/napi performance paths are grouped */
6659 /* together in the beginning of the structure. */ 6661 /* together in the beginning of the structure. */
@@ -6701,9 +6703,6 @@ struct bnx2 {
6701 6703
6702 /* End of fields used in the performance code paths. */ 6704 /* End of fields used in the performance code paths. */
6703 6705
6704 char *name;
6705
6706 int timer_interval;
6707 int current_interval; 6706 int current_interval;
6708 struct timer_list timer; 6707 struct timer_list timer;
6709 struct work_struct reset_task; 6708 struct work_struct reset_task;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index a8eb3c4a47c8..fce745148ff9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
59#include "bnx2x.h" 59#include "bnx2x.h"
60#include "bnx2x_init.h" 60#include "bnx2x_init.h"
61 61
62#define DRV_MODULE_VERSION "1.45.21" 62#define DRV_MODULE_VERSION "1.45.22"
63#define DRV_MODULE_RELDATE "2008/09/03" 63#define DRV_MODULE_RELDATE "2008/09/09"
64#define BNX2X_BC_VER 0x040200 64#define BNX2X_BC_VER 0x040200
65 65
66/* Time in jiffies before concluding the transmitter is hung */ 66/* Time in jiffies before concluding the transmitter is hung */
@@ -649,15 +649,16 @@ static void bnx2x_int_disable(struct bnx2x *bp)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
650} 650}
651 651
652static void bnx2x_int_disable_sync(struct bnx2x *bp) 652static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
653{ 653{
654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 654 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
655 int i; 655 int i;
656 656
657 /* disable interrupt handling */ 657 /* disable interrupt handling */
658 atomic_inc(&bp->intr_sem); 658 atomic_inc(&bp->intr_sem);
659 /* prevent the HW from sending interrupts */ 659 if (disable_hw)
660 bnx2x_int_disable(bp); 660 /* prevent the HW from sending interrupts */
661 bnx2x_int_disable(bp);
661 662
662 /* make sure all ISRs are done */ 663 /* make sure all ISRs are done */
663 if (msix) { 664 if (msix) {
@@ -6086,9 +6087,9 @@ static void bnx2x_netif_start(struct bnx2x *bp)
6086 } 6087 }
6087} 6088}
6088 6089
6089static void bnx2x_netif_stop(struct bnx2x *bp) 6090static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6090{ 6091{
6091 bnx2x_int_disable_sync(bp); 6092 bnx2x_int_disable_sync(bp, disable_hw);
6092 if (netif_running(bp->dev)) { 6093 if (netif_running(bp->dev)) {
6093 bnx2x_napi_disable(bp); 6094 bnx2x_napi_disable(bp);
6094 netif_tx_disable(bp->dev); 6095 netif_tx_disable(bp->dev);
@@ -6475,7 +6476,7 @@ load_rings_free:
6475 for_each_queue(bp, i) 6476 for_each_queue(bp, i)
6476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 6477 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6477load_int_disable: 6478load_int_disable:
6478 bnx2x_int_disable_sync(bp); 6479 bnx2x_int_disable_sync(bp, 1);
6479 /* Release IRQs */ 6480 /* Release IRQs */
6480 bnx2x_free_irq(bp); 6481 bnx2x_free_irq(bp);
6481load_error: 6482load_error:
@@ -6650,7 +6651,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
6650 bp->rx_mode = BNX2X_RX_MODE_NONE; 6651 bp->rx_mode = BNX2X_RX_MODE_NONE;
6651 bnx2x_set_storm_rx_mode(bp); 6652 bnx2x_set_storm_rx_mode(bp);
6652 6653
6653 bnx2x_netif_stop(bp); 6654 bnx2x_netif_stop(bp, 1);
6654 if (!netif_running(bp->dev)) 6655 if (!netif_running(bp->dev))
6655 bnx2x_napi_disable(bp); 6656 bnx2x_napi_disable(bp);
6656 del_timer_sync(&bp->timer); 6657 del_timer_sync(&bp->timer);
@@ -8791,7 +8792,7 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8791 if (!netif_running(bp->dev)) 8792 if (!netif_running(bp->dev))
8792 return BNX2X_LOOPBACK_FAILED; 8793 return BNX2X_LOOPBACK_FAILED;
8793 8794
8794 bnx2x_netif_stop(bp); 8795 bnx2x_netif_stop(bp, 1);
8795 8796
8796 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) { 8797 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8797 DP(NETIF_MSG_PROBE, "MAC loopback failed\n"); 8798 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
@@ -10346,6 +10347,74 @@ static int bnx2x_resume(struct pci_dev *pdev)
10346 return rc; 10347 return rc;
10347} 10348}
10348 10349
10350static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10351{
10352 int i;
10353
10354 bp->state = BNX2X_STATE_ERROR;
10355
10356 bp->rx_mode = BNX2X_RX_MODE_NONE;
10357
10358 bnx2x_netif_stop(bp, 0);
10359
10360 del_timer_sync(&bp->timer);
10361 bp->stats_state = STATS_STATE_DISABLED;
10362 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10363
10364 /* Release IRQs */
10365 bnx2x_free_irq(bp);
10366
10367 if (CHIP_IS_E1(bp)) {
10368 struct mac_configuration_cmd *config =
10369 bnx2x_sp(bp, mcast_config);
10370
10371 for (i = 0; i < config->hdr.length_6b; i++)
10372 CAM_INVALIDATE(config->config_table[i]);
10373 }
10374
10375 /* Free SKBs, SGEs, TPA pool and driver internals */
10376 bnx2x_free_skbs(bp);
10377 for_each_queue(bp, i)
10378 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
10379 bnx2x_free_mem(bp);
10380
10381 bp->state = BNX2X_STATE_CLOSED;
10382
10383 netif_carrier_off(bp->dev);
10384
10385 return 0;
10386}
10387
10388static void bnx2x_eeh_recover(struct bnx2x *bp)
10389{
10390 u32 val;
10391
10392 mutex_init(&bp->port.phy_mutex);
10393
10394 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10395 bp->link_params.shmem_base = bp->common.shmem_base;
10396 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10397
10398 if (!bp->common.shmem_base ||
10399 (bp->common.shmem_base < 0xA0000) ||
10400 (bp->common.shmem_base >= 0xC0000)) {
10401 BNX2X_DEV_INFO("MCP not active\n");
10402 bp->flags |= NO_MCP_FLAG;
10403 return;
10404 }
10405
10406 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10407 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10408 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10409 BNX2X_ERR("BAD MCP validity signature\n");
10410
10411 if (!BP_NOMCP(bp)) {
10412 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10413 & DRV_MSG_SEQ_NUMBER_MASK);
10414 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10415 }
10416}
10417
10349/** 10418/**
10350 * bnx2x_io_error_detected - called when PCI error is detected 10419 * bnx2x_io_error_detected - called when PCI error is detected
10351 * @pdev: Pointer to PCI device 10420 * @pdev: Pointer to PCI device
@@ -10365,7 +10434,7 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10365 netif_device_detach(dev); 10434 netif_device_detach(dev);
10366 10435
10367 if (netif_running(dev)) 10436 if (netif_running(dev))
10368 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 10437 bnx2x_eeh_nic_unload(bp);
10369 10438
10370 pci_disable_device(pdev); 10439 pci_disable_device(pdev);
10371 10440
@@ -10420,8 +10489,10 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
10420 10489
10421 rtnl_lock(); 10490 rtnl_lock();
10422 10491
10492 bnx2x_eeh_recover(bp);
10493
10423 if (netif_running(dev)) 10494 if (netif_running(dev))
10424 bnx2x_nic_load(bp, LOAD_OPEN); 10495 bnx2x_nic_load(bp, LOAD_NORMAL);
10425 10496
10426 netif_device_attach(dev); 10497 netif_device_attach(dev);
10427 10498
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index b211486a0ca3..ade5f3f6693b 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -38,6 +38,7 @@
38#include <linux/in.h> 38#include <linux/in.h>
39#include <net/ipx.h> 39#include <net/ipx.h>
40#include <net/arp.h> 40#include <net/arp.h>
41#include <net/ipv6.h>
41#include <asm/byteorder.h> 42#include <asm/byteorder.h>
42#include "bonding.h" 43#include "bonding.h"
43#include "bond_alb.h" 44#include "bond_alb.h"
@@ -81,6 +82,7 @@
81#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC 82#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC
82 83
83static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff}; 84static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff};
85static const u8 mac_v6_allmcast[ETH_ALEN] = {0x33,0x33,0x00,0x00,0x00,0x01};
84static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; 86static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
85 87
86#pragma pack(1) 88#pragma pack(1)
@@ -710,7 +712,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
710 struct arp_pkt *arp = arp_pkt(skb); 712 struct arp_pkt *arp = arp_pkt(skb);
711 struct slave *tx_slave = NULL; 713 struct slave *tx_slave = NULL;
712 714
713 if (arp->op_code == __constant_htons(ARPOP_REPLY)) { 715 if (arp->op_code == htons(ARPOP_REPLY)) {
714 /* the arp must be sent on the selected 716 /* the arp must be sent on the selected
715 * rx channel 717 * rx channel
716 */ 718 */
@@ -719,7 +721,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
719 memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN); 721 memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
720 } 722 }
721 dprintk("Server sent ARP Reply packet\n"); 723 dprintk("Server sent ARP Reply packet\n");
722 } else if (arp->op_code == __constant_htons(ARPOP_REQUEST)) { 724 } else if (arp->op_code == htons(ARPOP_REQUEST)) {
723 /* Create an entry in the rx_hashtbl for this client as a 725 /* Create an entry in the rx_hashtbl for this client as a
724 * place holder. 726 * place holder.
725 * When the arp reply is received the entry will be updated 727 * When the arp reply is received the entry will be updated
@@ -1290,6 +1292,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1290 u32 hash_index = 0; 1292 u32 hash_index = 0;
1291 const u8 *hash_start = NULL; 1293 const u8 *hash_start = NULL;
1292 int res = 1; 1294 int res = 1;
1295 struct ipv6hdr *ip6hdr;
1293 1296
1294 skb_reset_mac_header(skb); 1297 skb_reset_mac_header(skb);
1295 eth_data = eth_hdr(skb); 1298 eth_data = eth_hdr(skb);
@@ -1319,11 +1322,32 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1319 } 1322 }
1320 break; 1323 break;
1321 case ETH_P_IPV6: 1324 case ETH_P_IPV6:
1325 /* IPv6 doesn't really use broadcast mac address, but leave
1326 * that here just in case.
1327 */
1322 if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { 1328 if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) {
1323 do_tx_balance = 0; 1329 do_tx_balance = 0;
1324 break; 1330 break;
1325 } 1331 }
1326 1332
1333 /* IPv6 uses all-nodes multicast as an equivalent to
1334 * broadcasts in IPv4.
1335 */
1336 if (memcmp(eth_data->h_dest, mac_v6_allmcast, ETH_ALEN) == 0) {
1337 do_tx_balance = 0;
1338 break;
1339 }
1340
1341 /* Additionally, DAD probes should not be tx-balanced as that
1342 * will lead to false positives for duplicate addresses and
1343 * prevent address configuration from working.
1344 */
1345 ip6hdr = ipv6_hdr(skb);
1346 if (ipv6_addr_any(&ip6hdr->saddr)) {
1347 do_tx_balance = 0;
1348 break;
1349 }
1350
1327 hash_start = (char *)&(ipv6_hdr(skb)->daddr); 1351 hash_start = (char *)&(ipv6_hdr(skb)->daddr);
1328 hash_size = sizeof(ipv6_hdr(skb)->daddr); 1352 hash_size = sizeof(ipv6_hdr(skb)->daddr);
1329 break; 1353 break;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c792138511e6..8e2be24f3fe4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3702,7 +3702,7 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
3702 struct ethhdr *data = (struct ethhdr *)skb->data; 3702 struct ethhdr *data = (struct ethhdr *)skb->data;
3703 struct iphdr *iph = ip_hdr(skb); 3703 struct iphdr *iph = ip_hdr(skb);
3704 3704
3705 if (skb->protocol == __constant_htons(ETH_P_IP)) { 3705 if (skb->protocol == htons(ETH_P_IP)) {
3706 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ 3706 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3707 (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count; 3707 (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count;
3708 } 3708 }
@@ -3723,8 +3723,8 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
3723 __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); 3723 __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
3724 int layer4_xor = 0; 3724 int layer4_xor = 0;
3725 3725
3726 if (skb->protocol == __constant_htons(ETH_P_IP)) { 3726 if (skb->protocol == htons(ETH_P_IP)) {
3727 if (!(iph->frag_off & __constant_htons(IP_MF|IP_OFFSET)) && 3727 if (!(iph->frag_off & htons(IP_MF|IP_OFFSET)) &&
3728 (iph->protocol == IPPROTO_TCP || 3728 (iph->protocol == IPPROTO_TCP ||
3729 iph->protocol == IPPROTO_UDP)) { 3729 iph->protocol == IPPROTO_UDP)) {
3730 layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); 3730 layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1)));
@@ -4493,6 +4493,12 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4493 4493
4494static const struct ethtool_ops bond_ethtool_ops = { 4494static const struct ethtool_ops bond_ethtool_ops = {
4495 .get_drvinfo = bond_ethtool_get_drvinfo, 4495 .get_drvinfo = bond_ethtool_get_drvinfo,
4496 .get_link = ethtool_op_get_link,
4497 .get_tx_csum = ethtool_op_get_tx_csum,
4498 .get_sg = ethtool_op_get_sg,
4499 .get_tso = ethtool_op_get_tso,
4500 .get_ufo = ethtool_op_get_ufo,
4501 .get_flags = ethtool_op_get_flags,
4496}; 4502};
4497 4503
4498/* 4504/*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index fb730ec0396f..ffb668dd6d3b 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -32,7 +32,7 @@
32#ifdef BONDING_DEBUG 32#ifdef BONDING_DEBUG
33#define dprintk(fmt, args...) \ 33#define dprintk(fmt, args...) \
34 printk(KERN_DEBUG \ 34 printk(KERN_DEBUG \
35 DRV_NAME ": %s() %d: " fmt, __FUNCTION__, __LINE__ , ## args ) 35 DRV_NAME ": %s() %d: " fmt, __func__, __LINE__ , ## args )
36#else 36#else
37#define dprintk(fmt, args...) 37#define dprintk(fmt, args...)
38#endif /* BONDING_DEBUG */ 38#endif /* BONDING_DEBUG */
@@ -333,5 +333,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
333void bond_register_arp(struct bonding *); 333void bond_register_arp(struct bonding *);
334void bond_unregister_arp(struct bonding *); 334void bond_unregister_arp(struct bonding *);
335 335
336/* exported from bond_main.c */
337extern struct list_head bond_dev_list;
338extern struct bond_parm_tbl bond_lacp_tbl[];
339extern struct bond_parm_tbl bond_mode_tbl[];
340extern struct bond_parm_tbl xmit_hashtype_tbl[];
341extern struct bond_parm_tbl arp_validate_tbl[];
342extern struct bond_parm_tbl fail_over_mac_tbl[];
343
336#endif /* _LINUX_BONDING_H */ 344#endif /* _LINUX_BONDING_H */
337 345
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f1936d51b458..86909cfb14de 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -74,6 +74,7 @@
74#include <linux/slab.h> 74#include <linux/slab.h>
75#include <linux/delay.h> 75#include <linux/delay.h>
76#include <linux/init.h> 76#include <linux/init.h>
77#include <linux/vmalloc.h>
77#include <linux/ioport.h> 78#include <linux/ioport.h>
78#include <linux/pci.h> 79#include <linux/pci.h>
79#include <linux/mm.h> 80#include <linux/mm.h>
@@ -91,6 +92,7 @@
91#include <linux/ip.h> 92#include <linux/ip.h>
92#include <linux/tcp.h> 93#include <linux/tcp.h>
93#include <linux/mutex.h> 94#include <linux/mutex.h>
95#include <linux/firmware.h>
94 96
95#include <net/checksum.h> 97#include <net/checksum.h>
96 98
@@ -197,6 +199,7 @@ static int link_mode;
197MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)"); 199MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
198MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); 200MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
199MODULE_LICENSE("GPL"); 201MODULE_LICENSE("GPL");
202MODULE_FIRMWARE("sun/cassini.bin");
200module_param(cassini_debug, int, 0); 203module_param(cassini_debug, int, 0);
201MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); 204MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
202module_param(link_mode, int, 0); 205module_param(link_mode, int, 0);
@@ -812,9 +815,44 @@ static int cas_reset_mii_phy(struct cas *cp)
812 return (limit <= 0); 815 return (limit <= 0);
813} 816}
814 817
818static int cas_saturn_firmware_init(struct cas *cp)
819{
820 const struct firmware *fw;
821 const char fw_name[] = "sun/cassini.bin";
822 int err;
823
824 if (PHY_NS_DP83065 != cp->phy_id)
825 return 0;
826
827 err = request_firmware(&fw, fw_name, &cp->pdev->dev);
828 if (err) {
829 printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
830 fw_name);
831 return err;
832 }
833 if (fw->size < 2) {
834 printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
835 fw->size, fw_name);
836 err = -EINVAL;
837 goto out;
838 }
839 cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
840 cp->fw_size = fw->size - 2;
841 cp->fw_data = vmalloc(cp->fw_size);
842 if (!cp->fw_data) {
843 err = -ENOMEM;
844 printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
845 goto out;
846 }
847 memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
848out:
849 release_firmware(fw);
850 return err;
851}
852
815static void cas_saturn_firmware_load(struct cas *cp) 853static void cas_saturn_firmware_load(struct cas *cp)
816{ 854{
817 cas_saturn_patch_t *patch = cas_saturn_patch; 855 int i;
818 856
819 cas_phy_powerdown(cp); 857 cas_phy_powerdown(cp);
820 858
@@ -833,11 +871,9 @@ static void cas_saturn_firmware_load(struct cas *cp)
833 871
834 /* download new firmware */ 872 /* download new firmware */
835 cas_phy_write(cp, DP83065_MII_MEM, 0x1); 873 cas_phy_write(cp, DP83065_MII_MEM, 0x1);
836 cas_phy_write(cp, DP83065_MII_REGE, patch->addr); 874 cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
837 while (patch->addr) { 875 for (i = 0; i < cp->fw_size; i++)
838 cas_phy_write(cp, DP83065_MII_REGD, patch->val); 876 cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
839 patch++;
840 }
841 877
842 /* enable firmware */ 878 /* enable firmware */
843 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); 879 cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
@@ -2182,7 +2218,7 @@ static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
2182 * do any additional locking here. stick the buffer 2218 * do any additional locking here. stick the buffer
2183 * at the end. 2219 * at the end.
2184 */ 2220 */
2185 __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow); 2221 __skb_queue_tail(flow, skb);
2186 if (words[0] & RX_COMP1_RELEASE_FLOW) { 2222 if (words[0] & RX_COMP1_RELEASE_FLOW) {
2187 while ((skb = __skb_dequeue(flow))) { 2223 while ((skb = __skb_dequeue(flow))) {
2188 cas_skb_release(skb); 2224 cas_skb_release(skb);
@@ -5108,6 +5144,9 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5108 cas_reset(cp, 0); 5144 cas_reset(cp, 0);
5109 if (cas_check_invariants(cp)) 5145 if (cas_check_invariants(cp))
5110 goto err_out_iounmap; 5146 goto err_out_iounmap;
5147 if (cp->cas_flags & CAS_FLAG_SATURN)
5148 if (cas_saturn_firmware_init(cp))
5149 goto err_out_iounmap;
5111 5150
5112 cp->init_block = (struct cas_init_block *) 5151 cp->init_block = (struct cas_init_block *)
5113 pci_alloc_consistent(pdev, sizeof(struct cas_init_block), 5152 pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
@@ -5217,6 +5256,9 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
5217 cp = netdev_priv(dev); 5256 cp = netdev_priv(dev);
5218 unregister_netdev(dev); 5257 unregister_netdev(dev);
5219 5258
5259 if (cp->fw_data)
5260 vfree(cp->fw_data);
5261
5220 mutex_lock(&cp->pm_mutex); 5262 mutex_lock(&cp->pm_mutex);
5221 flush_scheduled_work(); 5263 flush_scheduled_work();
5222 if (cp->hw_running) 5264 if (cp->hw_running)
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index 552af89ca1cf..fd17a002b453 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -2514,1523 +2514,6 @@ static cas_hp_inst_t cas_prog_null[] = { {NULL} };
2514#define CAS_HP_FIRMWARE cas_prog_null 2514#define CAS_HP_FIRMWARE cas_prog_null
2515#endif 2515#endif
2516 2516
2517/* firmware patch for NS_DP83065 */
2518typedef struct cas_saturn_patch {
2519 u16 addr;
2520 u16 val;
2521} cas_saturn_patch_t;
2522
2523#if 1
2524cas_saturn_patch_t cas_saturn_patch[] = {
2525{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
2526{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
2527{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
2528{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
2529{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
2530{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
2531{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
2532{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
2533{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
2534{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
2535{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
2536{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
2537{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
2538{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
2539{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
2540{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
2541{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
2542{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
2543{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
2544{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
2545{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
2546{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
2547{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
2548{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
2549{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
2550{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
2551{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
2552{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
2553{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
2554{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
2555{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
2556{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
2557{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
2558{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
2559{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
2560{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
2561{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
2562{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
2563{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
2564{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
2565{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
2566{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
2567{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
2568{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
2569{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
2570{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
2571{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
2572{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
2573{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
2574{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
2575{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
2576{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
2577{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
2578{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
2579{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
2580{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
2581{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
2582{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
2583{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
2584{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
2585{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
2586{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
2587{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
2588{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
2589{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
2590{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
2591{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
2592{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
2593{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
2594{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
2595{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
2596{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
2597{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
2598{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
2599{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
2600{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
2601{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
2602{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
2603{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
2604{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
2605{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
2606{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
2607{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
2608{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
2609{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
2610{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
2611{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
2612{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
2613{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
2614{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
2615{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
2616{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
2617{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
2618{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
2619{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
2620{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
2621{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
2622{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
2623{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
2624{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
2625{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
2626{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
2627{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
2628{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
2629{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
2630{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
2631{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
2632{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
2633{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
2634{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
2635{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
2636{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
2637{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
2638{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
2639{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
2640{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
2641{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
2642{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
2643{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
2644{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
2645{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
2646{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
2647{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
2648{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
2649{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
2650{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
2651{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
2652{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
2653{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
2654{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
2655{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
2656{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
2657{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
2658{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
2659{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
2660{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
2661{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
2662{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
2663{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
2664{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
2665{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
2666{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
2667{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
2668{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
2669{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
2670{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
2671{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
2672{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
2673{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
2674{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
2675{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
2676{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
2677{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
2678{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
2679{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
2680{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
2681{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
2682{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
2683{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
2684{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
2685{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
2686{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
2687{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
2688{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
2689{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
2690{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
2691{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
2692{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
2693{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
2694{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
2695{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
2696{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
2697{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
2698{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
2699{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
2700{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
2701{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
2702{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
2703{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
2704{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
2705{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
2706{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
2707{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
2708{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
2709{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
2710{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
2711{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
2712{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
2713{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
2714{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
2715{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
2716{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
2717{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
2718{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
2719{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
2720{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
2721{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
2722{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
2723{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
2724{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
2725{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
2726{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
2727{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
2728{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
2729{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
2730{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
2731{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
2732{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
2733{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
2734{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
2735{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
2736{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
2737{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
2738{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
2739{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
2740{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
2741{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
2742{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
2743{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
2744{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
2745{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
2746{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
2747{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
2748{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
2749{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
2750{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
2751{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
2752{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
2753{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
2754{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
2755{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
2756{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
2757{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
2758{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
2759{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
2760{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
2761{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
2762{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
2763{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
2764{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
2765{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
2766{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
2767{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
2768{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
2769{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
2770{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
2771{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
2772{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
2773{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
2774{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
2775{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
2776{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
2777{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
2778{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
2779{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
2780{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
2781{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
2782{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
2783{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
2784{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
2785{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
2786{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
2787{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
2788{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
2789{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
2790{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
2791{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
2792{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
2793{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
2794{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
2795{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
2796{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
2797{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
2798{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
2799{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
2800{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
2801{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
2802{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
2803{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
2804{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
2805{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
2806{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
2807{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
2808{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
2809{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
2810{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
2811{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
2812{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
2813{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
2814{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
2815{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
2816{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
2817{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
2818{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
2819{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
2820{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
2821{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
2822{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
2823{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
2824{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
2825{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
2826{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
2827{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
2828{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
2829{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
2830{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
2831{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
2832{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
2833{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
2834{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
2835{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
2836{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
2837{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
2838{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
2839{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
2840{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
2841{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
2842{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
2843{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
2844{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
2845{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
2846{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
2847{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
2848{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
2849{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
2850{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
2851{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
2852{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
2853{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
2854{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
2855{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
2856{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
2857{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
2858{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
2859{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
2860{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
2861{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
2862{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
2863{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
2864{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
2865{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
2866{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
2867{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
2868{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
2869{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
2870{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
2871{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
2872{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
2873{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
2874{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
2875{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
2876{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
2877{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
2878{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
2879{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
2880{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
2881{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
2882{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
2883{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
2884{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
2885{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
2886{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
2887{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
2888{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
2889{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
2890{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
2891{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
2892{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
2893{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
2894{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
2895{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
2896{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
2897{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
2898{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
2899{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
2900{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
2901{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
2902{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
2903{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
2904{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
2905{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
2906{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
2907{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
2908{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
2909{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
2910{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
2911{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
2912{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
2913{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
2914{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
2915{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
2916{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
2917{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
2918{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
2919{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
2920{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
2921{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
2922{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
2923{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
2924{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
2925{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
2926{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
2927{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
2928{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
2929{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
2930{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
2931{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
2932{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
2933{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
2934{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
2935{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
2936{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
2937{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
2938{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
2939{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
2940{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
2941{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
2942{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
2943{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
2944{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
2945{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
2946{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
2947{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
2948{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
2949{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
2950{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
2951{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
2952{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
2953{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
2954{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
2955{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
2956{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
2957{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
2958{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
2959{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
2960{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
2961{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
2962{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
2963{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
2964{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
2965{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
2966{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
2967{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
2968{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
2969{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
2970{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
2971{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
2972{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
2973{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
2974{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
2975{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
2976{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
2977{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
2978{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
2979{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
2980{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
2981{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
2982{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
2983{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
2984{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
2985{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
2986{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
2987{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
2988{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
2989{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
2990{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
2991{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
2992{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
2993{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
2994{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
2995{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
2996{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
2997{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
2998{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
2999{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3000{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3001{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3002{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3003{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3004{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3005{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3006{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3007{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3008{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3009{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3010{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3011{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3012{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3013{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3014{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3015{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3016{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3017{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3018{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3019{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3020{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3021{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3022{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3023{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3024{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3025{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3026{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3027{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3028{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3029{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3030{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3031{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3032{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3033{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3034{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x0000},
3035{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3036{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3037{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3038{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3039{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3040{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3041{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3042{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3043{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3044{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3045{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3046{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3047{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3048{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3049{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3050{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3051{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3052{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3053{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3054{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3055{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3056{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3057{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3058{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3059{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3060{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3061{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3062{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3063{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3064{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3065{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3066{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3067{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3068{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3069{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3070{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3071{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3072{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3073{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3074{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3075{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3076{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3077{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3078{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3079{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3080{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3081{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3082{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3083{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3084{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3085{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3086{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3087{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3088{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3089{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3090{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3091{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3092{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3093{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3094{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3095{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3096{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3097{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3098{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3099{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3100{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3101{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3102{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3103{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3104{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3105{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3106{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3107{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3108{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3109{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3110{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3111{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3112{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3113{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3114{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3115{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3116{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3117{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3118{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3119{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3120{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3121{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3122{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3123{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3124{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3125{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3126{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3127{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3128{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3129{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3130{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3131{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3132{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3133{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3134{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3135{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3136{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3137{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3138{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3139{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3140{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3141{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3142{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3143{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3144{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3145{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3146{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3147{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3148{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3149{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3150{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3151{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3152{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3153{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3154{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3155{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3156{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3157{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3158{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3159{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3160{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3161{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3162{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3163{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3164{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3165{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3166{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3167{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3168{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3169{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3170{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3171{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3172{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3173{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3174{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3175{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3176{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3177{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3178{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3179{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3180{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3181{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3182{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3183{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3184{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3185{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3186{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3187{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3188{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3189{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3190{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3191{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3192{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3193{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3194{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3195{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3196{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3197{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3198{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3199{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3200{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3201{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3202{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3203{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3204{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3205{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3206{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3207{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3208{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3209{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3210{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3211{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3212{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3213{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3214{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3215{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3216{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3217{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3218{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3219{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3220{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3221{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3222{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3223{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3224{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3225{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3226{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3227{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3228{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3229{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3230{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3231{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3232{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3233{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3234{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3235{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3236{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3237{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3238{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3239{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3240{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3241{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3242{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3243{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3244{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3245{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
3246{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
3247{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
3248{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
3249{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
3250{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
3251{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
3252{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
3253{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
3254{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
3255{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
3256{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
3257{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
3258{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
3259{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
3260{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
3261{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
3262{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
3263{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
3264{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
3265{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
3266{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
3267{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
3268{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
3269{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
3270{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
3271{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
3272{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
3273{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
3274{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
3275{0x8aca, 0x0039}, { 0x0, 0x0 }
3276};
3277#else
3278cas_saturn_patch_t cas_saturn_patch[] = {
3279{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009},
3280{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000},
3281{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000},
3282{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff},
3283{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025},
3284{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f},
3285{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026},
3286{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011},
3287{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d},
3288{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086},
3289{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f},
3290{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3},
3291{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047},
3292{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a},
3293{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047},
3294{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033},
3295{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f},
3296{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084},
3297{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004},
3298{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096},
3299{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c},
3300{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027},
3301{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084},
3302{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047},
3303{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a},
3304{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047},
3305{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054},
3306{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f},
3307{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084},
3308{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004},
3309{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6},
3310{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084},
3311{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003},
3312{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025},
3313{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6},
3314{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f},
3315{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7},
3316{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f},
3317{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec},
3318{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa},
3319{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7},
3320{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082},
3321{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001},
3322{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046},
3323{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081},
3324{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a},
3325{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020},
3326{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027},
3327{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084},
3328{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7},
3329{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084},
3330{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047},
3331{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a},
3332{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047},
3333{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad},
3334{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082},
3335{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001},
3336{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084},
3337{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041},
3338{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026},
3339{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023},
3340{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027},
3341{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed},
3342{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083},
3343{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042},
3344{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e},
3345{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084},
3346{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003},
3347{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df},
3348{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6},
3349{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f},
3350{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7},
3351{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f},
3352{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec},
3353{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa},
3354{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011},
3355{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd},
3356{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce},
3357{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff},
3358{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096},
3359{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c},
3360{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027},
3361{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049},
3362{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091},
3363{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6},
3364{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085},
3365{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c},
3366{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1},
3367{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f},
3368{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025},
3369{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016},
3370{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052},
3371{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e},
3372{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7},
3373{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6},
3374{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4},
3375{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083},
3376{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001},
3377{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046},
3378{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081},
3379{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a},
3380{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd},
3381{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025},
3382{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084},
3383{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084},
3384{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018},
3385{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019},
3386{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004},
3387{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e},
3388{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b},
3389{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007},
3390{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027},
3391{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081},
3392{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b},
3393{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007},
3394{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027},
3395{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e},
3396{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd},
3397{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086},
3398{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049},
3399{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012},
3400{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071},
3401{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f},
3402{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084},
3403{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008},
3404{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6},
3405{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4},
3406{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006},
3407{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025},
3408{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016},
3409{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e},
3410{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd},
3411{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026},
3412{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082},
3413{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001},
3414{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084},
3415{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f},
3416{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec},
3417{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa},
3418{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7},
3419{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f},
3420{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd},
3421{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce},
3422{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff},
3423{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096},
3424{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c},
3425{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026},
3426{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012},
3427{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f},
3428{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027},
3429{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023},
3430{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027},
3431{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084},
3432{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051},
3433{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091},
3434{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e},
3435{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce},
3436{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff},
3437{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e},
3438{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd},
3439{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c},
3440{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce},
3441{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff},
3442{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e},
3443{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096},
3444{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c},
3445{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026},
3446{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024},
3447{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026},
3448{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018},
3449{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019},
3450{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001},
3451{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009},
3452{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020},
3453{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081},
3454{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015},
3455{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044},
3456{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1},
3457{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f},
3458{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044},
3459{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029},
3460{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025},
3461{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f},
3462{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047},
3463{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a},
3464{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047},
3465{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034},
3466{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011},
3467{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084},
3468{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002},
3469{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e},
3470{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096},
3471{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc},
3472{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097},
3473{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1},
3474{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086},
3475{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012},
3476{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7},
3477{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010},
3478{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd},
3479{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031},
3480{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e},
3481{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6},
3482{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f},
3483{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7},
3484{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f},
3485{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec},
3486{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa},
3487{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008},
3488{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5},
3489{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002},
3490{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6},
3491{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4},
3492{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084},
3493{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001},
3494{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046},
3495{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081},
3496{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003},
3497{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f},
3498{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd},
3499{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025},
3500{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085},
3501{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044},
3502{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026},
3503{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012},
3504{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001},
3505{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010},
3506{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd},
3507{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce},
3508{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff},
3509{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e},
3510{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096},
3511{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003},
3512{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026},
3513{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012},
3514{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003},
3515{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027},
3516{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085},
3517{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044},
3518{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026},
3519{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012},
3520{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001},
3521{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010},
3522{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce},
3523{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff},
3524{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e},
3525{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6},
3526{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a},
3527{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010},
3528{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085},
3529{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8},
3530{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000},
3531{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084},
3532{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001},
3533{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085},
3534{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046},
3535{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081},
3536{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009},
3537{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030},
3538{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081},
3539{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f},
3540{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044},
3541{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b},
3542{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029},
3543{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026},
3544{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011},
3545{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022},
3546{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6},
3547{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba},
3548{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084},
3549{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d},
3550{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085},
3551{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005},
3552{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e},
3553{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca},
3554{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022},
3555{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000},
3556{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018},
3557{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000},
3558{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046},
3559{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085},
3560{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002},
3561{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085},
3562{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001},
3563{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f},
3564{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004},
3565{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004},
3566{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7},
3567{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086},
3568{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012},
3569{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007},
3570{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006},
3571{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d},
3572{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070},
3573{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba},
3574{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7},
3575{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001},
3576{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001},
3577{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6},
3578{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084},
3579{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002},
3580{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004},
3581{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001},
3582{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001},
3583{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4},
3584{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7},
3585{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6},
3586{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084},
3587{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008},
3588{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6},
3589{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081},
3590{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008},
3591{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7},
3592{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e},
3593{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086},
3594{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040},
3595{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e},
3596{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7},
3597{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f},
3598{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082},
3599{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f},
3600{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f},
3601{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f},
3602{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f},
3603{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f},
3604{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f},
3605{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f},
3606{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f},
3607{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f},
3608{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f},
3609{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f},
3610{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f},
3611{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f},
3612{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012},
3613{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010},
3614{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004},
3615{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7},
3616{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7},
3617{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7},
3618{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7},
3619{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086},
3620{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012},
3621{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012},
3622{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7},
3623{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004},
3624{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004},
3625{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001},
3626{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001},
3627{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008},
3628{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081},
3629{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b},
3630{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce},
3631{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd},
3632{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e},
3633{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081},
3634{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b},
3635{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce},
3636{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd},
3637{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e},
3638{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081},
3639{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b},
3640{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce},
3641{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd},
3642{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e},
3643{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081},
3644{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b},
3645{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce},
3646{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd},
3647{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e},
3648{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081},
3649{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b},
3650{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce},
3651{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd},
3652{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e},
3653{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081},
3654{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b},
3655{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce},
3656{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd},
3657{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e},
3658{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081},
3659{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b},
3660{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce},
3661{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd},
3662{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e},
3663{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081},
3664{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008},
3665{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce},
3666{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd},
3667{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6},
3668{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081},
3669{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003},
3670{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047},
3671{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009},
3672{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081},
3673{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006},
3674{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009},
3675{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe},
3676{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006},
3677{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081},
3678{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008},
3679{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7},
3680{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e},
3681{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6},
3682{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026},
3683{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f},
3684{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7},
3685{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e},
3686{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6},
3687{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084},
3688{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f},
3689{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b},
3690{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012},
3691{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012},
3692{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc},
3693{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009},
3694{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe},
3695{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070},
3696{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f},
3697{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c},
3698{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f},
3699{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084},
3700{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f},
3701{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c},
3702{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f},
3703{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1},
3704{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003},
3705{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040},
3706{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f},
3707{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027},
3708{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b},
3709{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081},
3710{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b},
3711{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027},
3712{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087},
3713{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f},
3714{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002},
3715{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a},
3716{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7},
3717{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086},
3718{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f},
3719{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012},
3720{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075},
3721{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7},
3722{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020},
3723{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f},
3724{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002},
3725{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071},
3726{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047},
3727{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097},
3728{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089},
3729{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f},
3730{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089},
3731{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f},
3732{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089},
3733{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f},
3734{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089},
3735{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f},
3736{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089},
3737{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7},
3738{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7},
3739{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6},
3740{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027},
3741{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f},
3742{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f},
3743{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f},
3744{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d},
3745{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078},
3746{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c},
3747{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6},
3748{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027},
3749{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f},
3750{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f},
3751{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f},
3752{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b},
3753{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d},
3754{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075},
3755{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c},
3756{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a},
3757{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027},
3758{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f},
3759{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f},
3760{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c},
3761{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083},
3762{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075},
3763{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078},
3764{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b},
3765{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc},
3766{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d},
3767{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000},
3768{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070},
3769{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072},
3770{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e},
3771{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6},
3772{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026},
3773{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce},
3774{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd},
3775{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e},
3776{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6},
3777{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026},
3778{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce},
3779{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd},
3780{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e},
3781{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6},
3782{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026},
3783{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce},
3784{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd},
3785{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e},
3786{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086},
3787{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040},
3788{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x006e},
3789{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075},
3790{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e},
3791{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012},
3792{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8},
3793{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012},
3794{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f},
3795{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007},
3796{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048},
3797{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6},
3798{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4},
3799{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7},
3800{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6},
3801{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081},
3802{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf},
3803{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005},
3804{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b},
3805{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005},
3806{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6},
3807{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd},
3808{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086},
3809{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f},
3810{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089},
3811{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002},
3812{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077},
3813{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094},
3814{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6},
3815{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd},
3816{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce},
3817{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6},
3818{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001},
3819{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081},
3820{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003},
3821{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066},
3822{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8},
3823{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084},
3824{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b},
3825{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079},
3826{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008},
3827{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e},
3828{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6},
3829{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a},
3830{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012},
3831{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012},
3832{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb},
3833{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7},
3834{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6},
3835{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036},
3836{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c},
3837{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7},
3838{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086},
3839{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012},
3840{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012},
3841{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001},
3842{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001},
3843{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe},
3844{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004},
3845{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004},
3846{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba},
3847{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7},
3848{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086},
3849{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012},
3850{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012},
3851{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7},
3852{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6},
3853{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084},
3854{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008},
3855{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c},
3856{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026},
3857{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076},
3858{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e},
3859{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e},
3860{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6},
3861{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081},
3862{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c},
3863{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7},
3864{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d},
3865{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb},
3866{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004},
3867{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7},
3868{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce},
3869{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6},
3870{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081},
3871{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005},
3872{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6},
3873{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6},
3874{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084},
3875{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012},
3876{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083},
3877{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c},
3878{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000},
3879{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006},
3880{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b},
3881{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083},
3882{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041},
3883{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e},
3884{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7},
3885{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086},
3886{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f},
3887{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012},
3888{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f},
3889{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c},
3890{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7},
3891{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086},
3892{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a},
3893{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012},
3894{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009},
3895{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c},
3896{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d},
3897{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c},
3898{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e},
3899{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027},
3900{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020},
3901{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e},
3902{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c},
3903{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba},
3904{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7},
3905{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6},
3906{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048},
3907{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d},
3908{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021},
3909{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004},
3910{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7},
3911{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd},
3912{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f},
3913{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000},
3914{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000},
3915{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008},
3916{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5},
3917{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c},
3918{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba},
3919{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7},
3920{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6},
3921{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084},
3922{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001},
3923{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006},
3924{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7},
3925{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036},
3926{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7},
3927{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032},
3928{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026},
3929{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f},
3930{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089},
3931{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001},
3932{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1},
3933{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c},
3934{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027},
3935{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f},
3936{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005},
3937{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080},
3938{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080},
3939{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7},
3940{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6},
3941{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005},
3942{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f},
3943{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f},
3944{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4},
3945{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b},
3946{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007},
3947{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f},
3948{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000},
3949{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000},
3950{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000},
3951{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6},
3952{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6},
3953{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7},
3954{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001},
3955{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018},
3956{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018},
3957{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7},
3958{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6},
3959{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007},
3960{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4},
3961{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054},
3962{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7},
3963{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a},
3964{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039},
3965{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084},
3966{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022},
3967{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7},
3968{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6},
3969{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7},
3970{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6},
3971{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4},
3972{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f},
3973{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072},
3974{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072},
3975{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071},
3976{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027},
3977{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001},
3978{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081},
3979{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024},
3980{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070},
3981{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096},
3982{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080},
3983{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064},
3984{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070},
3985{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096},
3986{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010},
3987{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064},
3988{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070},
3989{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096},
3990{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020},
3991{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064},
3992{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070},
3993{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096},
3994{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040},
3995{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074},
3996{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074},
3997{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078},
3998{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6},
3999{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085},
4000{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af},
4001{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4},
4002{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6},
4003{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081},
4004{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036},
4005{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026},
4006{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022},
4007{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044},
4008{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022},
4009{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020},
4010{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081},
4011{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d},
4012{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084},
4013{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044},
4014{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022},
4015{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020},
4016{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081},
4017{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f},
4018{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084},
4019{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044},
4020{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6},
4021{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f},
4022{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022},
4023{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c},
4024{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006},
4025{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed},
4026{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007},
4027{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9},
4028{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006},
4029{0x8aca, 0x0039}, { 0x0, 0x0 }
4030};
4031#endif
4032
4033
4034/* phy types */ 2517/* phy types */
4035#define CAS_PHY_UNKNOWN 0x00 2518#define CAS_PHY_UNKNOWN 0x00
4036#define CAS_PHY_SERDES 0x01 2519#define CAS_PHY_SERDES 0x01
@@ -4389,6 +2872,11 @@ struct cas {
4389 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS]; 2872 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
4390 struct pci_dev *pdev; 2873 struct pci_dev *pdev;
4391 struct net_device *dev; 2874 struct net_device *dev;
2875
2876 /* Firmware Info */
2877 u16 fw_load_addr;
2878 u32 fw_size;
2879 u8 *fw_data;
4392}; 2880};
4393 2881
4394#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1)) 2882#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1))
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index ea6144a9565e..b0b66766ed27 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1397,9 +1397,7 @@ net_open(struct net_device *dev)
1397release_dma: 1397release_dma:
1398#if ALLOW_DMA 1398#if ALLOW_DMA
1399 free_dma(dev->dma); 1399 free_dma(dev->dma);
1400#endif
1401release_irq: 1400release_irq:
1402#if ALLOW_DMA
1403 release_dma_buff(lp); 1401 release_dma_buff(lp);
1404#endif 1402#endif
1405 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON)); 1403 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 271140433b09..4f5cc6987ec1 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -124,8 +124,7 @@ struct sge_rspq { /* state for an SGE response queue */
124 dma_addr_t phys_addr; /* physical address of the ring */ 124 dma_addr_t phys_addr; /* physical address of the ring */
125 unsigned int cntxt_id; /* SGE context id for the response q */ 125 unsigned int cntxt_id; /* SGE context id for the response q */
126 spinlock_t lock; /* guards response processing */ 126 spinlock_t lock; /* guards response processing */
127 struct sk_buff *rx_head; /* offload packet receive queue head */ 127 struct sk_buff_head rx_queue; /* offload packet receive queue */
128 struct sk_buff *rx_tail; /* offload packet receive queue tail */
129 struct sk_buff *pg_skb; /* used to build frag list in napi handler */ 128 struct sk_buff *pg_skb; /* used to build frag list in napi handler */
130 129
131 unsigned long offload_pkts; 130 unsigned long offload_pkts;
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index c5b3de1bb456..0f6fd63b2847 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1018,7 +1018,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1018 1018
1019 skb = alloc_skb(sizeof(*req), GFP_ATOMIC); 1019 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
1020 if (!skb) { 1020 if (!skb) {
1021 printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__); 1021 printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
1022 return; 1022 return;
1023 } 1023 }
1024 skb->priority = CPL_PRIORITY_CONTROL; 1024 skb->priority = CPL_PRIORITY_CONTROL;
@@ -1049,14 +1049,14 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1049 return; 1049 return;
1050 if (!is_offloading(newdev)) { 1050 if (!is_offloading(newdev)) {
1051 printk(KERN_WARNING "%s: Redirect to non-offload " 1051 printk(KERN_WARNING "%s: Redirect to non-offload "
1052 "device ignored.\n", __FUNCTION__); 1052 "device ignored.\n", __func__);
1053 return; 1053 return;
1054 } 1054 }
1055 tdev = dev2t3cdev(olddev); 1055 tdev = dev2t3cdev(olddev);
1056 BUG_ON(!tdev); 1056 BUG_ON(!tdev);
1057 if (tdev != dev2t3cdev(newdev)) { 1057 if (tdev != dev2t3cdev(newdev)) {
1058 printk(KERN_WARNING "%s: Redirect to different " 1058 printk(KERN_WARNING "%s: Redirect to different "
1059 "offload device ignored.\n", __FUNCTION__); 1059 "offload device ignored.\n", __func__);
1060 return; 1060 return;
1061 } 1061 }
1062 1062
@@ -1064,7 +1064,7 @@ void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1064 e = t3_l2t_get(tdev, new->neighbour, newdev); 1064 e = t3_l2t_get(tdev, new->neighbour, newdev);
1065 if (!e) { 1065 if (!e) {
1066 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", 1066 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
1067 __FUNCTION__); 1067 __func__);
1068 return; 1068 return;
1069 } 1069 }
1070 1070
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 825e510bd9ed..b2c5314582aa 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -86,6 +86,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
86 struct l2t_entry *e) 86 struct l2t_entry *e)
87{ 87{
88 struct cpl_l2t_write_req *req; 88 struct cpl_l2t_write_req *req;
89 struct sk_buff *tmp;
89 90
90 if (!skb) { 91 if (!skb) {
91 skb = alloc_skb(sizeof(*req), GFP_ATOMIC); 92 skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
@@ -103,13 +104,11 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
103 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac)); 104 memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
104 skb->priority = CPL_PRIORITY_CONTROL; 105 skb->priority = CPL_PRIORITY_CONTROL;
105 cxgb3_ofld_send(dev, skb); 106 cxgb3_ofld_send(dev, skb);
106 while (e->arpq_head) { 107
107 skb = e->arpq_head; 108 skb_queue_walk_safe(&e->arpq, skb, tmp) {
108 e->arpq_head = skb->next; 109 __skb_unlink(skb, &e->arpq);
109 skb->next = NULL;
110 cxgb3_ofld_send(dev, skb); 110 cxgb3_ofld_send(dev, skb);
111 } 111 }
112 e->arpq_tail = NULL;
113 e->state = L2T_STATE_VALID; 112 e->state = L2T_STATE_VALID;
114 113
115 return 0; 114 return 0;
@@ -121,12 +120,7 @@ static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
121 */ 120 */
122static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb) 121static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
123{ 122{
124 skb->next = NULL; 123 __skb_queue_tail(&e->arpq, skb);
125 if (e->arpq_head)
126 e->arpq_tail->next = skb;
127 else
128 e->arpq_head = skb;
129 e->arpq_tail = skb;
130} 124}
131 125
132int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, 126int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
@@ -167,7 +161,7 @@ again:
167 break; 161 break;
168 162
169 spin_lock_bh(&e->lock); 163 spin_lock_bh(&e->lock);
170 if (e->arpq_head) 164 if (!skb_queue_empty(&e->arpq))
171 setup_l2e_send_pending(dev, skb, e); 165 setup_l2e_send_pending(dev, skb, e);
172 else /* we lost the race */ 166 else /* we lost the race */
173 __kfree_skb(skb); 167 __kfree_skb(skb);
@@ -357,14 +351,14 @@ EXPORT_SYMBOL(t3_l2t_get);
357 * XXX: maybe we should abandon the latter behavior and just require a failure 351 * XXX: maybe we should abandon the latter behavior and just require a failure
358 * handler. 352 * handler.
359 */ 353 */
360static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq) 354static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
361{ 355{
362 while (arpq) { 356 struct sk_buff *skb, *tmp;
363 struct sk_buff *skb = arpq; 357
358 skb_queue_walk_safe(arpq, skb, tmp) {
364 struct l2t_skb_cb *cb = L2T_SKB_CB(skb); 359 struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
365 360
366 arpq = skb->next; 361 __skb_unlink(skb, arpq);
367 skb->next = NULL;
368 if (cb->arp_failure_handler) 362 if (cb->arp_failure_handler)
369 cb->arp_failure_handler(dev, skb); 363 cb->arp_failure_handler(dev, skb);
370 else 364 else
@@ -378,8 +372,8 @@ static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
378 */ 372 */
379void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh) 373void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
380{ 374{
375 struct sk_buff_head arpq;
381 struct l2t_entry *e; 376 struct l2t_entry *e;
382 struct sk_buff *arpq = NULL;
383 struct l2t_data *d = L2DATA(dev); 377 struct l2t_data *d = L2DATA(dev);
384 u32 addr = *(u32 *) neigh->primary_key; 378 u32 addr = *(u32 *) neigh->primary_key;
385 int ifidx = neigh->dev->ifindex; 379 int ifidx = neigh->dev->ifindex;
@@ -395,6 +389,8 @@ void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
395 return; 389 return;
396 390
397found: 391found:
392 __skb_queue_head_init(&arpq);
393
398 read_unlock(&d->lock); 394 read_unlock(&d->lock);
399 if (atomic_read(&e->refcnt)) { 395 if (atomic_read(&e->refcnt)) {
400 if (neigh != e->neigh) 396 if (neigh != e->neigh)
@@ -402,8 +398,7 @@ found:
402 398
403 if (e->state == L2T_STATE_RESOLVING) { 399 if (e->state == L2T_STATE_RESOLVING) {
404 if (neigh->nud_state & NUD_FAILED) { 400 if (neigh->nud_state & NUD_FAILED) {
405 arpq = e->arpq_head; 401 skb_queue_splice_init(&e->arpq, &arpq);
406 e->arpq_head = e->arpq_tail = NULL;
407 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) 402 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
408 setup_l2e_send_pending(dev, NULL, e); 403 setup_l2e_send_pending(dev, NULL, e);
409 } else { 404 } else {
@@ -415,8 +410,8 @@ found:
415 } 410 }
416 spin_unlock_bh(&e->lock); 411 spin_unlock_bh(&e->lock);
417 412
418 if (arpq) 413 if (!skb_queue_empty(&arpq))
419 handle_failed_resolution(dev, arpq); 414 handle_failed_resolution(dev, &arpq);
420} 415}
421 416
422struct l2t_data *t3_init_l2t(unsigned int l2t_capacity) 417struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
index d79001336cfd..42ce65f76a87 100644
--- a/drivers/net/cxgb3/l2t.h
+++ b/drivers/net/cxgb3/l2t.h
@@ -64,8 +64,7 @@ struct l2t_entry {
64 struct neighbour *neigh; /* associated neighbour */ 64 struct neighbour *neigh; /* associated neighbour */
65 struct l2t_entry *first; /* start of hash chain */ 65 struct l2t_entry *first; /* start of hash chain */
66 struct l2t_entry *next; /* next l2t_entry on chain */ 66 struct l2t_entry *next; /* next l2t_entry on chain */
67 struct sk_buff *arpq_head; /* queue of packets awaiting resolution */ 67 struct sk_buff_head arpq; /* queue of packets awaiting resolution */
68 struct sk_buff *arpq_tail;
69 spinlock_t lock; 68 spinlock_t lock;
70 atomic_t refcnt; /* entry reference count */ 69 atomic_t refcnt; /* entry reference count */
71 u8 dmac[6]; /* neighbour's MAC address */ 70 u8 dmac[6]; /* neighbour's MAC address */
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 1b0861d73ab7..89efd04be4e0 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1704,16 +1704,15 @@ int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1704 */ 1704 */
1705static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) 1705static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1706{ 1706{
1707 skb->next = skb->prev = NULL; 1707 int was_empty = skb_queue_empty(&q->rx_queue);
1708 if (q->rx_tail) 1708
1709 q->rx_tail->next = skb; 1709 __skb_queue_tail(&q->rx_queue, skb);
1710 else { 1710
1711 if (was_empty) {
1711 struct sge_qset *qs = rspq_to_qset(q); 1712 struct sge_qset *qs = rspq_to_qset(q);
1712 1713
1713 napi_schedule(&qs->napi); 1714 napi_schedule(&qs->napi);
1714 q->rx_head = skb;
1715 } 1715 }
1716 q->rx_tail = skb;
1717} 1716}
1718 1717
1719/** 1718/**
@@ -1754,26 +1753,29 @@ static int ofld_poll(struct napi_struct *napi, int budget)
1754 int work_done = 0; 1753 int work_done = 0;
1755 1754
1756 while (work_done < budget) { 1755 while (work_done < budget) {
1757 struct sk_buff *head, *tail, *skbs[RX_BUNDLE_SIZE]; 1756 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1757 struct sk_buff_head queue;
1758 int ngathered; 1758 int ngathered;
1759 1759
1760 spin_lock_irq(&q->lock); 1760 spin_lock_irq(&q->lock);
1761 head = q->rx_head; 1761 __skb_queue_head_init(&queue);
1762 if (!head) { 1762 skb_queue_splice_init(&q->rx_queue, &queue);
1763 if (skb_queue_empty(&queue)) {
1763 napi_complete(napi); 1764 napi_complete(napi);
1764 spin_unlock_irq(&q->lock); 1765 spin_unlock_irq(&q->lock);
1765 return work_done; 1766 return work_done;
1766 } 1767 }
1767
1768 tail = q->rx_tail;
1769 q->rx_head = q->rx_tail = NULL;
1770 spin_unlock_irq(&q->lock); 1768 spin_unlock_irq(&q->lock);
1771 1769
1772 for (ngathered = 0; work_done < budget && head; work_done++) { 1770 ngathered = 0;
1773 prefetch(head->data); 1771 skb_queue_walk_safe(&queue, skb, tmp) {
1774 skbs[ngathered] = head; 1772 if (work_done >= budget)
1775 head = head->next; 1773 break;
1776 skbs[ngathered]->next = NULL; 1774 work_done++;
1775
1776 __skb_unlink(skb, &queue);
1777 prefetch(skb->data);
1778 skbs[ngathered] = skb;
1777 if (++ngathered == RX_BUNDLE_SIZE) { 1779 if (++ngathered == RX_BUNDLE_SIZE) {
1778 q->offload_bundles++; 1780 q->offload_bundles++;
1779 adapter->tdev.recv(&adapter->tdev, skbs, 1781 adapter->tdev.recv(&adapter->tdev, skbs,
@@ -1781,12 +1783,10 @@ static int ofld_poll(struct napi_struct *napi, int budget)
1781 ngathered = 0; 1783 ngathered = 0;
1782 } 1784 }
1783 } 1785 }
1784 if (head) { /* splice remaining packets back onto Rx queue */ 1786 if (!skb_queue_empty(&queue)) {
1787 /* splice remaining packets back onto Rx queue */
1785 spin_lock_irq(&q->lock); 1788 spin_lock_irq(&q->lock);
1786 tail->next = q->rx_head; 1789 skb_queue_splice(&queue, &q->rx_queue);
1787 if (!q->rx_head)
1788 q->rx_tail = tail;
1789 q->rx_head = head;
1790 spin_unlock_irq(&q->lock); 1790 spin_unlock_irq(&q->lock);
1791 } 1791 }
1792 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); 1792 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
@@ -1937,38 +1937,6 @@ static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2); 1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
1938} 1938}
1939 1939
1940#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
1941 TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
1942 TCP_FLAG_SYN | TCP_FLAG_FIN)
1943#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
1944 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
1945
1946/**
1947 * lro_segment_ok - check if a TCP segment is eligible for LRO
1948 * @tcph: the TCP header of the packet
1949 *
1950 * Returns true if a TCP packet is eligible for LRO. This requires that
1951 * the packet have only the ACK flag set and no TCP options besides
1952 * time stamps.
1953 */
1954static inline int lro_segment_ok(const struct tcphdr *tcph)
1955{
1956 int optlen;
1957
1958 if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
1959 return 0;
1960
1961 optlen = (tcph->doff << 2) - sizeof(*tcph);
1962 if (optlen) {
1963 const u32 *opt = (const u32 *)(tcph + 1);
1964
1965 if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
1966 *opt != htonl(TSTAMP_WORD) || !opt[2])
1967 return 0;
1968 }
1969 return 1;
1970}
1971
1972static int t3_get_lro_header(void **eh, void **iph, void **tcph, 1940static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1973 u64 *hdr_flags, void *priv) 1941 u64 *hdr_flags, void *priv)
1974{ 1942{
@@ -1981,9 +1949,6 @@ static int t3_get_lro_header(void **eh, void **iph, void **tcph,
1981 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1); 1949 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
1982 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1); 1950 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
1983 1951
1984 if (!lro_segment_ok(*tcph))
1985 return -1;
1986
1987 *hdr_flags = LRO_IPV4 | LRO_TCP; 1952 *hdr_flags = LRO_IPV4 | LRO_TCP;
1988 return 0; 1953 return 0;
1989} 1954}
@@ -2934,6 +2899,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2934 q->rspq.gen = 1; 2899 q->rspq.gen = 1;
2935 q->rspq.size = p->rspq_size; 2900 q->rspq.size = p->rspq_size;
2936 spin_lock_init(&q->rspq.lock); 2901 spin_lock_init(&q->rspq.lock);
2902 skb_queue_head_init(&q->rspq.rx_queue);
2937 2903
2938 q->txq[TXQ_ETH].stop_thres = nports * 2904 q->txq[TXQ_ETH].stop_thres = nports *
2939 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); 2905 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 5cf78d612c45..3d69fae781cf 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -191,7 +191,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
191#define DPRINTK(nlevel, klevel, fmt, args...) \ 191#define DPRINTK(nlevel, klevel, fmt, args...) \
192 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \ 192 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
193 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \ 193 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
194 __FUNCTION__ , ## args)) 194 __func__ , ## args))
195 195
196#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 196#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
197 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 197 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 19e317eaf5bc..62f62970f978 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -155,8 +155,6 @@ do { \
155#endif 155#endif
156 156
157#define E1000_MNG_VLAN_NONE (-1) 157#define E1000_MNG_VLAN_NONE (-1)
158/* Number of packet split data buffers (not including the header buffer) */
159#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
160 158
161/* wrapper around a pointer to a socket buffer, 159/* wrapper around a pointer to a socket buffer,
162 * so a DMA handle can be stored along with the buffer */ 160 * so a DMA handle can be stored along with the buffer */
@@ -168,14 +166,6 @@ struct e1000_buffer {
168 u16 next_to_watch; 166 u16 next_to_watch;
169}; 167};
170 168
171struct e1000_ps_page {
172 struct page *ps_page[PS_PAGE_BUFFERS];
173};
174
175struct e1000_ps_page_dma {
176 u64 ps_page_dma[PS_PAGE_BUFFERS];
177};
178
179struct e1000_tx_ring { 169struct e1000_tx_ring {
180 /* pointer to the descriptor ring memory */ 170 /* pointer to the descriptor ring memory */
181 void *desc; 171 void *desc;
@@ -213,9 +203,6 @@ struct e1000_rx_ring {
213 unsigned int next_to_clean; 203 unsigned int next_to_clean;
214 /* array of buffer information structs */ 204 /* array of buffer information structs */
215 struct e1000_buffer *buffer_info; 205 struct e1000_buffer *buffer_info;
216 /* arrays of page information for packet split */
217 struct e1000_ps_page *ps_page;
218 struct e1000_ps_page_dma *ps_page_dma;
219 206
220 /* cpu for rx queue */ 207 /* cpu for rx queue */
221 int cpu; 208 int cpu;
@@ -228,8 +215,6 @@ struct e1000_rx_ring {
228 ((((R)->next_to_clean > (R)->next_to_use) \ 215 ((((R)->next_to_clean > (R)->next_to_use) \
229 ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) 216 ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
230 217
231#define E1000_RX_DESC_PS(R, i) \
232 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
233#define E1000_RX_DESC_EXT(R, i) \ 218#define E1000_RX_DESC_EXT(R, i) \
234 (&(((union e1000_rx_desc_extended *)((R).desc))[i])) 219 (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
235#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) 220#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
@@ -311,10 +296,8 @@ struct e1000_adapter {
311 u32 rx_int_delay; 296 u32 rx_int_delay;
312 u32 rx_abs_int_delay; 297 u32 rx_abs_int_delay;
313 bool rx_csum; 298 bool rx_csum;
314 unsigned int rx_ps_pages;
315 u32 gorcl; 299 u32 gorcl;
316 u64 gorcl_old; 300 u64 gorcl_old;
317 u16 rx_ps_bsize0;
318 301
319 /* OS defined structs */ 302 /* OS defined structs */
320 struct net_device *netdev; 303 struct net_device *netdev;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ad6da7b67e55..2ab44db29fac 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -137,15 +137,9 @@ static int e1000_clean(struct napi_struct *napi, int budget);
137static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, 137static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
138 struct e1000_rx_ring *rx_ring, 138 struct e1000_rx_ring *rx_ring,
139 int *work_done, int work_to_do); 139 int *work_done, int work_to_do);
140static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
141 struct e1000_rx_ring *rx_ring,
142 int *work_done, int work_to_do);
143static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 140static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
144 struct e1000_rx_ring *rx_ring, 141 struct e1000_rx_ring *rx_ring,
145 int cleaned_count); 142 int cleaned_count);
146static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
147 struct e1000_rx_ring *rx_ring,
148 int cleaned_count);
149static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 143static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
150static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 144static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
151 int cmd); 145 int cmd);
@@ -1331,7 +1325,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1331 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); 1325 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1332 1326
1333 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1327 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1334 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
1335 hw->max_frame_size = netdev->mtu + 1328 hw->max_frame_size = netdev->mtu +
1336 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 1329 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1337 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; 1330 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1815,26 +1808,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1815 } 1808 }
1816 memset(rxdr->buffer_info, 0, size); 1809 memset(rxdr->buffer_info, 0, size);
1817 1810
1818 rxdr->ps_page = kcalloc(rxdr->count, sizeof(struct e1000_ps_page),
1819 GFP_KERNEL);
1820 if (!rxdr->ps_page) {
1821 vfree(rxdr->buffer_info);
1822 DPRINTK(PROBE, ERR,
1823 "Unable to allocate memory for the receive descriptor ring\n");
1824 return -ENOMEM;
1825 }
1826
1827 rxdr->ps_page_dma = kcalloc(rxdr->count,
1828 sizeof(struct e1000_ps_page_dma),
1829 GFP_KERNEL);
1830 if (!rxdr->ps_page_dma) {
1831 vfree(rxdr->buffer_info);
1832 kfree(rxdr->ps_page);
1833 DPRINTK(PROBE, ERR,
1834 "Unable to allocate memory for the receive descriptor ring\n");
1835 return -ENOMEM;
1836 }
1837
1838 if (hw->mac_type <= e1000_82547_rev_2) 1811 if (hw->mac_type <= e1000_82547_rev_2)
1839 desc_len = sizeof(struct e1000_rx_desc); 1812 desc_len = sizeof(struct e1000_rx_desc);
1840 else 1813 else
@@ -1852,8 +1825,6 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1852 "Unable to allocate memory for the receive descriptor ring\n"); 1825 "Unable to allocate memory for the receive descriptor ring\n");
1853setup_rx_desc_die: 1826setup_rx_desc_die:
1854 vfree(rxdr->buffer_info); 1827 vfree(rxdr->buffer_info);
1855 kfree(rxdr->ps_page);
1856 kfree(rxdr->ps_page_dma);
1857 return -ENOMEM; 1828 return -ENOMEM;
1858 } 1829 }
1859 1830
@@ -1932,11 +1903,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1932static void e1000_setup_rctl(struct e1000_adapter *adapter) 1903static void e1000_setup_rctl(struct e1000_adapter *adapter)
1933{ 1904{
1934 struct e1000_hw *hw = &adapter->hw; 1905 struct e1000_hw *hw = &adapter->hw;
1935 u32 rctl, rfctl; 1906 u32 rctl;
1936 u32 psrctl = 0;
1937#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1938 u32 pages = 0;
1939#endif
1940 1907
1941 rctl = er32(RCTL); 1908 rctl = er32(RCTL);
1942 1909
@@ -1988,55 +1955,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1988 break; 1955 break;
1989 } 1956 }
1990 1957
1991#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1992 /* 82571 and greater support packet-split where the protocol
1993 * header is placed in skb->data and the packet data is
1994 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1995 * In the case of a non-split, skb->data is linearly filled,
1996 * followed by the page buffers. Therefore, skb->data is
1997 * sized to hold the largest protocol header.
1998 */
1999 /* allocations using alloc_page take too long for regular MTU
2000 * so only enable packet split for jumbo frames */
2001 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2002 if ((hw->mac_type >= e1000_82571) && (pages <= 3) &&
2003 PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
2004 adapter->rx_ps_pages = pages;
2005 else
2006 adapter->rx_ps_pages = 0;
2007#endif
2008 if (adapter->rx_ps_pages) {
2009 /* Configure extra packet-split registers */
2010 rfctl = er32(RFCTL);
2011 rfctl |= E1000_RFCTL_EXTEN;
2012 /* disable packet split support for IPv6 extension headers,
2013 * because some malformed IPv6 headers can hang the RX */
2014 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2015 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2016
2017 ew32(RFCTL, rfctl);
2018
2019 rctl |= E1000_RCTL_DTYP_PS;
2020
2021 psrctl |= adapter->rx_ps_bsize0 >>
2022 E1000_PSRCTL_BSIZE0_SHIFT;
2023
2024 switch (adapter->rx_ps_pages) {
2025 case 3:
2026 psrctl |= PAGE_SIZE <<
2027 E1000_PSRCTL_BSIZE3_SHIFT;
2028 case 2:
2029 psrctl |= PAGE_SIZE <<
2030 E1000_PSRCTL_BSIZE2_SHIFT;
2031 case 1:
2032 psrctl |= PAGE_SIZE >>
2033 E1000_PSRCTL_BSIZE1_SHIFT;
2034 break;
2035 }
2036
2037 ew32(PSRCTL, psrctl);
2038 }
2039
2040 ew32(RCTL, rctl); 1958 ew32(RCTL, rctl);
2041} 1959}
2042 1960
@@ -2053,18 +1971,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2053 struct e1000_hw *hw = &adapter->hw; 1971 struct e1000_hw *hw = &adapter->hw;
2054 u32 rdlen, rctl, rxcsum, ctrl_ext; 1972 u32 rdlen, rctl, rxcsum, ctrl_ext;
2055 1973
2056 if (adapter->rx_ps_pages) { 1974 rdlen = adapter->rx_ring[0].count *
2057 /* this is a 32 byte descriptor */ 1975 sizeof(struct e1000_rx_desc);
2058 rdlen = adapter->rx_ring[0].count * 1976 adapter->clean_rx = e1000_clean_rx_irq;
2059 sizeof(union e1000_rx_desc_packet_split); 1977 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2060 adapter->clean_rx = e1000_clean_rx_irq_ps;
2061 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
2062 } else {
2063 rdlen = adapter->rx_ring[0].count *
2064 sizeof(struct e1000_rx_desc);
2065 adapter->clean_rx = e1000_clean_rx_irq;
2066 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
2067 }
2068 1978
2069 /* disable receives while setting up the descriptors */ 1979 /* disable receives while setting up the descriptors */
2070 rctl = er32(RCTL); 1980 rctl = er32(RCTL);
@@ -2109,28 +2019,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2109 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 2019 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2110 if (hw->mac_type >= e1000_82543) { 2020 if (hw->mac_type >= e1000_82543) {
2111 rxcsum = er32(RXCSUM); 2021 rxcsum = er32(RXCSUM);
2112 if (adapter->rx_csum) { 2022 if (adapter->rx_csum)
2113 rxcsum |= E1000_RXCSUM_TUOFL; 2023 rxcsum |= E1000_RXCSUM_TUOFL;
2114 2024 else
2115 /* Enable 82571 IPv4 payload checksum for UDP fragments
2116 * Must be used in conjunction with packet-split. */
2117 if ((hw->mac_type >= e1000_82571) &&
2118 (adapter->rx_ps_pages)) {
2119 rxcsum |= E1000_RXCSUM_IPPCSE;
2120 }
2121 } else {
2122 rxcsum &= ~E1000_RXCSUM_TUOFL;
2123 /* don't need to clear IPPCSE as it defaults to 0 */ 2025 /* don't need to clear IPPCSE as it defaults to 0 */
2124 } 2026 rxcsum &= ~E1000_RXCSUM_TUOFL;
2125 ew32(RXCSUM, rxcsum); 2027 ew32(RXCSUM, rxcsum);
2126 } 2028 }
2127 2029
2128 /* enable early receives on 82573, only takes effect if using > 2048
2129 * byte total frame size. for example only for jumbo frames */
2130#define E1000_ERT_2048 0x100
2131 if (hw->mac_type == e1000_82573)
2132 ew32(ERT, E1000_ERT_2048);
2133
2134 /* Enable Receives */ 2030 /* Enable Receives */
2135 ew32(RCTL, rctl); 2031 ew32(RCTL, rctl);
2136} 2032}
@@ -2256,10 +2152,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2256 2152
2257 vfree(rx_ring->buffer_info); 2153 vfree(rx_ring->buffer_info);
2258 rx_ring->buffer_info = NULL; 2154 rx_ring->buffer_info = NULL;
2259 kfree(rx_ring->ps_page);
2260 rx_ring->ps_page = NULL;
2261 kfree(rx_ring->ps_page_dma);
2262 rx_ring->ps_page_dma = NULL;
2263 2155
2264 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2156 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2265 2157
@@ -2292,11 +2184,9 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2292{ 2184{
2293 struct e1000_hw *hw = &adapter->hw; 2185 struct e1000_hw *hw = &adapter->hw;
2294 struct e1000_buffer *buffer_info; 2186 struct e1000_buffer *buffer_info;
2295 struct e1000_ps_page *ps_page;
2296 struct e1000_ps_page_dma *ps_page_dma;
2297 struct pci_dev *pdev = adapter->pdev; 2187 struct pci_dev *pdev = adapter->pdev;
2298 unsigned long size; 2188 unsigned long size;
2299 unsigned int i, j; 2189 unsigned int i;
2300 2190
2301 /* Free all the Rx ring sk_buffs */ 2191 /* Free all the Rx ring sk_buffs */
2302 for (i = 0; i < rx_ring->count; i++) { 2192 for (i = 0; i < rx_ring->count; i++) {
@@ -2310,25 +2200,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2310 dev_kfree_skb(buffer_info->skb); 2200 dev_kfree_skb(buffer_info->skb);
2311 buffer_info->skb = NULL; 2201 buffer_info->skb = NULL;
2312 } 2202 }
2313 ps_page = &rx_ring->ps_page[i];
2314 ps_page_dma = &rx_ring->ps_page_dma[i];
2315 for (j = 0; j < adapter->rx_ps_pages; j++) {
2316 if (!ps_page->ps_page[j]) break;
2317 pci_unmap_page(pdev,
2318 ps_page_dma->ps_page_dma[j],
2319 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2320 ps_page_dma->ps_page_dma[j] = 0;
2321 put_page(ps_page->ps_page[j]);
2322 ps_page->ps_page[j] = NULL;
2323 }
2324 } 2203 }
2325 2204
2326 size = sizeof(struct e1000_buffer) * rx_ring->count; 2205 size = sizeof(struct e1000_buffer) * rx_ring->count;
2327 memset(rx_ring->buffer_info, 0, size); 2206 memset(rx_ring->buffer_info, 0, size);
2328 size = sizeof(struct e1000_ps_page) * rx_ring->count;
2329 memset(rx_ring->ps_page, 0, size);
2330 size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
2331 memset(rx_ring->ps_page_dma, 0, size);
2332 2207
2333 /* Zero out the descriptor ring */ 2208 /* Zero out the descriptor ring */
2334 2209
@@ -4235,181 +4110,6 @@ next_desc:
4235} 4110}
4236 4111
4237/** 4112/**
4238 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
4239 * @adapter: board private structure
4240 **/
4241
4242static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4243 struct e1000_rx_ring *rx_ring,
4244 int *work_done, int work_to_do)
4245{
4246 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
4247 struct net_device *netdev = adapter->netdev;
4248 struct pci_dev *pdev = adapter->pdev;
4249 struct e1000_buffer *buffer_info, *next_buffer;
4250 struct e1000_ps_page *ps_page;
4251 struct e1000_ps_page_dma *ps_page_dma;
4252 struct sk_buff *skb;
4253 unsigned int i, j;
4254 u32 length, staterr;
4255 int cleaned_count = 0;
4256 bool cleaned = false;
4257 unsigned int total_rx_bytes=0, total_rx_packets=0;
4258
4259 i = rx_ring->next_to_clean;
4260 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4261 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4262 buffer_info = &rx_ring->buffer_info[i];
4263
4264 while (staterr & E1000_RXD_STAT_DD) {
4265 ps_page = &rx_ring->ps_page[i];
4266 ps_page_dma = &rx_ring->ps_page_dma[i];
4267
4268 if (unlikely(*work_done >= work_to_do))
4269 break;
4270 (*work_done)++;
4271
4272 skb = buffer_info->skb;
4273
4274 /* in the packet split case this is header only */
4275 prefetch(skb->data - NET_IP_ALIGN);
4276
4277 if (++i == rx_ring->count) i = 0;
4278 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
4279 prefetch(next_rxd);
4280
4281 next_buffer = &rx_ring->buffer_info[i];
4282
4283 cleaned = true;
4284 cleaned_count++;
4285 pci_unmap_single(pdev, buffer_info->dma,
4286 buffer_info->length,
4287 PCI_DMA_FROMDEVICE);
4288
4289 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
4290 E1000_DBG("%s: Packet Split buffers didn't pick up"
4291 " the full packet\n", netdev->name);
4292 dev_kfree_skb_irq(skb);
4293 goto next_desc;
4294 }
4295
4296 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
4297 dev_kfree_skb_irq(skb);
4298 goto next_desc;
4299 }
4300
4301 length = le16_to_cpu(rx_desc->wb.middle.length0);
4302
4303 if (unlikely(!length)) {
4304 E1000_DBG("%s: Last part of the packet spanning"
4305 " multiple descriptors\n", netdev->name);
4306 dev_kfree_skb_irq(skb);
4307 goto next_desc;
4308 }
4309
4310 /* Good Receive */
4311 skb_put(skb, length);
4312
4313 {
4314 /* this looks ugly, but it seems compiler issues make it
4315 more efficient than reusing j */
4316 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
4317
4318 /* page alloc/put takes too long and effects small packet
4319 * throughput, so unsplit small packets and save the alloc/put*/
4320 if (l1 && (l1 <= copybreak) && ((length + l1) <= adapter->rx_ps_bsize0)) {
4321 u8 *vaddr;
4322 /* there is no documentation about how to call
4323 * kmap_atomic, so we can't hold the mapping
4324 * very long */
4325 pci_dma_sync_single_for_cpu(pdev,
4326 ps_page_dma->ps_page_dma[0],
4327 PAGE_SIZE,
4328 PCI_DMA_FROMDEVICE);
4329 vaddr = kmap_atomic(ps_page->ps_page[0],
4330 KM_SKB_DATA_SOFTIRQ);
4331 memcpy(skb_tail_pointer(skb), vaddr, l1);
4332 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
4333 pci_dma_sync_single_for_device(pdev,
4334 ps_page_dma->ps_page_dma[0],
4335 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4336 /* remove the CRC */
4337 l1 -= 4;
4338 skb_put(skb, l1);
4339 goto copydone;
4340 } /* if */
4341 }
4342
4343 for (j = 0; j < adapter->rx_ps_pages; j++) {
4344 length = le16_to_cpu(rx_desc->wb.upper.length[j]);
4345 if (!length)
4346 break;
4347 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
4348 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4349 ps_page_dma->ps_page_dma[j] = 0;
4350 skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
4351 length);
4352 ps_page->ps_page[j] = NULL;
4353 skb->len += length;
4354 skb->data_len += length;
4355 skb->truesize += length;
4356 }
4357
4358 /* strip the ethernet crc, problem is we're using pages now so
4359 * this whole operation can get a little cpu intensive */
4360 pskb_trim(skb, skb->len - 4);
4361
4362copydone:
4363 total_rx_bytes += skb->len;
4364 total_rx_packets++;
4365
4366 e1000_rx_checksum(adapter, staterr,
4367 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
4368 skb->protocol = eth_type_trans(skb, netdev);
4369
4370 if (likely(rx_desc->wb.upper.header_status &
4371 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
4372 adapter->rx_hdr_split++;
4373
4374 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
4375 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4376 le16_to_cpu(rx_desc->wb.middle.vlan));
4377 } else {
4378 netif_receive_skb(skb);
4379 }
4380
4381 netdev->last_rx = jiffies;
4382
4383next_desc:
4384 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
4385 buffer_info->skb = NULL;
4386
4387 /* return some buffers to hardware, one at a time is too slow */
4388 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4389 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4390 cleaned_count = 0;
4391 }
4392
4393 /* use prefetched values */
4394 rx_desc = next_rxd;
4395 buffer_info = next_buffer;
4396
4397 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4398 }
4399 rx_ring->next_to_clean = i;
4400
4401 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4402 if (cleaned_count)
4403 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4404
4405 adapter->total_rx_packets += total_rx_packets;
4406 adapter->total_rx_bytes += total_rx_bytes;
4407 adapter->net_stats.rx_bytes += total_rx_bytes;
4408 adapter->net_stats.rx_packets += total_rx_packets;
4409 return cleaned;
4410}
4411
4412/**
4413 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended 4113 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4414 * @adapter: address of board private structure 4114 * @adapter: address of board private structure
4415 **/ 4115 **/
@@ -4521,104 +4221,6 @@ map_skb:
4521} 4221}
4522 4222
4523/** 4223/**
4524 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
4525 * @adapter: address of board private structure
4526 **/
4527
4528static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4529 struct e1000_rx_ring *rx_ring,
4530 int cleaned_count)
4531{
4532 struct e1000_hw *hw = &adapter->hw;
4533 struct net_device *netdev = adapter->netdev;
4534 struct pci_dev *pdev = adapter->pdev;
4535 union e1000_rx_desc_packet_split *rx_desc;
4536 struct e1000_buffer *buffer_info;
4537 struct e1000_ps_page *ps_page;
4538 struct e1000_ps_page_dma *ps_page_dma;
4539 struct sk_buff *skb;
4540 unsigned int i, j;
4541
4542 i = rx_ring->next_to_use;
4543 buffer_info = &rx_ring->buffer_info[i];
4544 ps_page = &rx_ring->ps_page[i];
4545 ps_page_dma = &rx_ring->ps_page_dma[i];
4546
4547 while (cleaned_count--) {
4548 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4549
4550 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
4551 if (j < adapter->rx_ps_pages) {
4552 if (likely(!ps_page->ps_page[j])) {
4553 ps_page->ps_page[j] =
4554 alloc_page(GFP_ATOMIC);
4555 if (unlikely(!ps_page->ps_page[j])) {
4556 adapter->alloc_rx_buff_failed++;
4557 goto no_buffers;
4558 }
4559 ps_page_dma->ps_page_dma[j] =
4560 pci_map_page(pdev,
4561 ps_page->ps_page[j],
4562 0, PAGE_SIZE,
4563 PCI_DMA_FROMDEVICE);
4564 }
4565 /* Refresh the desc even if buffer_addrs didn't
4566 * change because each write-back erases
4567 * this info.
4568 */
4569 rx_desc->read.buffer_addr[j+1] =
4570 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
4571 } else
4572 rx_desc->read.buffer_addr[j+1] = ~cpu_to_le64(0);
4573 }
4574
4575 skb = netdev_alloc_skb(netdev,
4576 adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4577
4578 if (unlikely(!skb)) {
4579 adapter->alloc_rx_buff_failed++;
4580 break;
4581 }
4582
4583 /* Make buffer alignment 2 beyond a 16 byte boundary
4584 * this will result in a 16 byte aligned IP header after
4585 * the 14 byte MAC header is removed
4586 */
4587 skb_reserve(skb, NET_IP_ALIGN);
4588
4589 buffer_info->skb = skb;
4590 buffer_info->length = adapter->rx_ps_bsize0;
4591 buffer_info->dma = pci_map_single(pdev, skb->data,
4592 adapter->rx_ps_bsize0,
4593 PCI_DMA_FROMDEVICE);
4594
4595 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4596
4597 if (unlikely(++i == rx_ring->count)) i = 0;
4598 buffer_info = &rx_ring->buffer_info[i];
4599 ps_page = &rx_ring->ps_page[i];
4600 ps_page_dma = &rx_ring->ps_page_dma[i];
4601 }
4602
4603no_buffers:
4604 if (likely(rx_ring->next_to_use != i)) {
4605 rx_ring->next_to_use = i;
4606 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4607
4608 /* Force memory writes to complete before letting h/w
4609 * know there are new descriptors to fetch. (Only
4610 * applicable for weak-ordered memory model archs,
4611 * such as IA-64). */
4612 wmb();
4613 /* Hardware increments by 16 bytes, but packet split
4614 * descriptors are 32 bytes...so we increment tail
4615 * twice as much.
4616 */
4617 writel(i<<1, hw->hw_addr + rx_ring->rdt);
4618 }
4619}
4620
4621/**
4622 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 4224 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4623 * @adapter: 4225 * @adapter:
4624 **/ 4226 **/
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 462351ca2c81..b2c910c52df9 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -38,6 +38,7 @@
38 * 82573V Gigabit Ethernet Controller (Copper) 38 * 82573V Gigabit Ethernet Controller (Copper)
39 * 82573E Gigabit Ethernet Controller (Copper) 39 * 82573E Gigabit Ethernet Controller (Copper)
40 * 82573L Gigabit Ethernet Controller 40 * 82573L Gigabit Ethernet Controller
41 * 82574L Gigabit Network Connection
41 */ 42 */
42 43
43#include <linux/netdevice.h> 44#include <linux/netdevice.h>
@@ -54,6 +55,8 @@
54 55
55#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 56#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
56 57
58#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
59
57static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); 60static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
58static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); 61static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
59static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); 62static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -63,6 +66,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
63static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); 66static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
64static s32 e1000_setup_link_82571(struct e1000_hw *hw); 67static s32 e1000_setup_link_82571(struct e1000_hw *hw);
65static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); 68static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
69static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
70static s32 e1000_led_on_82574(struct e1000_hw *hw);
66 71
67/** 72/**
68 * e1000_init_phy_params_82571 - Init PHY func ptrs. 73 * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -92,6 +97,9 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
92 case e1000_82573: 97 case e1000_82573:
93 phy->type = e1000_phy_m88; 98 phy->type = e1000_phy_m88;
94 break; 99 break;
100 case e1000_82574:
101 phy->type = e1000_phy_bm;
102 break;
95 default: 103 default:
96 return -E1000_ERR_PHY; 104 return -E1000_ERR_PHY;
97 break; 105 break;
@@ -111,6 +119,10 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
111 if (phy->id != M88E1111_I_PHY_ID) 119 if (phy->id != M88E1111_I_PHY_ID)
112 return -E1000_ERR_PHY; 120 return -E1000_ERR_PHY;
113 break; 121 break;
122 case e1000_82574:
123 if (phy->id != BME1000_E_PHY_ID_R2)
124 return -E1000_ERR_PHY;
125 break;
114 default: 126 default:
115 return -E1000_ERR_PHY; 127 return -E1000_ERR_PHY;
116 break; 128 break;
@@ -150,6 +162,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
150 162
151 switch (hw->mac.type) { 163 switch (hw->mac.type) {
152 case e1000_82573: 164 case e1000_82573:
165 case e1000_82574:
153 if (((eecd >> 15) & 0x3) == 0x3) { 166 if (((eecd >> 15) & 0x3) == 0x3) {
154 nvm->type = e1000_nvm_flash_hw; 167 nvm->type = e1000_nvm_flash_hw;
155 nvm->word_size = 2048; 168 nvm->word_size = 2048;
@@ -245,6 +258,17 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
245 break; 258 break;
246 } 259 }
247 260
261 switch (hw->mac.type) {
262 case e1000_82574:
263 func->check_mng_mode = e1000_check_mng_mode_82574;
264 func->led_on = e1000_led_on_82574;
265 break;
266 default:
267 func->check_mng_mode = e1000e_check_mng_mode_generic;
268 func->led_on = e1000e_led_on_generic;
269 break;
270 }
271
248 return 0; 272 return 0;
249} 273}
250 274
@@ -330,6 +354,8 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
330static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) 354static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
331{ 355{
332 struct e1000_phy_info *phy = &hw->phy; 356 struct e1000_phy_info *phy = &hw->phy;
357 s32 ret_val;
358 u16 phy_id = 0;
333 359
334 switch (hw->mac.type) { 360 switch (hw->mac.type) {
335 case e1000_82571: 361 case e1000_82571:
@@ -345,6 +371,20 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
345 case e1000_82573: 371 case e1000_82573:
346 return e1000e_get_phy_id(hw); 372 return e1000e_get_phy_id(hw);
347 break; 373 break;
374 case e1000_82574:
375 ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
376 if (ret_val)
377 return ret_val;
378
379 phy->id = (u32)(phy_id << 16);
380 udelay(20);
381 ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
382 if (ret_val)
383 return ret_val;
384
385 phy->id |= (u32)(phy_id);
386 phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
387 break;
348 default: 388 default:
349 return -E1000_ERR_PHY; 389 return -E1000_ERR_PHY;
350 break; 390 break;
@@ -421,7 +461,7 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
421 if (ret_val) 461 if (ret_val)
422 return ret_val; 462 return ret_val;
423 463
424 if (hw->mac.type != e1000_82573) 464 if (hw->mac.type != e1000_82573 && hw->mac.type != e1000_82574)
425 ret_val = e1000e_acquire_nvm(hw); 465 ret_val = e1000e_acquire_nvm(hw);
426 466
427 if (ret_val) 467 if (ret_val)
@@ -461,6 +501,7 @@ static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
461 501
462 switch (hw->mac.type) { 502 switch (hw->mac.type) {
463 case e1000_82573: 503 case e1000_82573:
504 case e1000_82574:
464 ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); 505 ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
465 break; 506 break;
466 case e1000_82571: 507 case e1000_82571:
@@ -735,7 +776,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
735 * Must acquire the MDIO ownership before MAC reset. 776 * Must acquire the MDIO ownership before MAC reset.
736 * Ownership defaults to firmware after a reset. 777 * Ownership defaults to firmware after a reset.
737 */ 778 */
738 if (hw->mac.type == e1000_82573) { 779 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
739 extcnf_ctrl = er32(EXTCNF_CTRL); 780 extcnf_ctrl = er32(EXTCNF_CTRL);
740 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 781 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
741 782
@@ -776,7 +817,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
776 * Need to wait for Phy configuration completion before accessing 817 * Need to wait for Phy configuration completion before accessing
777 * NVM and Phy. 818 * NVM and Phy.
778 */ 819 */
779 if (hw->mac.type == e1000_82573) 820 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574)
780 msleep(25); 821 msleep(25);
781 822
782 /* Clear any pending interrupt events. */ 823 /* Clear any pending interrupt events. */
@@ -843,7 +884,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
843 ew32(TXDCTL(0), reg_data); 884 ew32(TXDCTL(0), reg_data);
844 885
845 /* ...for both queues. */ 886 /* ...for both queues. */
846 if (mac->type != e1000_82573) { 887 if (mac->type != e1000_82573 && mac->type != e1000_82574) {
847 reg_data = er32(TXDCTL(1)); 888 reg_data = er32(TXDCTL(1));
848 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 889 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
849 E1000_TXDCTL_FULL_TX_DESC_WB | 890 E1000_TXDCTL_FULL_TX_DESC_WB |
@@ -918,19 +959,28 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
918 } 959 }
919 960
920 /* Device Control */ 961 /* Device Control */
921 if (hw->mac.type == e1000_82573) { 962 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
922 reg = er32(CTRL); 963 reg = er32(CTRL);
923 reg &= ~(1 << 29); 964 reg &= ~(1 << 29);
924 ew32(CTRL, reg); 965 ew32(CTRL, reg);
925 } 966 }
926 967
927 /* Extended Device Control */ 968 /* Extended Device Control */
928 if (hw->mac.type == e1000_82573) { 969 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
929 reg = er32(CTRL_EXT); 970 reg = er32(CTRL_EXT);
930 reg &= ~(1 << 23); 971 reg &= ~(1 << 23);
931 reg |= (1 << 22); 972 reg |= (1 << 22);
932 ew32(CTRL_EXT, reg); 973 ew32(CTRL_EXT, reg);
933 } 974 }
975
976 /* PCI-Ex Control Register */
977 if (hw->mac.type == e1000_82574) {
978 reg = er32(GCR);
979 reg |= (1 << 22);
980 ew32(GCR, reg);
981 }
982
983 return;
934} 984}
935 985
936/** 986/**
@@ -947,7 +997,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
947 u32 vfta_offset = 0; 997 u32 vfta_offset = 0;
948 u32 vfta_bit_in_reg = 0; 998 u32 vfta_bit_in_reg = 0;
949 999
950 if (hw->mac.type == e1000_82573) { 1000 if (hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) {
951 if (hw->mng_cookie.vlan_id != 0) { 1001 if (hw->mng_cookie.vlan_id != 0) {
952 /* 1002 /*
953 * The VFTA is a 4096b bit-field, each identifying 1003 * The VFTA is a 4096b bit-field, each identifying
@@ -976,6 +1026,48 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
976} 1026}
977 1027
978/** 1028/**
1029 * e1000_check_mng_mode_82574 - Check manageability is enabled
1030 * @hw: pointer to the HW structure
1031 *
1032 * Reads the NVM Initialization Control Word 2 and returns true
1033 * (>0) if any manageability is enabled, else false (0).
1034 **/
1035static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
1036{
1037 u16 data;
1038
1039 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
1040 return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
1041}
1042
1043/**
1044 * e1000_led_on_82574 - Turn LED on
1045 * @hw: pointer to the HW structure
1046 *
1047 * Turn LED on.
1048 **/
1049static s32 e1000_led_on_82574(struct e1000_hw *hw)
1050{
1051 u32 ctrl;
1052 u32 i;
1053
1054 ctrl = hw->mac.ledctl_mode2;
1055 if (!(E1000_STATUS_LU & er32(STATUS))) {
1056 /*
1057 * If no link, then turn LED on by setting the invert bit
1058 * for each LED that's "on" (0x0E) in ledctl_mode2.
1059 */
1060 for (i = 0; i < 4; i++)
1061 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1062 E1000_LEDCTL_MODE_LED_ON)
1063 ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
1064 }
1065 ew32(LEDCTL, ctrl);
1066
1067 return 0;
1068}
1069
1070/**
979 * e1000_update_mc_addr_list_82571 - Update Multicast addresses 1071 * e1000_update_mc_addr_list_82571 - Update Multicast addresses
980 * @hw: pointer to the HW structure 1072 * @hw: pointer to the HW structure
981 * @mc_addr_list: array of multicast addresses to program 1073 * @mc_addr_list: array of multicast addresses to program
@@ -1018,7 +1110,8 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
1018 * the default flow control setting, so we explicitly 1110 * the default flow control setting, so we explicitly
1019 * set it to full. 1111 * set it to full.
1020 */ 1112 */
1021 if (hw->mac.type == e1000_82573) 1113 if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
1114 hw->fc.type == e1000_fc_default)
1022 hw->fc.type = e1000_fc_full; 1115 hw->fc.type = e1000_fc_full;
1023 1116
1024 return e1000e_setup_link(hw); 1117 return e1000e_setup_link(hw);
@@ -1045,6 +1138,7 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
1045 1138
1046 switch (hw->phy.type) { 1139 switch (hw->phy.type) {
1047 case e1000_phy_m88: 1140 case e1000_phy_m88:
1141 case e1000_phy_bm:
1048 ret_val = e1000e_copper_link_setup_m88(hw); 1142 ret_val = e1000e_copper_link_setup_m88(hw);
1049 break; 1143 break;
1050 case e1000_phy_igp_2: 1144 case e1000_phy_igp_2:
@@ -1114,11 +1208,10 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
1114 return ret_val; 1208 return ret_val;
1115 } 1209 }
1116 1210
1117 if (hw->mac.type == e1000_82573 && 1211 if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
1118 *data == ID_LED_RESERVED_F746) 1212 *data == ID_LED_RESERVED_F746)
1119 *data = ID_LED_DEFAULT_82573; 1213 *data = ID_LED_DEFAULT_82573;
1120 else if (*data == ID_LED_RESERVED_0000 || 1214 else if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1121 *data == ID_LED_RESERVED_FFFF)
1122 *data = ID_LED_DEFAULT; 1215 *data = ID_LED_DEFAULT;
1123 1216
1124 return 0; 1217 return 0;
@@ -1265,13 +1358,13 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
1265} 1358}
1266 1359
1267static struct e1000_mac_operations e82571_mac_ops = { 1360static struct e1000_mac_operations e82571_mac_ops = {
1268 .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, 1361 /* .check_mng_mode: mac type dependent */
1269 /* .check_for_link: media type dependent */ 1362 /* .check_for_link: media type dependent */
1270 .cleanup_led = e1000e_cleanup_led_generic, 1363 .cleanup_led = e1000e_cleanup_led_generic,
1271 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, 1364 .clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
1272 .get_bus_info = e1000e_get_bus_info_pcie, 1365 .get_bus_info = e1000e_get_bus_info_pcie,
1273 /* .get_link_up_info: media type dependent */ 1366 /* .get_link_up_info: media type dependent */
1274 .led_on = e1000e_led_on_generic, 1367 /* .led_on: mac type dependent */
1275 .led_off = e1000e_led_off_generic, 1368 .led_off = e1000e_led_off_generic,
1276 .update_mc_addr_list = e1000_update_mc_addr_list_82571, 1369 .update_mc_addr_list = e1000_update_mc_addr_list_82571,
1277 .reset_hw = e1000_reset_hw_82571, 1370 .reset_hw = e1000_reset_hw_82571,
@@ -1312,6 +1405,22 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
1312 .write_phy_reg = e1000e_write_phy_reg_m88, 1405 .write_phy_reg = e1000e_write_phy_reg_m88,
1313}; 1406};
1314 1407
1408static struct e1000_phy_operations e82_phy_ops_bm = {
1409 .acquire_phy = e1000_get_hw_semaphore_82571,
1410 .check_reset_block = e1000e_check_reset_block_generic,
1411 .commit_phy = e1000e_phy_sw_reset,
1412 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
1413 .get_cfg_done = e1000e_get_cfg_done,
1414 .get_cable_length = e1000e_get_cable_length_m88,
1415 .get_phy_info = e1000e_get_phy_info_m88,
1416 .read_phy_reg = e1000e_read_phy_reg_bm2,
1417 .release_phy = e1000_put_hw_semaphore_82571,
1418 .reset_phy = e1000e_phy_hw_reset_generic,
1419 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1420 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1421 .write_phy_reg = e1000e_write_phy_reg_bm2,
1422};
1423
1315static struct e1000_nvm_operations e82571_nvm_ops = { 1424static struct e1000_nvm_operations e82571_nvm_ops = {
1316 .acquire_nvm = e1000_acquire_nvm_82571, 1425 .acquire_nvm = e1000_acquire_nvm_82571,
1317 .read_nvm = e1000e_read_nvm_eerd, 1426 .read_nvm = e1000e_read_nvm_eerd,
@@ -1375,3 +1484,21 @@ struct e1000_info e1000_82573_info = {
1375 .nvm_ops = &e82571_nvm_ops, 1484 .nvm_ops = &e82571_nvm_ops,
1376}; 1485};
1377 1486
1487struct e1000_info e1000_82574_info = {
1488 .mac = e1000_82574,
1489 .flags = FLAG_HAS_HW_VLAN_FILTER
1490 | FLAG_HAS_MSIX
1491 | FLAG_HAS_JUMBO_FRAMES
1492 | FLAG_HAS_WOL
1493 | FLAG_APME_IN_CTRL3
1494 | FLAG_RX_CSUM_ENABLED
1495 | FLAG_HAS_SMART_POWER_DOWN
1496 | FLAG_HAS_AMT
1497 | FLAG_HAS_CTRLEXT_ON_LOAD,
1498 .pba = 20,
1499 .get_variants = e1000_get_variants_82571,
1500 .mac_ops = &e82571_mac_ops,
1501 .phy_ops = &e82_phy_ops_bm,
1502 .nvm_ops = &e82571_nvm_ops,
1503};
1504
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 14b0e6cd3b8d..48f79ecb82a0 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -71,9 +71,11 @@
71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 71#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 72#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
73#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 73#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
74#define E1000_CTRL_EXT_EIAME 0x01000000
74#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 75#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
75#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 76#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
76#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 77#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
78#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
77 79
78/* Receive Descriptor bit definitions */ 80/* Receive Descriptor bit definitions */
79#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ 81#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -299,6 +301,7 @@
299#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 301#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
300 302
301/* Header split receive */ 303/* Header split receive */
304#define E1000_RFCTL_ACK_DIS 0x00001000
302#define E1000_RFCTL_EXTEN 0x00008000 305#define E1000_RFCTL_EXTEN 0x00008000
303#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 306#define E1000_RFCTL_IPV6_EX_DIS 0x00010000
304#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 307#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
@@ -363,6 +366,11 @@
363#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 366#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
364#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 367#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
365#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 368#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
369#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
370#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
371#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
372#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
373#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
366 374
367/* 375/*
368 * This defines the bits that are set in the Interrupt Mask 376 * This defines the bits that are set in the Interrupt Mask
@@ -386,6 +394,11 @@
386#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 394#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
387#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 395#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
388#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ 396#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
397#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
398#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
399#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
400#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */
401#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */
389 402
390/* Interrupt Cause Set */ 403/* Interrupt Cause Set */
391#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 404#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
@@ -505,6 +518,7 @@
505#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ 518#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
506 519
507/* Autoneg Expansion Register */ 520/* Autoneg Expansion Register */
521#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
508 522
509/* 1000BASE-T Control Register */ 523/* 1000BASE-T Control Register */
510#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ 524#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
@@ -540,6 +554,7 @@
540#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ 554#define E1000_EECD_DO 0x00000008 /* NVM Data Out */
541#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ 555#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
542#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ 556#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
557#define E1000_EECD_PRES 0x00000100 /* NVM Present */
543#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ 558#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
544/* NVM Addressing bits based on type (0-small, 1-large) */ 559/* NVM Addressing bits based on type (0-small, 1-large) */
545#define E1000_EECD_ADDR_BITS 0x00000400 560#define E1000_EECD_ADDR_BITS 0x00000400
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index ac4e506b4f88..0a1916b0419d 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -62,6 +62,11 @@ struct e1000_info;
62 e_printk(KERN_NOTICE, adapter, format, ## arg) 62 e_printk(KERN_NOTICE, adapter, format, ## arg)
63 63
64 64
65/* Interrupt modes, as used by the IntMode paramter */
66#define E1000E_INT_MODE_LEGACY 0
67#define E1000E_INT_MODE_MSI 1
68#define E1000E_INT_MODE_MSIX 2
69
65/* Tx/Rx descriptor defines */ 70/* Tx/Rx descriptor defines */
66#define E1000_DEFAULT_TXD 256 71#define E1000_DEFAULT_TXD 256
67#define E1000_MAX_TXD 4096 72#define E1000_MAX_TXD 4096
@@ -95,9 +100,11 @@ enum e1000_boards {
95 board_82571, 100 board_82571,
96 board_82572, 101 board_82572,
97 board_82573, 102 board_82573,
103 board_82574,
98 board_80003es2lan, 104 board_80003es2lan,
99 board_ich8lan, 105 board_ich8lan,
100 board_ich9lan, 106 board_ich9lan,
107 board_ich10lan,
101}; 108};
102 109
103struct e1000_queue_stats { 110struct e1000_queue_stats {
@@ -146,6 +153,12 @@ struct e1000_ring {
146 /* array of buffer information structs */ 153 /* array of buffer information structs */
147 struct e1000_buffer *buffer_info; 154 struct e1000_buffer *buffer_info;
148 155
156 char name[IFNAMSIZ + 5];
157 u32 ims_val;
158 u32 itr_val;
159 u16 itr_register;
160 int set_itr;
161
149 struct sk_buff *rx_skb_top; 162 struct sk_buff *rx_skb_top;
150 163
151 struct e1000_queue_stats stats; 164 struct e1000_queue_stats stats;
@@ -274,6 +287,9 @@ struct e1000_adapter {
274 u32 test_icr; 287 u32 test_icr;
275 288
276 u32 msg_enable; 289 u32 msg_enable;
290 struct msix_entry *msix_entries;
291 int int_mode;
292 u32 eiac_mask;
277 293
278 u32 eeprom_wol; 294 u32 eeprom_wol;
279 u32 wol; 295 u32 wol;
@@ -306,6 +322,7 @@ struct e1000_info {
306#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) 322#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
307#define FLAG_HAS_JUMBO_FRAMES (1 << 7) 323#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
308#define FLAG_IS_ICH (1 << 9) 324#define FLAG_IS_ICH (1 << 9)
325#define FLAG_HAS_MSIX (1 << 10)
309#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) 326#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
310#define FLAG_IS_QUAD_PORT_A (1 << 12) 327#define FLAG_IS_QUAD_PORT_A (1 << 12)
311#define FLAG_IS_QUAD_PORT (1 << 13) 328#define FLAG_IS_QUAD_PORT (1 << 13)
@@ -364,6 +381,8 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
364extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 381extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
365extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 382extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
366extern void e1000e_update_stats(struct e1000_adapter *adapter); 383extern void e1000e_update_stats(struct e1000_adapter *adapter);
384extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
385extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
367 386
368extern unsigned int copybreak; 387extern unsigned int copybreak;
369 388
@@ -372,8 +391,10 @@ extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
372extern struct e1000_info e1000_82571_info; 391extern struct e1000_info e1000_82571_info;
373extern struct e1000_info e1000_82572_info; 392extern struct e1000_info e1000_82572_info;
374extern struct e1000_info e1000_82573_info; 393extern struct e1000_info e1000_82573_info;
394extern struct e1000_info e1000_82574_info;
375extern struct e1000_info e1000_ich8_info; 395extern struct e1000_info e1000_ich8_info;
376extern struct e1000_info e1000_ich9_info; 396extern struct e1000_info e1000_ich9_info;
397extern struct e1000_info e1000_ich10_info;
377extern struct e1000_info e1000_es2_info; 398extern struct e1000_info e1000_es2_info;
378 399
379extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); 400extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
@@ -446,10 +467,13 @@ extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
446extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); 467extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
447extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); 468extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
448extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); 469extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
470extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
449extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); 471extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
450extern s32 e1000e_determine_phy_address(struct e1000_hw *hw); 472extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
451extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); 473extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
452extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); 474extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
475extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
476extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
453extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); 477extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
454extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); 478extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
455extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); 479extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
@@ -520,7 +544,12 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
520 return hw->phy.ops.get_phy_info(hw); 544 return hw->phy.ops.get_phy_info(hw);
521} 545}
522 546
523extern bool e1000e_check_mng_mode(struct e1000_hw *hw); 547static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
548{
549 return hw->mac.ops.check_mng_mode(hw);
550}
551
552extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
524extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); 553extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
525extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); 554extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
526 555
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index dc552d7d6fac..da9c09c248ed 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1247,7 +1247,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1247} 1247}
1248 1248
1249static struct e1000_mac_operations es2_mac_ops = { 1249static struct e1000_mac_operations es2_mac_ops = {
1250 .mng_mode_enab = E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, 1250 .check_mng_mode = e1000e_check_mng_mode_generic,
1251 /* check_for_link dependent on media type */ 1251 /* check_for_link dependent on media type */
1252 .cleanup_led = e1000e_cleanup_led_generic, 1252 .cleanup_led = e1000e_cleanup_led_generic,
1253 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, 1253 .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e21c9e0f3738..52b762eb1745 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -568,6 +568,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
568 * and flush shadow RAM for 82573 controllers 568 * and flush shadow RAM for 82573 controllers
569 */ 569 */
570 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || 570 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
571 (hw->mac.type == e1000_82574) ||
571 (hw->mac.type == e1000_82573))) 572 (hw->mac.type == e1000_82573)))
572 e1000e_update_nvm_checksum(hw); 573 e1000e_update_nvm_checksum(hw);
573 574
@@ -779,8 +780,10 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
779 toggle = 0x7FFFF3FF; 780 toggle = 0x7FFFF3FF;
780 break; 781 break;
781 case e1000_82573: 782 case e1000_82573:
783 case e1000_82574:
782 case e1000_ich8lan: 784 case e1000_ich8lan:
783 case e1000_ich9lan: 785 case e1000_ich9lan:
786 case e1000_ich10lan:
784 toggle = 0x7FFFF033; 787 toggle = 0x7FFFF033;
785 break; 788 break;
786 default: 789 default:
@@ -833,7 +836,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
833 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 836 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
834 for (i = 0; i < mac->rar_entry_count; i++) 837 for (i = 0; i < mac->rar_entry_count; i++)
835 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), 838 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
836 0x8003FFFF, 0xFFFFFFFF); 839 ((mac->type == e1000_ich10lan) ?
840 0x8007FFFF : 0x8003FFFF),
841 0xFFFFFFFF);
837 842
838 for (i = 0; i < mac->mta_reg_count; i++) 843 for (i = 0; i < mac->mta_reg_count; i++)
839 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); 844 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
@@ -884,10 +889,18 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
884 u32 shared_int = 1; 889 u32 shared_int = 1;
885 u32 irq = adapter->pdev->irq; 890 u32 irq = adapter->pdev->irq;
886 int i; 891 int i;
892 int ret_val = 0;
893 int int_mode = E1000E_INT_MODE_LEGACY;
887 894
888 *data = 0; 895 *data = 0;
889 896
890 /* NOTE: we don't test MSI interrupts here, yet */ 897 /* NOTE: we don't test MSI/MSI-X interrupts here, yet */
898 if (adapter->int_mode == E1000E_INT_MODE_MSIX) {
899 int_mode = adapter->int_mode;
900 e1000e_reset_interrupt_capability(adapter);
901 adapter->int_mode = E1000E_INT_MODE_LEGACY;
902 e1000e_set_interrupt_capability(adapter);
903 }
891 /* Hook up test interrupt handler just for this test */ 904 /* Hook up test interrupt handler just for this test */
892 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 905 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
893 netdev)) { 906 netdev)) {
@@ -895,7 +908,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
895 } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, 908 } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
896 netdev->name, netdev)) { 909 netdev->name, netdev)) {
897 *data = 1; 910 *data = 1;
898 return -1; 911 ret_val = -1;
912 goto out;
899 } 913 }
900 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); 914 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
901 915
@@ -905,12 +919,23 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
905 919
906 /* Test each interrupt */ 920 /* Test each interrupt */
907 for (i = 0; i < 10; i++) { 921 for (i = 0; i < 10; i++) {
908 if ((adapter->flags & FLAG_IS_ICH) && (i == 8))
909 continue;
910
911 /* Interrupt to test */ 922 /* Interrupt to test */
912 mask = 1 << i; 923 mask = 1 << i;
913 924
925 if (adapter->flags & FLAG_IS_ICH) {
926 switch (mask) {
927 case E1000_ICR_RXSEQ:
928 continue;
929 case 0x00000100:
930 if (adapter->hw.mac.type == e1000_ich8lan ||
931 adapter->hw.mac.type == e1000_ich9lan)
932 continue;
933 break;
934 default:
935 break;
936 }
937 }
938
914 if (!shared_int) { 939 if (!shared_int) {
915 /* 940 /*
916 * Disable the interrupt to be reported in 941 * Disable the interrupt to be reported in
@@ -974,7 +999,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
974 /* Unhook test interrupt handler */ 999 /* Unhook test interrupt handler */
975 free_irq(irq, netdev); 1000 free_irq(irq, netdev);
976 1001
977 return *data; 1002out:
1003 if (int_mode == E1000E_INT_MODE_MSIX) {
1004 e1000e_reset_interrupt_capability(adapter);
1005 adapter->int_mode = int_mode;
1006 e1000e_set_interrupt_capability(adapter);
1007 }
1008
1009 return ret_val;
978} 1010}
979 1011
980static void e1000_free_desc_rings(struct e1000_adapter *adapter) 1012static void e1000_free_desc_rings(struct e1000_adapter *adapter)
@@ -1755,11 +1787,13 @@ static void e1000_led_blink_callback(unsigned long data)
1755static int e1000_phys_id(struct net_device *netdev, u32 data) 1787static int e1000_phys_id(struct net_device *netdev, u32 data)
1756{ 1788{
1757 struct e1000_adapter *adapter = netdev_priv(netdev); 1789 struct e1000_adapter *adapter = netdev_priv(netdev);
1790 struct e1000_hw *hw = &adapter->hw;
1758 1791
1759 if (!data) 1792 if (!data)
1760 data = INT_MAX; 1793 data = INT_MAX;
1761 1794
1762 if (adapter->hw.phy.type == e1000_phy_ife) { 1795 if ((hw->phy.type == e1000_phy_ife) ||
1796 (hw->mac.type == e1000_82574)) {
1763 if (!adapter->blink_timer.function) { 1797 if (!adapter->blink_timer.function) {
1764 init_timer(&adapter->blink_timer); 1798 init_timer(&adapter->blink_timer);
1765 adapter->blink_timer.function = 1799 adapter->blink_timer.function =
@@ -1769,16 +1803,16 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1769 mod_timer(&adapter->blink_timer, jiffies); 1803 mod_timer(&adapter->blink_timer, jiffies);
1770 msleep_interruptible(data * 1000); 1804 msleep_interruptible(data * 1000);
1771 del_timer_sync(&adapter->blink_timer); 1805 del_timer_sync(&adapter->blink_timer);
1772 e1e_wphy(&adapter->hw, 1806 if (hw->phy.type == e1000_phy_ife)
1773 IFE_PHY_SPECIAL_CONTROL_LED, 0); 1807 e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
1774 } else { 1808 } else {
1775 e1000e_blink_led(&adapter->hw); 1809 e1000e_blink_led(hw);
1776 msleep_interruptible(data * 1000); 1810 msleep_interruptible(data * 1000);
1777 } 1811 }
1778 1812
1779 adapter->hw.mac.ops.led_off(&adapter->hw); 1813 hw->mac.ops.led_off(hw);
1780 clear_bit(E1000_LED_ON, &adapter->led_status); 1814 clear_bit(E1000_LED_ON, &adapter->led_status);
1781 adapter->hw.mac.ops.cleanup_led(&adapter->hw); 1815 hw->mac.ops.cleanup_led(hw);
1782 1816
1783 return 0; 1817 return 0;
1784} 1818}
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 74f263acb172..f66ed37a7f76 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -65,7 +65,11 @@ enum e1e_registers {
65 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ 65 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ 66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ 67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
68 E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ 69 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
70 E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
71 E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
72#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
69 E1000_RCTL = 0x00100, /* Rx Control - RW */ 73 E1000_RCTL = 0x00100, /* Rx Control - RW */
70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ 74 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
71 E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */ 75 E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
@@ -332,6 +336,7 @@ enum e1e_registers {
332#define E1000_DEV_ID_82573E 0x108B 336#define E1000_DEV_ID_82573E 0x108B
333#define E1000_DEV_ID_82573E_IAMT 0x108C 337#define E1000_DEV_ID_82573E_IAMT 0x108C
334#define E1000_DEV_ID_82573L 0x109A 338#define E1000_DEV_ID_82573L 0x109A
339#define E1000_DEV_ID_82574L 0x10D3
335 340
336#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 341#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
337#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 342#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
@@ -346,6 +351,7 @@ enum e1e_registers {
346#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 351#define E1000_DEV_ID_ICH8_IFE_G 0x10C5
347#define E1000_DEV_ID_ICH8_IGP_M 0x104D 352#define E1000_DEV_ID_ICH8_IGP_M 0x104D
348#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD 353#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD
354#define E1000_DEV_ID_ICH9_BM 0x10E5
349#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 355#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5
350#define E1000_DEV_ID_ICH9_IGP_M 0x10BF 356#define E1000_DEV_ID_ICH9_IGP_M 0x10BF
351#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB 357#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB
@@ -356,6 +362,10 @@ enum e1e_registers {
356#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC 362#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC
357#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD 363#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD
358#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE 364#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
365#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
366#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
367
368#define E1000_REVISION_4 4
359 369
360#define E1000_FUNC_1 1 370#define E1000_FUNC_1 1
361 371
@@ -363,9 +373,11 @@ enum e1000_mac_type {
363 e1000_82571, 373 e1000_82571,
364 e1000_82572, 374 e1000_82572,
365 e1000_82573, 375 e1000_82573,
376 e1000_82574,
366 e1000_80003es2lan, 377 e1000_80003es2lan,
367 e1000_ich8lan, 378 e1000_ich8lan,
368 e1000_ich9lan, 379 e1000_ich9lan,
380 e1000_ich10lan,
369}; 381};
370 382
371enum e1000_media_type { 383enum e1000_media_type {
@@ -696,8 +708,7 @@ struct e1000_host_mng_command_info {
696 708
697/* Function pointers and static data for the MAC. */ 709/* Function pointers and static data for the MAC. */
698struct e1000_mac_operations { 710struct e1000_mac_operations {
699 u32 mng_mode_enab; 711 bool (*check_mng_mode)(struct e1000_hw *);
700
701 s32 (*check_for_link)(struct e1000_hw *); 712 s32 (*check_for_link)(struct e1000_hw *);
702 s32 (*cleanup_led)(struct e1000_hw *); 713 s32 (*cleanup_led)(struct e1000_hw *);
703 void (*clear_hw_cntrs)(struct e1000_hw *); 714 void (*clear_hw_cntrs)(struct e1000_hw *);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9e38452a738c..692251b60915 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -43,7 +43,9 @@
43 * 82567LM-2 Gigabit Network Connection 43 * 82567LM-2 Gigabit Network Connection
44 * 82567LF-2 Gigabit Network Connection 44 * 82567LF-2 Gigabit Network Connection
45 * 82567V-2 Gigabit Network Connection 45 * 82567V-2 Gigabit Network Connection
46 * 82562GT-3 10/100 Network Connection 46 * 82567LF-3 Gigabit Network Connection
47 * 82567LM-3 Gigabit Network Connection
48 * 82567LM-4 Gigabit Network Connection
47 */ 49 */
48 50
49#include <linux/netdevice.h> 51#include <linux/netdevice.h>
@@ -157,12 +159,15 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
157static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); 159static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
158static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, 160static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
159 u32 offset, u8 byte); 161 u32 offset, u8 byte);
162static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
163 u8 *data);
160static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, 164static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
161 u16 *data); 165 u16 *data);
162static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, 166static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
163 u8 size, u16 *data); 167 u8 size, u16 *data);
164static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); 168static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
165static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); 169static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
170static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
166 171
167static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 172static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
168{ 173{
@@ -393,6 +398,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
393 398
394 if (!timeout) { 399 if (!timeout) {
395 hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); 400 hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
401 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
402 ew32(EXTCNF_CTRL, extcnf_ctrl);
396 return -E1000_ERR_CONFIG; 403 return -E1000_ERR_CONFIG;
397 } 404 }
398 405
@@ -417,6 +424,22 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
417} 424}
418 425
419/** 426/**
427 * e1000_check_mng_mode_ich8lan - Checks management mode
428 * @hw: pointer to the HW structure
429 *
430 * This checks if the adapter has manageability enabled.
431 * This is a function pointer entry point only called by read/write
432 * routines for the PHY and NVM parts.
433 **/
434static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
435{
436 u32 fwsm = er32(FWSM);
437
438 return (fwsm & E1000_FWSM_MODE_MASK) ==
439 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
440}
441
442/**
420 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 443 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
421 * @hw: pointer to the HW structure 444 * @hw: pointer to the HW structure
422 * 445 *
@@ -897,6 +920,56 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
897} 920}
898 921
899/** 922/**
923 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
924 * @hw: pointer to the HW structure
925 * @bank: pointer to the variable that returns the active bank
926 *
927 * Reads signature byte from the NVM using the flash access registers.
928 **/
929static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
930{
931 struct e1000_nvm_info *nvm = &hw->nvm;
932 /* flash bank size is in words */
933 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
934 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
935 u8 bank_high_byte = 0;
936
937 if (hw->mac.type != e1000_ich10lan) {
938 if (er32(EECD) & E1000_EECD_SEC1VAL)
939 *bank = 1;
940 else
941 *bank = 0;
942 } else {
943 /*
944 * Make sure the signature for bank 0 is valid,
945 * if not check for bank1
946 */
947 e1000_read_flash_byte_ich8lan(hw, act_offset, &bank_high_byte);
948 if ((bank_high_byte & 0xC0) == 0x80) {
949 *bank = 0;
950 } else {
951 /*
952 * find if segment 1 is valid by verifying
953 * bit 15:14 = 10b in word 0x13
954 */
955 e1000_read_flash_byte_ich8lan(hw,
956 act_offset + bank1_offset,
957 &bank_high_byte);
958
959 /* bank1 has a valid signature equivalent to SEC1V */
960 if ((bank_high_byte & 0xC0) == 0x80) {
961 *bank = 1;
962 } else {
963 hw_dbg(hw, "ERROR: EEPROM not present\n");
964 return -E1000_ERR_NVM;
965 }
966 }
967 }
968
969 return 0;
970}
971
972/**
900 * e1000_read_nvm_ich8lan - Read word(s) from the NVM 973 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
901 * @hw: pointer to the HW structure 974 * @hw: pointer to the HW structure
902 * @offset: The offset (in bytes) of the word(s) to read. 975 * @offset: The offset (in bytes) of the word(s) to read.
@@ -912,6 +985,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
912 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 985 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
913 u32 act_offset; 986 u32 act_offset;
914 s32 ret_val; 987 s32 ret_val;
988 u32 bank = 0;
915 u16 i, word; 989 u16 i, word;
916 990
917 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 991 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
@@ -924,10 +998,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
924 if (ret_val) 998 if (ret_val)
925 return ret_val; 999 return ret_val;
926 1000
927 /* Start with the bank offset, then add the relative offset. */ 1001 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
928 act_offset = (er32(EECD) & E1000_EECD_SEC1VAL) 1002 if (ret_val)
929 ? nvm->flash_bank_size 1003 return ret_val;
930 : 0; 1004
1005 act_offset = (bank) ? nvm->flash_bank_size : 0;
931 act_offset += offset; 1006 act_offset += offset;
932 1007
933 for (i = 0; i < words; i++) { 1008 for (i = 0; i < words; i++) {
@@ -1075,6 +1150,29 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
1075} 1150}
1076 1151
1077/** 1152/**
1153 * e1000_read_flash_byte_ich8lan - Read byte from flash
1154 * @hw: pointer to the HW structure
1155 * @offset: The offset of the byte to read.
1156 * @data: Pointer to a byte to store the value read.
1157 *
1158 * Reads a single byte from the NVM using the flash access registers.
1159 **/
1160static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
1161 u8 *data)
1162{
1163 s32 ret_val;
1164 u16 word = 0;
1165
1166 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
1167 if (ret_val)
1168 return ret_val;
1169
1170 *data = (u8)word;
1171
1172 return 0;
1173}
1174
1175/**
1078 * e1000_read_flash_data_ich8lan - Read byte or word from NVM 1176 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
1079 * @hw: pointer to the HW structure 1177 * @hw: pointer to the HW structure
1080 * @offset: The offset (in bytes) of the byte or word to read. 1178 * @offset: The offset (in bytes) of the byte or word to read.
@@ -1205,7 +1303,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1205{ 1303{
1206 struct e1000_nvm_info *nvm = &hw->nvm; 1304 struct e1000_nvm_info *nvm = &hw->nvm;
1207 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 1305 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
1208 u32 i, act_offset, new_bank_offset, old_bank_offset; 1306 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
1209 s32 ret_val; 1307 s32 ret_val;
1210 u16 data; 1308 u16 data;
1211 1309
@@ -1225,7 +1323,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1225 * write to bank 0 etc. We also need to erase the segment that 1323 * write to bank 0 etc. We also need to erase the segment that
1226 * is going to be written 1324 * is going to be written
1227 */ 1325 */
1228 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { 1326 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1327 if (ret_val)
1328 return ret_val;
1329
1330 if (bank == 0) {
1229 new_bank_offset = nvm->flash_bank_size; 1331 new_bank_offset = nvm->flash_bank_size;
1230 old_bank_offset = 0; 1332 old_bank_offset = 0;
1231 e1000_erase_flash_bank_ich8lan(hw, 1); 1333 e1000_erase_flash_bank_ich8lan(hw, 1);
@@ -2189,13 +2291,14 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
2189 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation 2291 * 'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
2190 * to a lower speed. 2292 * to a lower speed.
2191 * 2293 *
2192 * Should only be called for ICH9 devices. 2294 * Should only be called for ICH9 and ICH10 devices.
2193 **/ 2295 **/
2194void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) 2296void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
2195{ 2297{
2196 u32 phy_ctrl; 2298 u32 phy_ctrl;
2197 2299
2198 if (hw->mac.type == e1000_ich9lan) { 2300 if ((hw->mac.type == e1000_ich10lan) ||
2301 (hw->mac.type == e1000_ich9lan)) {
2199 phy_ctrl = er32(PHY_CTRL); 2302 phy_ctrl = er32(PHY_CTRL);
2200 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | 2303 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU |
2201 E1000_PHY_CTRL_GBE_DISABLE; 2304 E1000_PHY_CTRL_GBE_DISABLE;
@@ -2253,6 +2356,39 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
2253} 2356}
2254 2357
2255/** 2358/**
2359 * e1000_get_cfg_done_ich8lan - Read config done bit
2360 * @hw: pointer to the HW structure
2361 *
2362 * Read the management control register for the config done bit for
2363 * completion status. NOTE: silicon which is EEPROM-less will fail trying
2364 * to read the config done bit, so an error is *ONLY* logged and returns
2365 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
2366 * would not be able to be reset or change link.
2367 **/
2368static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
2369{
2370 u32 bank = 0;
2371
2372 e1000e_get_cfg_done(hw);
2373
2374 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
2375 if (hw->mac.type != e1000_ich10lan) {
2376 if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
2377 (hw->phy.type == e1000_phy_igp_3)) {
2378 e1000e_phy_init_script_igp3(hw);
2379 }
2380 } else {
2381 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
2382 /* Maybe we should do a basic PHY config */
2383 hw_dbg(hw, "EEPROM not present\n");
2384 return -E1000_ERR_CONFIG;
2385 }
2386 }
2387
2388 return 0;
2389}
2390
2391/**
2256 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters 2392 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
2257 * @hw: pointer to the HW structure 2393 * @hw: pointer to the HW structure
2258 * 2394 *
@@ -2282,7 +2418,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
2282} 2418}
2283 2419
2284static struct e1000_mac_operations ich8_mac_ops = { 2420static struct e1000_mac_operations ich8_mac_ops = {
2285 .mng_mode_enab = E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT, 2421 .check_mng_mode = e1000_check_mng_mode_ich8lan,
2286 .check_for_link = e1000e_check_for_copper_link, 2422 .check_for_link = e1000e_check_for_copper_link,
2287 .cleanup_led = e1000_cleanup_led_ich8lan, 2423 .cleanup_led = e1000_cleanup_led_ich8lan,
2288 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, 2424 .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
@@ -2302,7 +2438,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
2302 .check_reset_block = e1000_check_reset_block_ich8lan, 2438 .check_reset_block = e1000_check_reset_block_ich8lan,
2303 .commit_phy = NULL, 2439 .commit_phy = NULL,
2304 .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, 2440 .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan,
2305 .get_cfg_done = e1000e_get_cfg_done, 2441 .get_cfg_done = e1000_get_cfg_done_ich8lan,
2306 .get_cable_length = e1000e_get_cable_length_igp_2, 2442 .get_cable_length = e1000e_get_cable_length_igp_2,
2307 .get_phy_info = e1000_get_phy_info_ich8lan, 2443 .get_phy_info = e1000_get_phy_info_ich8lan,
2308 .read_phy_reg = e1000e_read_phy_reg_igp, 2444 .read_phy_reg = e1000e_read_phy_reg_igp,
@@ -2357,3 +2493,20 @@ struct e1000_info e1000_ich9_info = {
2357 .nvm_ops = &ich8_nvm_ops, 2493 .nvm_ops = &ich8_nvm_ops,
2358}; 2494};
2359 2495
2496struct e1000_info e1000_ich10_info = {
2497 .mac = e1000_ich10lan,
2498 .flags = FLAG_HAS_JUMBO_FRAMES
2499 | FLAG_IS_ICH
2500 | FLAG_HAS_WOL
2501 | FLAG_RX_CSUM_ENABLED
2502 | FLAG_HAS_CTRLEXT_ON_LOAD
2503 | FLAG_HAS_AMT
2504 | FLAG_HAS_ERT
2505 | FLAG_HAS_FLASH
2506 | FLAG_APME_IN_WUC,
2507 .pba = 10,
2508 .get_variants = e1000_get_variants_ich8lan,
2509 .mac_ops = &ich8_mac_ops,
2510 .phy_ops = &ich8_phy_ops,
2511 .nvm_ops = &ich8_nvm_ops,
2512};
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index f1f4e9dfd0a0..c7337306ffa7 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -2222,17 +2222,18 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2222} 2222}
2223 2223
2224/** 2224/**
2225 * e1000e_check_mng_mode - check management mode 2225 * e1000e_check_mng_mode_generic - check management mode
2226 * @hw: pointer to the HW structure 2226 * @hw: pointer to the HW structure
2227 * 2227 *
2228 * Reads the firmware semaphore register and returns true (>0) if 2228 * Reads the firmware semaphore register and returns true (>0) if
2229 * manageability is enabled, else false (0). 2229 * manageability is enabled, else false (0).
2230 **/ 2230 **/
2231bool e1000e_check_mng_mode(struct e1000_hw *hw) 2231bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
2232{ 2232{
2233 u32 fwsm = er32(FWSM); 2233 u32 fwsm = er32(FWSM);
2234 2234
2235 return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab; 2235 return (fwsm & E1000_FWSM_MODE_MASK) ==
2236 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
2236} 2237}
2237 2238
2238/** 2239/**
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d266510c8a94..24d05cb70055 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -55,9 +55,11 @@ static const struct e1000_info *e1000_info_tbl[] = {
55 [board_82571] = &e1000_82571_info, 55 [board_82571] = &e1000_82571_info,
56 [board_82572] = &e1000_82572_info, 56 [board_82572] = &e1000_82572_info,
57 [board_82573] = &e1000_82573_info, 57 [board_82573] = &e1000_82573_info,
58 [board_82574] = &e1000_82574_info,
58 [board_80003es2lan] = &e1000_es2_info, 59 [board_80003es2lan] = &e1000_es2_info,
59 [board_ich8lan] = &e1000_ich8_info, 60 [board_ich8lan] = &e1000_ich8_info,
60 [board_ich9lan] = &e1000_ich9_info, 61 [board_ich9lan] = &e1000_ich9_info,
62 [board_ich10lan] = &e1000_ich10_info,
61}; 63};
62 64
63#ifdef DEBUG 65#ifdef DEBUG
@@ -1179,8 +1181,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
1179 struct net_device *netdev = data; 1181 struct net_device *netdev = data;
1180 struct e1000_adapter *adapter = netdev_priv(netdev); 1182 struct e1000_adapter *adapter = netdev_priv(netdev);
1181 struct e1000_hw *hw = &adapter->hw; 1183 struct e1000_hw *hw = &adapter->hw;
1182
1183 u32 rctl, icr = er32(ICR); 1184 u32 rctl, icr = er32(ICR);
1185
1184 if (!icr) 1186 if (!icr)
1185 return IRQ_NONE; /* Not our interrupt */ 1187 return IRQ_NONE; /* Not our interrupt */
1186 1188
@@ -1236,6 +1238,263 @@ static irqreturn_t e1000_intr(int irq, void *data)
1236 return IRQ_HANDLED; 1238 return IRQ_HANDLED;
1237} 1239}
1238 1240
1241static irqreturn_t e1000_msix_other(int irq, void *data)
1242{
1243 struct net_device *netdev = data;
1244 struct e1000_adapter *adapter = netdev_priv(netdev);
1245 struct e1000_hw *hw = &adapter->hw;
1246 u32 icr = er32(ICR);
1247
1248 if (!(icr & E1000_ICR_INT_ASSERTED)) {
1249 ew32(IMS, E1000_IMS_OTHER);
1250 return IRQ_NONE;
1251 }
1252
1253 if (icr & adapter->eiac_mask)
1254 ew32(ICS, (icr & adapter->eiac_mask));
1255
1256 if (icr & E1000_ICR_OTHER) {
1257 if (!(icr & E1000_ICR_LSC))
1258 goto no_link_interrupt;
1259 hw->mac.get_link_status = 1;
1260 /* guard against interrupt when we're going down */
1261 if (!test_bit(__E1000_DOWN, &adapter->state))
1262 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1263 }
1264
1265no_link_interrupt:
1266 ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
1267
1268 return IRQ_HANDLED;
1269}
1270
1271
1272static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
1273{
1274 struct net_device *netdev = data;
1275 struct e1000_adapter *adapter = netdev_priv(netdev);
1276 struct e1000_hw *hw = &adapter->hw;
1277 struct e1000_ring *tx_ring = adapter->tx_ring;
1278
1279
1280 adapter->total_tx_bytes = 0;
1281 adapter->total_tx_packets = 0;
1282
1283 if (!e1000_clean_tx_irq(adapter))
1284 /* Ring was not completely cleaned, so fire another interrupt */
1285 ew32(ICS, tx_ring->ims_val);
1286
1287 return IRQ_HANDLED;
1288}
1289
1290static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1291{
1292 struct net_device *netdev = data;
1293 struct e1000_adapter *adapter = netdev_priv(netdev);
1294
1295 /* Write the ITR value calculated at the end of the
1296 * previous interrupt.
1297 */
1298 if (adapter->rx_ring->set_itr) {
1299 writel(1000000000 / (adapter->rx_ring->itr_val * 256),
1300 adapter->hw.hw_addr + adapter->rx_ring->itr_register);
1301 adapter->rx_ring->set_itr = 0;
1302 }
1303
1304 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
1305 adapter->total_rx_bytes = 0;
1306 adapter->total_rx_packets = 0;
1307 __netif_rx_schedule(netdev, &adapter->napi);
1308 }
1309 return IRQ_HANDLED;
1310}
1311
1312/**
1313 * e1000_configure_msix - Configure MSI-X hardware
1314 *
1315 * e1000_configure_msix sets up the hardware to properly
1316 * generate MSI-X interrupts.
1317 **/
1318static void e1000_configure_msix(struct e1000_adapter *adapter)
1319{
1320 struct e1000_hw *hw = &adapter->hw;
1321 struct e1000_ring *rx_ring = adapter->rx_ring;
1322 struct e1000_ring *tx_ring = adapter->tx_ring;
1323 int vector = 0;
1324 u32 ctrl_ext, ivar = 0;
1325
1326 adapter->eiac_mask = 0;
1327
1328 /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
1329 if (hw->mac.type == e1000_82574) {
1330 u32 rfctl = er32(RFCTL);
1331 rfctl |= E1000_RFCTL_ACK_DIS;
1332 ew32(RFCTL, rfctl);
1333 }
1334
1335#define E1000_IVAR_INT_ALLOC_VALID 0x8
1336 /* Configure Rx vector */
1337 rx_ring->ims_val = E1000_IMS_RXQ0;
1338 adapter->eiac_mask |= rx_ring->ims_val;
1339 if (rx_ring->itr_val)
1340 writel(1000000000 / (rx_ring->itr_val * 256),
1341 hw->hw_addr + rx_ring->itr_register);
1342 else
1343 writel(1, hw->hw_addr + rx_ring->itr_register);
1344 ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
1345
1346 /* Configure Tx vector */
1347 tx_ring->ims_val = E1000_IMS_TXQ0;
1348 vector++;
1349 if (tx_ring->itr_val)
1350 writel(1000000000 / (tx_ring->itr_val * 256),
1351 hw->hw_addr + tx_ring->itr_register);
1352 else
1353 writel(1, hw->hw_addr + tx_ring->itr_register);
1354 adapter->eiac_mask |= tx_ring->ims_val;
1355 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
1356
1357 /* set vector for Other Causes, e.g. link changes */
1358 vector++;
1359 ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
1360 if (rx_ring->itr_val)
1361 writel(1000000000 / (rx_ring->itr_val * 256),
1362 hw->hw_addr + E1000_EITR_82574(vector));
1363 else
1364 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
1365
1366 /* Cause Tx interrupts on every write back */
1367 ivar |= (1 << 31);
1368
1369 ew32(IVAR, ivar);
1370
1371 /* enable MSI-X PBA support */
1372 ctrl_ext = er32(CTRL_EXT);
1373 ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
1374
1375 /* Auto-Mask Other interrupts upon ICR read */
1376#define E1000_EIAC_MASK_82574 0x01F00000
1377 ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
1378 ctrl_ext |= E1000_CTRL_EXT_EIAME;
1379 ew32(CTRL_EXT, ctrl_ext);
1380 e1e_flush();
1381}
1382
1383void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1384{
1385 if (adapter->msix_entries) {
1386 pci_disable_msix(adapter->pdev);
1387 kfree(adapter->msix_entries);
1388 adapter->msix_entries = NULL;
1389 } else if (adapter->flags & FLAG_MSI_ENABLED) {
1390 pci_disable_msi(adapter->pdev);
1391 adapter->flags &= ~FLAG_MSI_ENABLED;
1392 }
1393
1394 return;
1395}
1396
1397/**
1398 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
1399 *
1400 * Attempt to configure interrupts using the best available
1401 * capabilities of the hardware and kernel.
1402 **/
1403void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1404{
1405 int err;
1406 int numvecs, i;
1407
1408
1409 switch (adapter->int_mode) {
1410 case E1000E_INT_MODE_MSIX:
1411 if (adapter->flags & FLAG_HAS_MSIX) {
1412 numvecs = 3; /* RxQ0, TxQ0 and other */
1413 adapter->msix_entries = kcalloc(numvecs,
1414 sizeof(struct msix_entry),
1415 GFP_KERNEL);
1416 if (adapter->msix_entries) {
1417 for (i = 0; i < numvecs; i++)
1418 adapter->msix_entries[i].entry = i;
1419
1420 err = pci_enable_msix(adapter->pdev,
1421 adapter->msix_entries,
1422 numvecs);
1423 if (err == 0)
1424 return;
1425 }
1426 /* MSI-X failed, so fall through and try MSI */
1427 e_err("Failed to initialize MSI-X interrupts. "
1428 "Falling back to MSI interrupts.\n");
1429 e1000e_reset_interrupt_capability(adapter);
1430 }
1431 adapter->int_mode = E1000E_INT_MODE_MSI;
1432 /* Fall through */
1433 case E1000E_INT_MODE_MSI:
1434 if (!pci_enable_msi(adapter->pdev)) {
1435 adapter->flags |= FLAG_MSI_ENABLED;
1436 } else {
1437 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1438 e_err("Failed to initialize MSI interrupts. Falling "
1439 "back to legacy interrupts.\n");
1440 }
1441 /* Fall through */
1442 case E1000E_INT_MODE_LEGACY:
1443 /* Don't do anything; this is the system default */
1444 break;
1445 }
1446
1447 return;
1448}
1449
1450/**
1451 * e1000_request_msix - Initialize MSI-X interrupts
1452 *
1453 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
1454 * kernel.
1455 **/
1456static int e1000_request_msix(struct e1000_adapter *adapter)
1457{
1458 struct net_device *netdev = adapter->netdev;
1459 int err = 0, vector = 0;
1460
1461 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1462 sprintf(adapter->rx_ring->name, "%s-rx0", netdev->name);
1463 else
1464 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1465 err = request_irq(adapter->msix_entries[vector].vector,
1466 &e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1467 netdev);
1468 if (err)
1469 goto out;
1470 adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
1471 adapter->rx_ring->itr_val = adapter->itr;
1472 vector++;
1473
1474 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1475 sprintf(adapter->tx_ring->name, "%s-tx0", netdev->name);
1476 else
1477 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1478 err = request_irq(adapter->msix_entries[vector].vector,
1479 &e1000_intr_msix_tx, 0, adapter->tx_ring->name,
1480 netdev);
1481 if (err)
1482 goto out;
1483 adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
1484 adapter->tx_ring->itr_val = adapter->itr;
1485 vector++;
1486
1487 err = request_irq(adapter->msix_entries[vector].vector,
1488 &e1000_msix_other, 0, netdev->name, netdev);
1489 if (err)
1490 goto out;
1491
1492 e1000_configure_msix(adapter);
1493 return 0;
1494out:
1495 return err;
1496}
1497
1239/** 1498/**
1240 * e1000_request_irq - initialize interrupts 1499 * e1000_request_irq - initialize interrupts
1241 * 1500 *
@@ -1245,28 +1504,32 @@ static irqreturn_t e1000_intr(int irq, void *data)
1245static int e1000_request_irq(struct e1000_adapter *adapter) 1504static int e1000_request_irq(struct e1000_adapter *adapter)
1246{ 1505{
1247 struct net_device *netdev = adapter->netdev; 1506 struct net_device *netdev = adapter->netdev;
1248 int irq_flags = IRQF_SHARED;
1249 int err; 1507 int err;
1250 1508
1251 if (!(adapter->flags & FLAG_MSI_TEST_FAILED)) { 1509 if (adapter->msix_entries) {
1252 err = pci_enable_msi(adapter->pdev); 1510 err = e1000_request_msix(adapter);
1253 if (!err) { 1511 if (!err)
1254 adapter->flags |= FLAG_MSI_ENABLED; 1512 return err;
1255 irq_flags = 0; 1513 /* fall back to MSI */
1256 } 1514 e1000e_reset_interrupt_capability(adapter);
1515 adapter->int_mode = E1000E_INT_MODE_MSI;
1516 e1000e_set_interrupt_capability(adapter);
1517 }
1518 if (adapter->flags & FLAG_MSI_ENABLED) {
1519 err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0,
1520 netdev->name, netdev);
1521 if (!err)
1522 return err;
1523
1524 /* fall back to legacy interrupt */
1525 e1000e_reset_interrupt_capability(adapter);
1526 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1257 } 1527 }
1258 1528
1259 err = request_irq(adapter->pdev->irq, 1529 err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED,
1260 ((adapter->flags & FLAG_MSI_ENABLED) ? 1530 netdev->name, netdev);
1261 &e1000_intr_msi : &e1000_intr), 1531 if (err)
1262 irq_flags, netdev->name, netdev);
1263 if (err) {
1264 if (adapter->flags & FLAG_MSI_ENABLED) {
1265 pci_disable_msi(adapter->pdev);
1266 adapter->flags &= ~FLAG_MSI_ENABLED;
1267 }
1268 e_err("Unable to allocate interrupt, Error: %d\n", err); 1532 e_err("Unable to allocate interrupt, Error: %d\n", err);
1269 }
1270 1533
1271 return err; 1534 return err;
1272} 1535}
@@ -1275,11 +1538,21 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
1275{ 1538{
1276 struct net_device *netdev = adapter->netdev; 1539 struct net_device *netdev = adapter->netdev;
1277 1540
1278 free_irq(adapter->pdev->irq, netdev); 1541 if (adapter->msix_entries) {
1279 if (adapter->flags & FLAG_MSI_ENABLED) { 1542 int vector = 0;
1280 pci_disable_msi(adapter->pdev); 1543
1281 adapter->flags &= ~FLAG_MSI_ENABLED; 1544 free_irq(adapter->msix_entries[vector].vector, netdev);
1545 vector++;
1546
1547 free_irq(adapter->msix_entries[vector].vector, netdev);
1548 vector++;
1549
1550 /* Other Causes interrupt vector */
1551 free_irq(adapter->msix_entries[vector].vector, netdev);
1552 return;
1282 } 1553 }
1554
1555 free_irq(adapter->pdev->irq, netdev);
1283} 1556}
1284 1557
1285/** 1558/**
@@ -1290,6 +1563,8 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
1290 struct e1000_hw *hw = &adapter->hw; 1563 struct e1000_hw *hw = &adapter->hw;
1291 1564
1292 ew32(IMC, ~0); 1565 ew32(IMC, ~0);
1566 if (adapter->msix_entries)
1567 ew32(EIAC_82574, 0);
1293 e1e_flush(); 1568 e1e_flush();
1294 synchronize_irq(adapter->pdev->irq); 1569 synchronize_irq(adapter->pdev->irq);
1295} 1570}
@@ -1301,7 +1576,12 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
1301{ 1576{
1302 struct e1000_hw *hw = &adapter->hw; 1577 struct e1000_hw *hw = &adapter->hw;
1303 1578
1304 ew32(IMS, IMS_ENABLE_MASK); 1579 if (adapter->msix_entries) {
1580 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
1581 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
1582 } else {
1583 ew32(IMS, IMS_ENABLE_MASK);
1584 }
1305 e1e_flush(); 1585 e1e_flush();
1306} 1586}
1307 1587
@@ -1551,9 +1831,8 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
1551 * traffic pattern. Constants in this function were computed 1831 * traffic pattern. Constants in this function were computed
1552 * based on theoretical maximum wire speed and thresholds were set based 1832 * based on theoretical maximum wire speed and thresholds were set based
1553 * on testing data as well as attempting to minimize response time 1833 * on testing data as well as attempting to minimize response time
1554 * while increasing bulk throughput. 1834 * while increasing bulk throughput. This functionality is controlled
1555 * this functionality is controlled by the InterruptThrottleRate module 1835 * by the InterruptThrottleRate module parameter.
1556 * parameter (see e1000_param.c)
1557 **/ 1836 **/
1558static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 1837static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
1559 u16 itr_setting, int packets, 1838 u16 itr_setting, int packets,
@@ -1661,11 +1940,37 @@ set_itr_now:
1661 min(adapter->itr + (new_itr >> 2), new_itr) : 1940 min(adapter->itr + (new_itr >> 2), new_itr) :
1662 new_itr; 1941 new_itr;
1663 adapter->itr = new_itr; 1942 adapter->itr = new_itr;
1664 ew32(ITR, 1000000000 / (new_itr * 256)); 1943 adapter->rx_ring->itr_val = new_itr;
1944 if (adapter->msix_entries)
1945 adapter->rx_ring->set_itr = 1;
1946 else
1947 ew32(ITR, 1000000000 / (new_itr * 256));
1665 } 1948 }
1666} 1949}
1667 1950
1668/** 1951/**
1952 * e1000_alloc_queues - Allocate memory for all rings
1953 * @adapter: board private structure to initialize
1954 **/
1955static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1956{
1957 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1958 if (!adapter->tx_ring)
1959 goto err;
1960
1961 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
1962 if (!adapter->rx_ring)
1963 goto err;
1964
1965 return 0;
1966err:
1967 e_err("Unable to allocate memory for queues\n");
1968 kfree(adapter->rx_ring);
1969 kfree(adapter->tx_ring);
1970 return -ENOMEM;
1971}
1972
1973/**
1669 * e1000_clean - NAPI Rx polling callback 1974 * e1000_clean - NAPI Rx polling callback
1670 * @napi: struct associated with this polling callback 1975 * @napi: struct associated with this polling callback
1671 * @budget: amount of packets driver is allowed to process this poll 1976 * @budget: amount of packets driver is allowed to process this poll
@@ -1673,12 +1978,17 @@ set_itr_now:
1673static int e1000_clean(struct napi_struct *napi, int budget) 1978static int e1000_clean(struct napi_struct *napi, int budget)
1674{ 1979{
1675 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 1980 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
1981 struct e1000_hw *hw = &adapter->hw;
1676 struct net_device *poll_dev = adapter->netdev; 1982 struct net_device *poll_dev = adapter->netdev;
1677 int tx_cleaned = 0, work_done = 0; 1983 int tx_cleaned = 0, work_done = 0;
1678 1984
1679 /* Must NOT use netdev_priv macro here. */ 1985 /* Must NOT use netdev_priv macro here. */
1680 adapter = poll_dev->priv; 1986 adapter = poll_dev->priv;
1681 1987
1988 if (adapter->msix_entries &&
1989 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
1990 goto clean_rx;
1991
1682 /* 1992 /*
1683 * e1000_clean is called per-cpu. This lock protects 1993 * e1000_clean is called per-cpu. This lock protects
1684 * tx_ring from being cleaned by multiple cpus 1994 * tx_ring from being cleaned by multiple cpus
@@ -1690,6 +2000,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
1690 spin_unlock(&adapter->tx_queue_lock); 2000 spin_unlock(&adapter->tx_queue_lock);
1691 } 2001 }
1692 2002
2003clean_rx:
1693 adapter->clean_rx(adapter, &work_done, budget); 2004 adapter->clean_rx(adapter, &work_done, budget);
1694 2005
1695 if (tx_cleaned) 2006 if (tx_cleaned)
@@ -1700,7 +2011,10 @@ static int e1000_clean(struct napi_struct *napi, int budget)
1700 if (adapter->itr_setting & 3) 2011 if (adapter->itr_setting & 3)
1701 e1000_set_itr(adapter); 2012 e1000_set_itr(adapter);
1702 netif_rx_complete(poll_dev, napi); 2013 netif_rx_complete(poll_dev, napi);
1703 e1000_irq_enable(adapter); 2014 if (adapter->msix_entries)
2015 ew32(IMS, adapter->rx_ring->ims_val);
2016 else
2017 e1000_irq_enable(adapter);
1704 } 2018 }
1705 2019
1706 return work_done; 2020 return work_done;
@@ -2496,6 +2810,8 @@ int e1000e_up(struct e1000_adapter *adapter)
2496 clear_bit(__E1000_DOWN, &adapter->state); 2810 clear_bit(__E1000_DOWN, &adapter->state);
2497 2811
2498 napi_enable(&adapter->napi); 2812 napi_enable(&adapter->napi);
2813 if (adapter->msix_entries)
2814 e1000_configure_msix(adapter);
2499 e1000_irq_enable(adapter); 2815 e1000_irq_enable(adapter);
2500 2816
2501 /* fire a link change interrupt to start the watchdog */ 2817 /* fire a link change interrupt to start the watchdog */
@@ -2579,13 +2895,10 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2579 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2895 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2580 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 2896 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2581 2897
2582 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2898 e1000e_set_interrupt_capability(adapter);
2583 if (!adapter->tx_ring)
2584 goto err;
2585 2899
2586 adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2900 if (e1000_alloc_queues(adapter))
2587 if (!adapter->rx_ring) 2901 return -ENOMEM;
2588 goto err;
2589 2902
2590 spin_lock_init(&adapter->tx_queue_lock); 2903 spin_lock_init(&adapter->tx_queue_lock);
2591 2904
@@ -2596,12 +2909,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2596 2909
2597 set_bit(__E1000_DOWN, &adapter->state); 2910 set_bit(__E1000_DOWN, &adapter->state);
2598 return 0; 2911 return 0;
2599
2600err:
2601 e_err("Unable to allocate memory for queues\n");
2602 kfree(adapter->rx_ring);
2603 kfree(adapter->tx_ring);
2604 return -ENOMEM;
2605} 2912}
2606 2913
2607/** 2914/**
@@ -2643,6 +2950,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2643 2950
2644 /* free the real vector and request a test handler */ 2951 /* free the real vector and request a test handler */
2645 e1000_free_irq(adapter); 2952 e1000_free_irq(adapter);
2953 e1000e_reset_interrupt_capability(adapter);
2646 2954
2647 /* Assume that the test fails, if it succeeds then the test 2955 /* Assume that the test fails, if it succeeds then the test
2648 * MSI irq handler will unset this flag */ 2956 * MSI irq handler will unset this flag */
@@ -2673,6 +2981,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2673 rmb(); 2981 rmb();
2674 2982
2675 if (adapter->flags & FLAG_MSI_TEST_FAILED) { 2983 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
2984 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2676 err = -EIO; 2985 err = -EIO;
2677 e_info("MSI interrupt test failed!\n"); 2986 e_info("MSI interrupt test failed!\n");
2678 } 2987 }
@@ -2686,7 +2995,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
2686 /* okay so the test worked, restore settings */ 2995 /* okay so the test worked, restore settings */
2687 e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); 2996 e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name);
2688msi_test_failed: 2997msi_test_failed:
2689 /* restore the original vector, even if it failed */ 2998 e1000e_set_interrupt_capability(adapter);
2690 e1000_request_irq(adapter); 2999 e1000_request_irq(adapter);
2691 return err; 3000 return err;
2692} 3001}
@@ -2796,7 +3105,7 @@ static int e1000_open(struct net_device *netdev)
2796 * ignore e1000e MSI messages, which means we need to test our MSI 3105 * ignore e1000e MSI messages, which means we need to test our MSI
2797 * interrupt now 3106 * interrupt now
2798 */ 3107 */
2799 { 3108 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
2800 err = e1000_test_msi(adapter); 3109 err = e1000_test_msi(adapter);
2801 if (err) { 3110 if (err) {
2802 e_err("Interrupt allocation failed\n"); 3111 e_err("Interrupt allocation failed\n");
@@ -2988,7 +3297,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2988 3297
2989 adapter->stats.algnerrc += er32(ALGNERRC); 3298 adapter->stats.algnerrc += er32(ALGNERRC);
2990 adapter->stats.rxerrc += er32(RXERRC); 3299 adapter->stats.rxerrc += er32(RXERRC);
2991 adapter->stats.tncrs += er32(TNCRS); 3300 if (hw->mac.type != e1000_82574)
3301 adapter->stats.tncrs += er32(TNCRS);
2992 adapter->stats.cexterr += er32(CEXTERR); 3302 adapter->stats.cexterr += er32(CEXTERR);
2993 adapter->stats.tsctc += er32(TSCTC); 3303 adapter->stats.tsctc += er32(TSCTC);
2994 adapter->stats.tsctfc += er32(TSCTFC); 3304 adapter->stats.tsctfc += er32(TSCTFC);
@@ -3201,6 +3511,27 @@ static void e1000_watchdog_task(struct work_struct *work)
3201 &adapter->link_duplex); 3511 &adapter->link_duplex);
3202 e1000_print_link_info(adapter); 3512 e1000_print_link_info(adapter);
3203 /* 3513 /*
3514 * On supported PHYs, check for duplex mismatch only
3515 * if link has autonegotiated at 10/100 half
3516 */
3517 if ((hw->phy.type == e1000_phy_igp_3 ||
3518 hw->phy.type == e1000_phy_bm) &&
3519 (hw->mac.autoneg == true) &&
3520 (adapter->link_speed == SPEED_10 ||
3521 adapter->link_speed == SPEED_100) &&
3522 (adapter->link_duplex == HALF_DUPLEX)) {
3523 u16 autoneg_exp;
3524
3525 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
3526
3527 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
3528 e_info("Autonegotiated half duplex but"
3529 " link partner cannot autoneg. "
3530 " Try forcing full duplex if "
3531 "link gets many collisions.\n");
3532 }
3533
3534 /*
3204 * tweak tx_queue_len according to speed/duplex 3535 * tweak tx_queue_len according to speed/duplex
3205 * and adjust the timeout factor 3536 * and adjust the timeout factor
3206 */ 3537 */
@@ -3315,7 +3646,10 @@ link_up:
3315 } 3646 }
3316 3647
3317 /* Cause software interrupt to ensure Rx ring is cleaned */ 3648 /* Cause software interrupt to ensure Rx ring is cleaned */
3318 ew32(ICS, E1000_ICS_RXDMT0); 3649 if (adapter->msix_entries)
3650 ew32(ICS, adapter->rx_ring->ims_val);
3651 else
3652 ew32(ICS, E1000_ICS_RXDMT0);
3319 3653
3320 /* Force detection of hung controller every watchdog period */ 3654 /* Force detection of hung controller every watchdog period */
3321 adapter->detect_tx_hung = 1; 3655 adapter->detect_tx_hung = 1;
@@ -4032,6 +4366,7 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4032 e1000e_down(adapter); 4366 e1000e_down(adapter);
4033 e1000_free_irq(adapter); 4367 e1000_free_irq(adapter);
4034 } 4368 }
4369 e1000e_reset_interrupt_capability(adapter);
4035 4370
4036 retval = pci_save_state(pdev); 4371 retval = pci_save_state(pdev);
4037 if (retval) 4372 if (retval)
@@ -4158,6 +4493,7 @@ static int e1000_resume(struct pci_dev *pdev)
4158 pci_enable_wake(pdev, PCI_D3hot, 0); 4493 pci_enable_wake(pdev, PCI_D3hot, 0);
4159 pci_enable_wake(pdev, PCI_D3cold, 0); 4494 pci_enable_wake(pdev, PCI_D3cold, 0);
4160 4495
4496 e1000e_set_interrupt_capability(adapter);
4161 if (netif_running(netdev)) { 4497 if (netif_running(netdev)) {
4162 err = e1000_request_irq(adapter); 4498 err = e1000_request_irq(adapter);
4163 if (err) 4499 if (err)
@@ -4335,13 +4671,15 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4335 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); 4671 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
4336 if (!(le16_to_cpu(buf) & (1 << 0))) { 4672 if (!(le16_to_cpu(buf) & (1 << 0))) {
4337 /* Deep Smart Power Down (DSPD) */ 4673 /* Deep Smart Power Down (DSPD) */
4338 e_warn("Warning: detected DSPD enabled in EEPROM\n"); 4674 dev_warn(&adapter->pdev->dev,
4675 "Warning: detected DSPD enabled in EEPROM\n");
4339 } 4676 }
4340 4677
4341 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); 4678 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4342 if (le16_to_cpu(buf) & (3 << 2)) { 4679 if (le16_to_cpu(buf) & (3 << 2)) {
4343 /* ASPM enable */ 4680 /* ASPM enable */
4344 e_warn("Warning: detected ASPM enabled in EEPROM\n"); 4681 dev_warn(&adapter->pdev->dev,
4682 "Warning: detected ASPM enabled in EEPROM\n");
4345 } 4683 }
4346} 4684}
4347 4685
@@ -4467,6 +4805,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4467 4805
4468 adapter->bd_number = cards_found++; 4806 adapter->bd_number = cards_found++;
4469 4807
4808 e1000e_check_options(adapter);
4809
4470 /* setup adapter struct */ 4810 /* setup adapter struct */
4471 err = e1000_sw_init(adapter); 4811 err = e1000_sw_init(adapter);
4472 if (err) 4812 if (err)
@@ -4573,8 +4913,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4573 INIT_WORK(&adapter->reset_task, e1000_reset_task); 4913 INIT_WORK(&adapter->reset_task, e1000_reset_task);
4574 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 4914 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
4575 4915
4576 e1000e_check_options(adapter);
4577
4578 /* Initialize link parameters. User can change them with ethtool */ 4916 /* Initialize link parameters. User can change them with ethtool */
4579 adapter->hw.mac.autoneg = 1; 4917 adapter->hw.mac.autoneg = 1;
4580 adapter->fc_autoneg = 1; 4918 adapter->fc_autoneg = 1;
@@ -4704,6 +5042,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4704 if (!e1000_check_reset_block(&adapter->hw)) 5042 if (!e1000_check_reset_block(&adapter->hw))
4705 e1000_phy_hw_reset(&adapter->hw); 5043 e1000_phy_hw_reset(&adapter->hw);
4706 5044
5045 e1000e_reset_interrupt_capability(adapter);
4707 kfree(adapter->tx_ring); 5046 kfree(adapter->tx_ring);
4708 kfree(adapter->rx_ring); 5047 kfree(adapter->rx_ring);
4709 5048
@@ -4745,6 +5084,8 @@ static struct pci_device_id e1000_pci_tbl[] = {
4745 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 5084 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
4746 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 5085 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
4747 5086
5087 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
5088
4748 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 5089 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
4749 board_80003es2lan }, 5090 board_80003es2lan },
4750 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), 5091 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
@@ -4767,6 +5108,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
4767 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 5108 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
4768 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, 5109 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
4769 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, 5110 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
5111 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
4770 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, 5112 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
4771 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, 5113 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
4772 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, 5114 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
@@ -4775,6 +5117,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
4775 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, 5117 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
4776 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, 5118 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
4777 5119
5120 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
5121 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
5122
4778 { } /* terminate list */ 5123 { } /* terminate list */
4779}; 5124};
4780MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 5125MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index ed912e023a72..f46db6cda487 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -114,6 +114,15 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
114#define DEFAULT_ITR 3 114#define DEFAULT_ITR 3
115#define MAX_ITR 100000 115#define MAX_ITR 100000
116#define MIN_ITR 100 116#define MIN_ITR 100
117/* IntMode (Interrupt Mode)
118 *
119 * Valid Range: 0 - 2
120 *
121 * Default Value: 2 (MSI-X)
122 */
123E1000_PARAM(IntMode, "Interrupt Mode");
124#define MAX_INTMODE 2
125#define MIN_INTMODE 0
117 126
118/* 127/*
119 * Enable Smart Power Down of the PHY 128 * Enable Smart Power Down of the PHY
@@ -352,6 +361,24 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
352 adapter->itr = 20000; 361 adapter->itr = 20000;
353 } 362 }
354 } 363 }
364 { /* Interrupt Mode */
365 struct e1000_option opt = {
366 .type = range_option,
367 .name = "Interrupt Mode",
368 .err = "defaulting to 2 (MSI-X)",
369 .def = E1000E_INT_MODE_MSIX,
370 .arg = { .r = { .min = MIN_INTMODE,
371 .max = MAX_INTMODE } }
372 };
373
374 if (num_IntMode > bd) {
375 unsigned int int_mode = IntMode[bd];
376 e1000_validate_option(&int_mode, &opt, adapter);
377 adapter->int_mode = int_mode;
378 } else {
379 adapter->int_mode = opt.def;
380 }
381 }
355 { /* Smart Power Down */ 382 { /* Smart Power Down */
356 const struct e1000_option opt = { 383 const struct e1000_option opt = {
357 .type = enable_option, 384 .type = enable_option,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b133dcf0e950..6cd333ae61d0 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -476,7 +476,9 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
476 if (ret_val) 476 if (ret_val)
477 return ret_val; 477 return ret_val;
478 478
479 if ((phy->type == e1000_phy_m88) && (phy->revision < 4)) { 479 if ((phy->type == e1000_phy_m88) &&
480 (phy->revision < E1000_REVISION_4) &&
481 (phy->id != BME1000_E_PHY_ID_R2)) {
480 /* 482 /*
481 * Force TX_CLK in the Extended PHY Specific Control Register 483 * Force TX_CLK in the Extended PHY Specific Control Register
482 * to 25MHz clock. 484 * to 25MHz clock.
@@ -504,6 +506,18 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
504 return ret_val; 506 return ret_val;
505 } 507 }
506 508
509 if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
510 /* Set PHY page 0, register 29 to 0x0003 */
511 ret_val = e1e_wphy(hw, 29, 0x0003);
512 if (ret_val)
513 return ret_val;
514
515 /* Set PHY page 0, register 30 to 0x0000 */
516 ret_val = e1e_wphy(hw, 30, 0x0000);
517 if (ret_val)
518 return ret_val;
519 }
520
507 /* Commit the changes. */ 521 /* Commit the changes. */
508 ret_val = e1000e_commit_phy(hw); 522 ret_val = e1000e_commit_phy(hw);
509 if (ret_val) 523 if (ret_val)
@@ -1720,6 +1734,91 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw)
1720 return 0; 1734 return 0;
1721} 1735}
1722 1736
1737/**
1738 * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
1739 * @hw: pointer to the HW structure
1740 *
1741 * Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
1742 **/
1743s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
1744{
1745 hw_dbg(hw, "Running IGP 3 PHY init script\n");
1746
1747 /* PHY init IGP 3 */
1748 /* Enable rise/fall, 10-mode work in class-A */
1749 e1e_wphy(hw, 0x2F5B, 0x9018);
1750 /* Remove all caps from Replica path filter */
1751 e1e_wphy(hw, 0x2F52, 0x0000);
1752 /* Bias trimming for ADC, AFE and Driver (Default) */
1753 e1e_wphy(hw, 0x2FB1, 0x8B24);
1754 /* Increase Hybrid poly bias */
1755 e1e_wphy(hw, 0x2FB2, 0xF8F0);
1756 /* Add 4% to Tx amplitude in Gig mode */
1757 e1e_wphy(hw, 0x2010, 0x10B0);
1758 /* Disable trimming (TTT) */
1759 e1e_wphy(hw, 0x2011, 0x0000);
1760 /* Poly DC correction to 94.6% + 2% for all channels */
1761 e1e_wphy(hw, 0x20DD, 0x249A);
1762 /* ABS DC correction to 95.9% */
1763 e1e_wphy(hw, 0x20DE, 0x00D3);
1764 /* BG temp curve trim */
1765 e1e_wphy(hw, 0x28B4, 0x04CE);
1766 /* Increasing ADC OPAMP stage 1 currents to max */
1767 e1e_wphy(hw, 0x2F70, 0x29E4);
1768 /* Force 1000 ( required for enabling PHY regs configuration) */
1769 e1e_wphy(hw, 0x0000, 0x0140);
1770 /* Set upd_freq to 6 */
1771 e1e_wphy(hw, 0x1F30, 0x1606);
1772 /* Disable NPDFE */
1773 e1e_wphy(hw, 0x1F31, 0xB814);
1774 /* Disable adaptive fixed FFE (Default) */
1775 e1e_wphy(hw, 0x1F35, 0x002A);
1776 /* Enable FFE hysteresis */
1777 e1e_wphy(hw, 0x1F3E, 0x0067);
1778 /* Fixed FFE for short cable lengths */
1779 e1e_wphy(hw, 0x1F54, 0x0065);
1780 /* Fixed FFE for medium cable lengths */
1781 e1e_wphy(hw, 0x1F55, 0x002A);
1782 /* Fixed FFE for long cable lengths */
1783 e1e_wphy(hw, 0x1F56, 0x002A);
1784 /* Enable Adaptive Clip Threshold */
1785 e1e_wphy(hw, 0x1F72, 0x3FB0);
1786 /* AHT reset limit to 1 */
1787 e1e_wphy(hw, 0x1F76, 0xC0FF);
1788 /* Set AHT master delay to 127 msec */
1789 e1e_wphy(hw, 0x1F77, 0x1DEC);
1790 /* Set scan bits for AHT */
1791 e1e_wphy(hw, 0x1F78, 0xF9EF);
1792 /* Set AHT Preset bits */
1793 e1e_wphy(hw, 0x1F79, 0x0210);
1794 /* Change integ_factor of channel A to 3 */
1795 e1e_wphy(hw, 0x1895, 0x0003);
1796 /* Change prop_factor of channels BCD to 8 */
1797 e1e_wphy(hw, 0x1796, 0x0008);
1798 /* Change cg_icount + enable integbp for channels BCD */
1799 e1e_wphy(hw, 0x1798, 0xD008);
1800 /*
1801 * Change cg_icount + enable integbp + change prop_factor_master
1802 * to 8 for channel A
1803 */
1804 e1e_wphy(hw, 0x1898, 0xD918);
1805 /* Disable AHT in Slave mode on channel A */
1806 e1e_wphy(hw, 0x187A, 0x0800);
1807 /*
1808 * Enable LPLU and disable AN to 1000 in non-D0a states,
1809 * Enable SPD+B2B
1810 */
1811 e1e_wphy(hw, 0x0019, 0x008D);
1812 /* Enable restart AN on an1000_dis change */
1813 e1e_wphy(hw, 0x001B, 0x2080);
1814 /* Enable wh_fifo read clock in 10/100 modes */
1815 e1e_wphy(hw, 0x0014, 0x0045);
1816 /* Restart AN, Speed selection is 1000 */
1817 e1e_wphy(hw, 0x0000, 0x1340);
1818
1819 return 0;
1820}
1821
1723/* Internal function pointers */ 1822/* Internal function pointers */
1724 1823
1725/** 1824/**
@@ -1969,6 +2068,99 @@ out:
1969} 2068}
1970 2069
1971/** 2070/**
2071 * e1000e_read_phy_reg_bm2 - Read BM PHY register
2072 * @hw: pointer to the HW structure
2073 * @offset: register offset to be read
2074 * @data: pointer to the read data
2075 *
2076 * Acquires semaphore, if necessary, then reads the PHY register at offset
2077 * and storing the retrieved information in data. Release any acquired
2078 * semaphores before exiting.
2079 **/
2080s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2081{
2082 s32 ret_val;
2083 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2084
2085 /* Page 800 works differently than the rest so it has its own func */
2086 if (page == BM_WUC_PAGE) {
2087 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
2088 true);
2089 return ret_val;
2090 }
2091
2092 ret_val = hw->phy.ops.acquire_phy(hw);
2093 if (ret_val)
2094 return ret_val;
2095
2096 hw->phy.addr = 1;
2097
2098 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2099
2100 /* Page is shifted left, PHY expects (page x 32) */
2101 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2102 page);
2103
2104 if (ret_val) {
2105 hw->phy.ops.release_phy(hw);
2106 return ret_val;
2107 }
2108 }
2109
2110 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2111 data);
2112 hw->phy.ops.release_phy(hw);
2113
2114 return ret_val;
2115}
2116
2117/**
2118 * e1000e_write_phy_reg_bm2 - Write BM PHY register
2119 * @hw: pointer to the HW structure
2120 * @offset: register offset to write to
2121 * @data: data to write at register offset
2122 *
2123 * Acquires semaphore, if necessary, then writes the data to PHY register
2124 * at the offset. Release any acquired semaphores before exiting.
2125 **/
2126s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2127{
2128 s32 ret_val;
2129 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2130
2131 /* Page 800 works differently than the rest so it has its own func */
2132 if (page == BM_WUC_PAGE) {
2133 ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
2134 false);
2135 return ret_val;
2136 }
2137
2138 ret_val = hw->phy.ops.acquire_phy(hw);
2139 if (ret_val)
2140 return ret_val;
2141
2142 hw->phy.addr = 1;
2143
2144 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2145 /* Page is shifted left, PHY expects (page x 32) */
2146 ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
2147 page);
2148
2149 if (ret_val) {
2150 hw->phy.ops.release_phy(hw);
2151 return ret_val;
2152 }
2153 }
2154
2155 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2156 data);
2157
2158 hw->phy.ops.release_phy(hw);
2159
2160 return ret_val;
2161}
2162
2163/**
1972 * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register 2164 * e1000_access_phy_wakeup_reg_bm - Read BM PHY wakeup register
1973 * @hw: pointer to the HW structure 2165 * @hw: pointer to the HW structure
1974 * @offset: register offset to be read or written 2166 * @offset: register offset to be read or written
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index e01926b7b5b7..5524271eedca 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,13 +40,13 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0092" 43#define DRV_VERSION "EHEA_0093"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
47#define DLPAR_MEM_ADD 2 47#define DLPAR_MEM_ADD 2
48#define DLPAR_MEM_REM 4 48#define DLPAR_MEM_REM 4
49#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD) 49#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
50 50
51#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ 51#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
52 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 52 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 156eb6320b4e..2a33a613d9e6 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -535,7 +535,7 @@ u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
535 cb_logaddr, /* R5 */ 535 cb_logaddr, /* R5 */
536 0, 0, 0, 0, 0); /* R6-R10 */ 536 0, 0, 0, 0, 0); /* R6-R10 */
537#ifdef DEBUG 537#ifdef DEBUG
538 ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea"); 538 ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
539#endif 539#endif
540 return hret; 540 return hret;
541} 541}
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 140f05baafd8..db8a9257e680 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -595,7 +595,8 @@ static int ehea_create_busmap_callback(unsigned long pfn,
595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); 595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
596 mr_len = *(unsigned long *)arg; 596 mr_len = *(unsigned long *)arg;
597 597
598 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); 598 if (!ehea_bmap)
599 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
599 if (!ehea_bmap) 600 if (!ehea_bmap)
600 return -ENOMEM; 601 return -ENOMEM;
601 602
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index aa0bf6e1c694..e1b441effbbe 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -110,7 +110,7 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
110 } 110 }
111 if (ret && netif_msg_drv(priv)) 111 if (ret && netif_msg_drv(priv))
112 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 112 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
113 __FUNCTION__, ret); 113 __func__, ret);
114 114
115 return ret; 115 return ret;
116} 116}
@@ -131,7 +131,7 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
131 ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1); 131 ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
132 if (ret && netif_msg_drv(priv)) 132 if (ret && netif_msg_drv(priv))
133 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 133 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
134 __FUNCTION__, ret); 134 __func__, ret);
135 } 135 }
136 return ret; 136 return ret;
137} 137}
@@ -156,7 +156,7 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
156 ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen); 156 ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
157 if (ret) 157 if (ret)
158 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 158 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
159 __FUNCTION__, ret); 159 __func__, ret);
160 else 160 else
161 val = rx_buf[slen - 1]; 161 val = rx_buf[slen - 1];
162 162
@@ -176,14 +176,14 @@ static int spi_write_op(struct enc28j60_net *priv, u8 op,
176 ret = spi_write(priv->spi, priv->spi_transfer_buf, 2); 176 ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
177 if (ret && netif_msg_drv(priv)) 177 if (ret && netif_msg_drv(priv))
178 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 178 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
179 __FUNCTION__, ret); 179 __func__, ret);
180 return ret; 180 return ret;
181} 181}
182 182
183static void enc28j60_soft_reset(struct enc28j60_net *priv) 183static void enc28j60_soft_reset(struct enc28j60_net *priv)
184{ 184{
185 if (netif_msg_hw(priv)) 185 if (netif_msg_hw(priv))
186 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 186 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
187 187
188 spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET); 188 spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
189 /* Errata workaround #1, CLKRDY check is unreliable, 189 /* Errata workaround #1, CLKRDY check is unreliable,
@@ -357,7 +357,7 @@ static void enc28j60_mem_read(struct enc28j60_net *priv,
357 reg = nolock_regw_read(priv, ERDPTL); 357 reg = nolock_regw_read(priv, ERDPTL);
358 if (reg != addr) 358 if (reg != addr)
359 printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT " 359 printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
360 "(0x%04x - 0x%04x)\n", __FUNCTION__, reg, addr); 360 "(0x%04x - 0x%04x)\n", __func__, reg, addr);
361 } 361 }
362#endif 362#endif
363 spi_read_buf(priv, len, data); 363 spi_read_buf(priv, len, data);
@@ -380,7 +380,7 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
380 if (reg != TXSTART_INIT) 380 if (reg != TXSTART_INIT)
381 printk(KERN_DEBUG DRV_NAME 381 printk(KERN_DEBUG DRV_NAME
382 ": %s() ERWPT:0x%04x != 0x%04x\n", 382 ": %s() ERWPT:0x%04x != 0x%04x\n",
383 __FUNCTION__, reg, TXSTART_INIT); 383 __func__, reg, TXSTART_INIT);
384 } 384 }
385#endif 385#endif
386 /* Set the TXND pointer to correspond to the packet size given */ 386 /* Set the TXND pointer to correspond to the packet size given */
@@ -390,13 +390,13 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
390 if (netif_msg_hw(priv)) 390 if (netif_msg_hw(priv))
391 printk(KERN_DEBUG DRV_NAME 391 printk(KERN_DEBUG DRV_NAME
392 ": %s() after control byte ERWPT:0x%04x\n", 392 ": %s() after control byte ERWPT:0x%04x\n",
393 __FUNCTION__, nolock_regw_read(priv, EWRPTL)); 393 __func__, nolock_regw_read(priv, EWRPTL));
394 /* copy the packet into the transmit buffer */ 394 /* copy the packet into the transmit buffer */
395 spi_write_buf(priv, len, data); 395 spi_write_buf(priv, len, data);
396 if (netif_msg_hw(priv)) 396 if (netif_msg_hw(priv))
397 printk(KERN_DEBUG DRV_NAME 397 printk(KERN_DEBUG DRV_NAME
398 ": %s() after write packet ERWPT:0x%04x, len=%d\n", 398 ": %s() after write packet ERWPT:0x%04x, len=%d\n",
399 __FUNCTION__, nolock_regw_read(priv, EWRPTL), len); 399 __func__, nolock_regw_read(priv, EWRPTL), len);
400 mutex_unlock(&priv->lock); 400 mutex_unlock(&priv->lock);
401} 401}
402 402
@@ -495,7 +495,7 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
495 if (netif_msg_drv(priv)) 495 if (netif_msg_drv(priv))
496 printk(KERN_DEBUG DRV_NAME 496 printk(KERN_DEBUG DRV_NAME
497 ": %s() Hardware must be disabled to set " 497 ": %s() Hardware must be disabled to set "
498 "Mac address\n", __FUNCTION__); 498 "Mac address\n", __func__);
499 ret = -EBUSY; 499 ret = -EBUSY;
500 } 500 }
501 mutex_unlock(&priv->lock); 501 mutex_unlock(&priv->lock);
@@ -575,7 +575,7 @@ static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
575 if (start > 0x1FFF || end > 0x1FFF || start > end) { 575 if (start > 0x1FFF || end > 0x1FFF || start > end) {
576 if (netif_msg_drv(priv)) 576 if (netif_msg_drv(priv))
577 printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO " 577 printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
578 "bad parameters!\n", __FUNCTION__, start, end); 578 "bad parameters!\n", __func__, start, end);
579 return; 579 return;
580 } 580 }
581 /* set receive buffer start + end */ 581 /* set receive buffer start + end */
@@ -591,7 +591,7 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
591 if (start > 0x1FFF || end > 0x1FFF || start > end) { 591 if (start > 0x1FFF || end > 0x1FFF || start > end) {
592 if (netif_msg_drv(priv)) 592 if (netif_msg_drv(priv))
593 printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO " 593 printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
594 "bad parameters!\n", __FUNCTION__, start, end); 594 "bad parameters!\n", __func__, start, end);
595 return; 595 return;
596 } 596 }
597 /* set transmit buffer start + end */ 597 /* set transmit buffer start + end */
@@ -630,7 +630,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
630 u8 reg; 630 u8 reg;
631 631
632 if (netif_msg_drv(priv)) 632 if (netif_msg_drv(priv))
633 printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __FUNCTION__, 633 printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
634 priv->full_duplex ? "FullDuplex" : "HalfDuplex"); 634 priv->full_duplex ? "FullDuplex" : "HalfDuplex");
635 635
636 mutex_lock(&priv->lock); 636 mutex_lock(&priv->lock);
@@ -661,7 +661,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
661 if (reg == 0x00 || reg == 0xff) { 661 if (reg == 0x00 || reg == 0xff) {
662 if (netif_msg_drv(priv)) 662 if (netif_msg_drv(priv))
663 printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n", 663 printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
664 __FUNCTION__, reg); 664 __func__, reg);
665 return 0; 665 return 0;
666 } 666 }
667 667
@@ -724,7 +724,7 @@ static void enc28j60_hw_enable(struct enc28j60_net *priv)
724 /* enable interrupts */ 724 /* enable interrupts */
725 if (netif_msg_hw(priv)) 725 if (netif_msg_hw(priv))
726 printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", 726 printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
727 __FUNCTION__); 727 __func__);
728 728
729 enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE); 729 enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
730 730
@@ -888,7 +888,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
888 if (netif_msg_rx_err(priv)) 888 if (netif_msg_rx_err(priv))
889 dev_err(&ndev->dev, 889 dev_err(&ndev->dev,
890 "%s() Invalid packet address!! 0x%04x\n", 890 "%s() Invalid packet address!! 0x%04x\n",
891 __FUNCTION__, priv->next_pk_ptr); 891 __func__, priv->next_pk_ptr);
892 /* packet address corrupted: reset RX logic */ 892 /* packet address corrupted: reset RX logic */
893 mutex_lock(&priv->lock); 893 mutex_lock(&priv->lock);
894 nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); 894 nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
@@ -917,7 +917,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
917 rxstat |= rsv[4]; 917 rxstat |= rsv[4];
918 918
919 if (netif_msg_rx_status(priv)) 919 if (netif_msg_rx_status(priv))
920 enc28j60_dump_rsv(priv, __FUNCTION__, next_packet, len, rxstat); 920 enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat);
921 921
922 if (!RSV_GETBIT(rxstat, RSV_RXOK)) { 922 if (!RSV_GETBIT(rxstat, RSV_RXOK)) {
923 if (netif_msg_rx_err(priv)) 923 if (netif_msg_rx_err(priv))
@@ -941,7 +941,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
942 len, skb_put(skb, len)); 942 len, skb_put(skb, len));
943 if (netif_msg_pktdata(priv)) 943 if (netif_msg_pktdata(priv))
944 dump_packet(__FUNCTION__, skb->len, skb->data); 944 dump_packet(__func__, skb->len, skb->data);
945 skb->protocol = eth_type_trans(skb, ndev); 945 skb->protocol = eth_type_trans(skb, ndev);
946 /* update statistics */ 946 /* update statistics */
947 ndev->stats.rx_packets++; 947 ndev->stats.rx_packets++;
@@ -958,7 +958,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
958 erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT); 958 erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
959 if (netif_msg_hw(priv)) 959 if (netif_msg_hw(priv))
960 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n", 960 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
961 __FUNCTION__, erxrdpt); 961 __func__, erxrdpt);
962 962
963 mutex_lock(&priv->lock); 963 mutex_lock(&priv->lock);
964 nolock_regw_write(priv, ERXRDPTL, erxrdpt); 964 nolock_regw_write(priv, ERXRDPTL, erxrdpt);
@@ -968,7 +968,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
968 reg = nolock_regw_read(priv, ERXRDPTL); 968 reg = nolock_regw_read(priv, ERXRDPTL);
969 if (reg != erxrdpt) 969 if (reg != erxrdpt)
970 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify " 970 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
971 "error (0x%04x - 0x%04x)\n", __FUNCTION__, 971 "error (0x%04x - 0x%04x)\n", __func__,
972 reg, erxrdpt); 972 reg, erxrdpt);
973 } 973 }
974#endif 974#endif
@@ -1006,7 +1006,7 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
1006 mutex_unlock(&priv->lock); 1006 mutex_unlock(&priv->lock);
1007 if (netif_msg_rx_status(priv)) 1007 if (netif_msg_rx_status(priv))
1008 printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n", 1008 printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
1009 __FUNCTION__, free_space); 1009 __func__, free_space);
1010 return free_space; 1010 return free_space;
1011} 1011}
1012 1012
@@ -1022,7 +1022,7 @@ static void enc28j60_check_link_status(struct net_device *ndev)
1022 reg = enc28j60_phy_read(priv, PHSTAT2); 1022 reg = enc28j60_phy_read(priv, PHSTAT2);
1023 if (netif_msg_hw(priv)) 1023 if (netif_msg_hw(priv))
1024 printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, " 1024 printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
1025 "PHSTAT2: %04x\n", __FUNCTION__, 1025 "PHSTAT2: %04x\n", __func__,
1026 enc28j60_phy_read(priv, PHSTAT1), reg); 1026 enc28j60_phy_read(priv, PHSTAT1), reg);
1027 duplex = reg & PHSTAT2_DPXSTAT; 1027 duplex = reg & PHSTAT2_DPXSTAT;
1028 1028
@@ -1095,7 +1095,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1095 int intflags, loop; 1095 int intflags, loop;
1096 1096
1097 if (netif_msg_intr(priv)) 1097 if (netif_msg_intr(priv))
1098 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1098 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1099 /* disable further interrupts */ 1099 /* disable further interrupts */
1100 locked_reg_bfclr(priv, EIE, EIE_INTIE); 1100 locked_reg_bfclr(priv, EIE, EIE_INTIE);
1101 1101
@@ -1198,7 +1198,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1198 /* re-enable interrupts */ 1198 /* re-enable interrupts */
1199 locked_reg_bfset(priv, EIE, EIE_INTIE); 1199 locked_reg_bfset(priv, EIE, EIE_INTIE);
1200 if (netif_msg_intr(priv)) 1200 if (netif_msg_intr(priv))
1201 printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __FUNCTION__); 1201 printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
1202} 1202}
1203 1203
1204/* 1204/*
@@ -1213,7 +1213,7 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
1213 ": Tx Packet Len:%d\n", priv->tx_skb->len); 1213 ": Tx Packet Len:%d\n", priv->tx_skb->len);
1214 1214
1215 if (netif_msg_pktdata(priv)) 1215 if (netif_msg_pktdata(priv))
1216 dump_packet(__FUNCTION__, 1216 dump_packet(__func__,
1217 priv->tx_skb->len, priv->tx_skb->data); 1217 priv->tx_skb->len, priv->tx_skb->data);
1218 enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data); 1218 enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data);
1219 1219
@@ -1254,7 +1254,7 @@ static int enc28j60_send_packet(struct sk_buff *skb, struct net_device *dev)
1254 struct enc28j60_net *priv = netdev_priv(dev); 1254 struct enc28j60_net *priv = netdev_priv(dev);
1255 1255
1256 if (netif_msg_tx_queued(priv)) 1256 if (netif_msg_tx_queued(priv))
1257 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1257 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1258 1258
1259 /* If some error occurs while trying to transmit this 1259 /* If some error occurs while trying to transmit this
1260 * packet, you should return '1' from this function. 1260 * packet, you should return '1' from this function.
@@ -1325,7 +1325,7 @@ static int enc28j60_net_open(struct net_device *dev)
1325 struct enc28j60_net *priv = netdev_priv(dev); 1325 struct enc28j60_net *priv = netdev_priv(dev);
1326 1326
1327 if (netif_msg_drv(priv)) 1327 if (netif_msg_drv(priv))
1328 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1328 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1329 1329
1330 if (!is_valid_ether_addr(dev->dev_addr)) { 1330 if (!is_valid_ether_addr(dev->dev_addr)) {
1331 if (netif_msg_ifup(priv)) { 1331 if (netif_msg_ifup(priv)) {
@@ -1363,7 +1363,7 @@ static int enc28j60_net_close(struct net_device *dev)
1363 struct enc28j60_net *priv = netdev_priv(dev); 1363 struct enc28j60_net *priv = netdev_priv(dev);
1364 1364
1365 if (netif_msg_drv(priv)) 1365 if (netif_msg_drv(priv))
1366 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1366 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1367 1367
1368 enc28j60_hw_disable(priv); 1368 enc28j60_hw_disable(priv);
1369 enc28j60_lowpower(priv, true); 1369 enc28j60_lowpower(priv, true);
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
new file mode 100644
index 000000000000..391c3bce5b79
--- /dev/null
+++ b/drivers/net/enic/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o
2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o vnic_dev.o vnic_rq.o
5
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
new file mode 100644
index 000000000000..c036a8bfd043
--- /dev/null
+++ b/drivers/net/enic/cq_desc.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _CQ_DESC_H_
21#define _CQ_DESC_H_
22
23/*
24 * Completion queue descriptor types
25 */
26enum cq_desc_types {
27 CQ_DESC_TYPE_WQ_ENET = 0,
28 CQ_DESC_TYPE_DESC_COPY = 1,
29 CQ_DESC_TYPE_WQ_EXCH = 2,
30 CQ_DESC_TYPE_RQ_ENET = 3,
31 CQ_DESC_TYPE_RQ_FCP = 4,
32};
33
34/* Completion queue descriptor: 16B
35 *
36 * All completion queues have this basic layout. The
37 * type_specfic area is unique for each completion
38 * queue type.
39 */
40struct cq_desc {
41 __le16 completed_index;
42 __le16 q_number;
43 u8 type_specfic[11];
44 u8 type_color;
45};
46
47#define CQ_DESC_TYPE_BITS 7
48#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
49#define CQ_DESC_COLOR_MASK 1
50#define CQ_DESC_Q_NUM_BITS 10
51#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
52#define CQ_DESC_COMP_NDX_BITS 12
53#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
54
55static inline void cq_desc_dec(const struct cq_desc *desc_arg,
56 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
57{
58 const struct cq_desc *desc = desc_arg;
59 const u8 type_color = desc->type_color;
60
61 *color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK;
62
63 /*
64 * Make sure color bit is read from desc *before* other fields
65 * are read from desc. Hardware guarantees color bit is last
66 * bit (byte) written. Adding the rmb() prevents the compiler
67 * and/or CPU from reordering the reads which would potentially
68 * result in reading stale values.
69 */
70
71 rmb();
72
73 *type = type_color & CQ_DESC_TYPE_MASK;
74 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
75 *completed_index = le16_to_cpu(desc->completed_index) &
76 CQ_DESC_COMP_NDX_MASK;
77}
78
79#endif /* _CQ_DESC_H_ */
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
new file mode 100644
index 000000000000..03dce9ed612c
--- /dev/null
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -0,0 +1,169 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _CQ_ENET_DESC_H_
21#define _CQ_ENET_DESC_H_
22
23#include "cq_desc.h"
24
25/* Ethernet completion queue descriptor: 16B */
/* Ethernet send-queue completion descriptor: 16B.
 * Same layout as the generic struct cq_desc; the WQ type has no
 * type-specific payload, so the middle 11 bytes are reserved.
 */
struct cq_enet_wq_desc {
	__le16 completed_index;
	__le16 q_number;
	u8 reserved[11];
	u8 type_color;
};
32
33static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc,
34 u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
35{
36 cq_desc_dec((struct cq_desc *)desc, type,
37 color, q_number, completed_index);
38}
39
40/* Completion queue descriptor: Ethernet receive queue, 16B */
/* Completion queue descriptor: Ethernet receive queue, 16B.
 * Several fields pack flags into their high bits (see the
 * CQ_ENET_RQ_DESC_* masks below); checksum_fcoe is dual-purpose —
 * FCoE SOF/EOF bytes for FCoE frames, L4 checksum otherwise.
 */
struct cq_enet_rq_desc {
	__le16 completed_index_flags;	/* index + ingress/fcoe/eop/sop flags */
	__le16 q_number_rss_type_flags;	/* q number + RSS type + csum flag */
	__le32 rss_hash;
	__le16 bytes_written_flags;	/* byte count + truncated/vlan flags */
	__le16 vlan;
	__le16 checksum_fcoe;		/* checksum, or FCoE SOF/EOF */
	u8 flags;			/* per-packet status flag bits */
	u8 type_color;
};
51
52#define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12)
53#define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13)
54#define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14)
55#define CQ_ENET_RQ_DESC_FLAGS_SOP (0x1 << 15)
56
57#define CQ_ENET_RQ_DESC_RSS_TYPE_BITS 4
58#define CQ_ENET_RQ_DESC_RSS_TYPE_MASK \
59 ((1 << CQ_ENET_RQ_DESC_RSS_TYPE_BITS) - 1)
60#define CQ_ENET_RQ_DESC_RSS_TYPE_NONE 0
61#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv4 1
62#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4 2
63#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6 3
64#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6 4
65#define CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX 5
66#define CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX 6
67
68#define CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC (0x1 << 14)
69
70#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS 14
71#define CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK \
72 ((1 << CQ_ENET_RQ_DESC_BYTES_WRITTEN_BITS) - 1)
73#define CQ_ENET_RQ_DESC_FLAGS_TRUNCATED (0x1 << 14)
74#define CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED (0x1 << 15)
75
76#define CQ_ENET_RQ_DESC_FCOE_SOF_BITS 4
77#define CQ_ENET_RQ_DESC_FCOE_SOF_MASK \
78 ((1 << CQ_ENET_RQ_DESC_FCOE_SOF_BITS) - 1)
79#define CQ_ENET_RQ_DESC_FCOE_EOF_BITS 8
80#define CQ_ENET_RQ_DESC_FCOE_EOF_MASK \
81 ((1 << CQ_ENET_RQ_DESC_FCOE_EOF_BITS) - 1)
82#define CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT 8
83
84#define CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK (0x1 << 0)
85#define CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK (0x1 << 0)
86#define CQ_ENET_RQ_DESC_FLAGS_UDP (0x1 << 1)
87#define CQ_ENET_RQ_DESC_FCOE_ENC_ERROR (0x1 << 1)
88#define CQ_ENET_RQ_DESC_FLAGS_TCP (0x1 << 2)
89#define CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK (0x1 << 3)
90#define CQ_ENET_RQ_DESC_FLAGS_IPV6 (0x1 << 4)
91#define CQ_ENET_RQ_DESC_FLAGS_IPV4 (0x1 << 5)
92#define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6)
93#define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7)
94
95static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
96 u8 *type, u8 *color, u16 *q_number, u16 *completed_index,
97 u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type,
98 u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error,
99 u8 *vlan_stripped, u16 *vlan, u16 *checksum, u8 *fcoe_sof,
100 u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof,
101 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
102 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
103{
104 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
105 u16 q_number_rss_type_flags =
106 le16_to_cpu(desc->q_number_rss_type_flags);
107 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
108
109 cq_desc_dec((struct cq_desc *)desc, type,
110 color, q_number, completed_index);
111
112 *ingress_port = (completed_index_flags &
113 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
114 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
115 1 : 0;
116 *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ?
117 1 : 0;
118 *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ?
119 1 : 0;
120
121 *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) &
122 CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
123 *csum_not_calc = (q_number_rss_type_flags &
124 CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0;
125
126 *rss_hash = le32_to_cpu(desc->rss_hash);
127
128 *bytes_written = bytes_written_flags &
129 CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
130 *packet_error = (bytes_written_flags &
131 CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0;
132 *vlan_stripped = (bytes_written_flags &
133 CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0;
134
135 *vlan = le16_to_cpu(desc->vlan);
136
137 if (*fcoe) {
138 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) &
139 CQ_ENET_RQ_DESC_FCOE_SOF_MASK);
140 *fcoe_fc_crc_ok = (desc->flags &
141 CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
142 *fcoe_enc_error = (desc->flags &
143 CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
144 *fcoe_eof = (u8)((desc->checksum_fcoe >>
145 CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
146 CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
147 *checksum = 0;
148 } else {
149 *fcoe_sof = 0;
150 *fcoe_fc_crc_ok = 0;
151 *fcoe_enc_error = 0;
152 *fcoe_eof = 0;
153 *checksum = le16_to_cpu(desc->checksum_fcoe);
154 }
155
156 *tcp_udp_csum_ok =
157 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0;
158 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0;
159 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0;
160 *ipv4_csum_ok =
161 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0;
162 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0;
163 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0;
164 *ipv4_fragment =
165 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0;
166 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0;
167}
168
169#endif /* _CQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
new file mode 100644
index 000000000000..7f677e89a788
--- /dev/null
+++ b/drivers/net/enic/enic.h
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _ENIC_H_
21#define _ENIC_H_
22
23#include <linux/inet_lro.h>
24
25#include "vnic_enet.h"
26#include "vnic_dev.h"
27#include "vnic_wq.h"
28#include "vnic_rq.h"
29#include "vnic_cq.h"
30#include "vnic_intr.h"
31#include "vnic_stats.h"
32#include "vnic_rss.h"
33
34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
36#define DRV_VERSION "0.0.1-18163.472-k1"
37#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc"
38#define PFX DRV_NAME ": "
39
40#define ENIC_LRO_MAX_DESC 8
41#define ENIC_LRO_MAX_AGGR 64
42
/* Completion queue indexes: one CQ for receive, one for transmit. */
enum enic_cq_index {
	ENIC_CQ_RQ,
	ENIC_CQ_WQ,
	ENIC_CQ_MAX,
};

/* Interrupt indexes when running with a single legacy INTx vector:
 * WQ+RQ share one, plus dedicated error and notify indexes.
 */
enum enic_intx_intr_index {
	ENIC_INTX_WQ_RQ,
	ENIC_INTX_ERR,
	ENIC_INTX_NOTIFY,
	ENIC_INTX_MAX,
};

/* Interrupt indexes when running with MSI-X: one vector each for
 * RQ, WQ, error, and notify events.
 */
enum enic_msix_intr_index {
	ENIC_MSIX_RQ,
	ENIC_MSIX_WQ,
	ENIC_MSIX_ERR,
	ENIC_MSIX_NOTIFY,
	ENIC_MSIX_MAX,
};

/* Book-keeping for one requested MSI-X vector. */
struct enic_msix_entry {
	int requested;			/* non-zero once request_irq succeeded */
	char devname[IFNAMSIZ];		/* name passed to request_irq */
	irqreturn_t (*isr)(int, void *);	/* handler for this vector */
	void *devid;			/* cookie passed back to the handler */
};
70
/* Per-instance private data structure.
 * Sections are cacheline-aligned to group fields by datapath
 * (TX, RX, interrupt, CQ) — presumably to limit cross-section
 * cacheline bouncing on hot paths (TODO confirm intent).
 */
struct enic {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar0;
	struct vnic_dev *vdev;
	struct timer_list notify_timer;	/* polls device notifications */
	struct work_struct reset;	/* deferred WQ/RQ error recovery */
	struct msix_entry msix_entry[ENIC_MSIX_MAX];
	struct enic_msix_entry msix[ENIC_MSIX_MAX];
	u32 msg_enable;
	spinlock_t devcmd_lock;		/* serializes vnic_dev_* devcmds */
	u8 mac_addr[ETH_ALEN];
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];	/* last-programmed mcast list */
	unsigned int mc_count;
	int csum_rx_enabled;
	u32 port_mtu;			/* switch-port MTU last seen from device */

	/* work queue cache line section */
	____cacheline_aligned struct vnic_wq wq[1];
	spinlock_t wq_lock[1];
	unsigned int wq_count;
	struct vlan_group *vlan_group;

	/* receive queue cache line section */
	____cacheline_aligned struct vnic_rq rq[1];
	unsigned int rq_count;
	int (*rq_alloc_buf)(struct vnic_rq *rq);
	struct napi_struct napi;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[ENIC_LRO_MAX_DESC];

	/* interrupt resource cache line section */
	____cacheline_aligned struct vnic_intr intr[ENIC_MSIX_MAX];
	unsigned int intr_count;
	u32 __iomem *legacy_pba;	/* memory-mapped */

	/* completion queue cache line section */
	____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
	unsigned int cq_count;
};
113
114#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
new file mode 100644
index 000000000000..f3a47a87dbbe
--- /dev/null
+++ b/drivers/net/enic/enic_main.c
@@ -0,0 +1,1934 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/errno.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/workqueue.h>
27#include <linux/pci.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/if_ether.h>
31#include <linux/if_vlan.h>
32#include <linux/ethtool.h>
33#include <linux/in.h>
34#include <linux/ip.h>
35#include <linux/ipv6.h>
36#include <linux/tcp.h>
37
38#include "cq_enet_desc.h"
39#include "vnic_dev.h"
40#include "vnic_intr.h"
41#include "vnic_stats.h"
42#include "enic_res.h"
43#include "enic.h"
44
45#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
46
47/* Supported devices */
48static struct pci_device_id enic_id_table[] = {
49 { PCI_VDEVICE(CISCO, 0x0043) },
50 { 0, } /* end of table */
51};
52
53MODULE_DESCRIPTION(DRV_DESCRIPTION);
54MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
55MODULE_LICENSE("GPL");
56MODULE_VERSION(DRV_VERSION);
57MODULE_DEVICE_TABLE(pci, enic_id_table);
58
59struct enic_stat {
60 char name[ETH_GSTRING_LEN];
61 unsigned int offset;
62};
63
64#define ENIC_TX_STAT(stat) \
65 { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
66#define ENIC_RX_STAT(stat) \
67 { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
68
69static const struct enic_stat enic_tx_stats[] = {
70 ENIC_TX_STAT(tx_frames_ok),
71 ENIC_TX_STAT(tx_unicast_frames_ok),
72 ENIC_TX_STAT(tx_multicast_frames_ok),
73 ENIC_TX_STAT(tx_broadcast_frames_ok),
74 ENIC_TX_STAT(tx_bytes_ok),
75 ENIC_TX_STAT(tx_unicast_bytes_ok),
76 ENIC_TX_STAT(tx_multicast_bytes_ok),
77 ENIC_TX_STAT(tx_broadcast_bytes_ok),
78 ENIC_TX_STAT(tx_drops),
79 ENIC_TX_STAT(tx_errors),
80 ENIC_TX_STAT(tx_tso),
81};
82
83static const struct enic_stat enic_rx_stats[] = {
84 ENIC_RX_STAT(rx_frames_ok),
85 ENIC_RX_STAT(rx_frames_total),
86 ENIC_RX_STAT(rx_unicast_frames_ok),
87 ENIC_RX_STAT(rx_multicast_frames_ok),
88 ENIC_RX_STAT(rx_broadcast_frames_ok),
89 ENIC_RX_STAT(rx_bytes_ok),
90 ENIC_RX_STAT(rx_unicast_bytes_ok),
91 ENIC_RX_STAT(rx_multicast_bytes_ok),
92 ENIC_RX_STAT(rx_broadcast_bytes_ok),
93 ENIC_RX_STAT(rx_drop),
94 ENIC_RX_STAT(rx_no_bufs),
95 ENIC_RX_STAT(rx_errors),
96 ENIC_RX_STAT(rx_rss),
97 ENIC_RX_STAT(rx_crc_errors),
98 ENIC_RX_STAT(rx_frames_64),
99 ENIC_RX_STAT(rx_frames_127),
100 ENIC_RX_STAT(rx_frames_255),
101 ENIC_RX_STAT(rx_frames_511),
102 ENIC_RX_STAT(rx_frames_1023),
103 ENIC_RX_STAT(rx_frames_1518),
104 ENIC_RX_STAT(rx_frames_to_max),
105};
106
107static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
108static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
109
110static int enic_get_settings(struct net_device *netdev,
111 struct ethtool_cmd *ecmd)
112{
113 struct enic *enic = netdev_priv(netdev);
114
115 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
116 ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
117 ecmd->port = PORT_FIBRE;
118 ecmd->transceiver = XCVR_EXTERNAL;
119
120 if (netif_carrier_ok(netdev)) {
121 ecmd->speed = vnic_dev_port_speed(enic->vdev);
122 ecmd->duplex = DUPLEX_FULL;
123 } else {
124 ecmd->speed = -1;
125 ecmd->duplex = -1;
126 }
127
128 ecmd->autoneg = AUTONEG_DISABLE;
129
130 return 0;
131}
132
133static void enic_get_drvinfo(struct net_device *netdev,
134 struct ethtool_drvinfo *drvinfo)
135{
136 struct enic *enic = netdev_priv(netdev);
137 struct vnic_devcmd_fw_info *fw_info;
138
139 spin_lock(&enic->devcmd_lock);
140 vnic_dev_fw_info(enic->vdev, &fw_info);
141 spin_unlock(&enic->devcmd_lock);
142
143 strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
144 strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
145 strncpy(drvinfo->fw_version, fw_info->fw_version,
146 sizeof(drvinfo->fw_version));
147 strncpy(drvinfo->bus_info, pci_name(enic->pdev),
148 sizeof(drvinfo->bus_info));
149}
150
151static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
152{
153 unsigned int i;
154
155 switch (stringset) {
156 case ETH_SS_STATS:
157 for (i = 0; i < enic_n_tx_stats; i++) {
158 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
159 data += ETH_GSTRING_LEN;
160 }
161 for (i = 0; i < enic_n_rx_stats; i++) {
162 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
163 data += ETH_GSTRING_LEN;
164 }
165 break;
166 }
167}
168
169static int enic_get_sset_count(struct net_device *netdev, int sset)
170{
171 switch (sset) {
172 case ETH_SS_STATS:
173 return enic_n_tx_stats + enic_n_rx_stats;
174 default:
175 return -EOPNOTSUPP;
176 }
177}
178
179static void enic_get_ethtool_stats(struct net_device *netdev,
180 struct ethtool_stats *stats, u64 *data)
181{
182 struct enic *enic = netdev_priv(netdev);
183 struct vnic_stats *vstats;
184 unsigned int i;
185
186 spin_lock(&enic->devcmd_lock);
187 vnic_dev_stats_dump(enic->vdev, &vstats);
188 spin_unlock(&enic->devcmd_lock);
189
190 for (i = 0; i < enic_n_tx_stats; i++)
191 *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
192 for (i = 0; i < enic_n_rx_stats; i++)
193 *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
194}
195
196static u32 enic_get_rx_csum(struct net_device *netdev)
197{
198 struct enic *enic = netdev_priv(netdev);
199 return enic->csum_rx_enabled;
200}
201
202static int enic_set_rx_csum(struct net_device *netdev, u32 data)
203{
204 struct enic *enic = netdev_priv(netdev);
205
206 if (data && !ENIC_SETTING(enic, RXCSUM))
207 return -EINVAL;
208
209 enic->csum_rx_enabled = !!data;
210
211 return 0;
212}
213
214static int enic_set_tx_csum(struct net_device *netdev, u32 data)
215{
216 struct enic *enic = netdev_priv(netdev);
217
218 if (data && !ENIC_SETTING(enic, TXCSUM))
219 return -EINVAL;
220
221 if (data)
222 netdev->features |= NETIF_F_HW_CSUM;
223 else
224 netdev->features &= ~NETIF_F_HW_CSUM;
225
226 return 0;
227}
228
229static int enic_set_tso(struct net_device *netdev, u32 data)
230{
231 struct enic *enic = netdev_priv(netdev);
232
233 if (data && !ENIC_SETTING(enic, TSO))
234 return -EINVAL;
235
236 if (data)
237 netdev->features |=
238 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
239 else
240 netdev->features &=
241 ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
242
243 return 0;
244}
245
246static u32 enic_get_msglevel(struct net_device *netdev)
247{
248 struct enic *enic = netdev_priv(netdev);
249 return enic->msg_enable;
250}
251
252static void enic_set_msglevel(struct net_device *netdev, u32 value)
253{
254 struct enic *enic = netdev_priv(netdev);
255 enic->msg_enable = value;
256}
257
258static struct ethtool_ops enic_ethtool_ops = {
259 .get_settings = enic_get_settings,
260 .get_drvinfo = enic_get_drvinfo,
261 .get_msglevel = enic_get_msglevel,
262 .set_msglevel = enic_set_msglevel,
263 .get_link = ethtool_op_get_link,
264 .get_strings = enic_get_strings,
265 .get_sset_count = enic_get_sset_count,
266 .get_ethtool_stats = enic_get_ethtool_stats,
267 .get_rx_csum = enic_get_rx_csum,
268 .set_rx_csum = enic_set_rx_csum,
269 .get_tx_csum = ethtool_op_get_tx_csum,
270 .set_tx_csum = enic_set_tx_csum,
271 .get_sg = ethtool_op_get_sg,
272 .set_sg = ethtool_op_set_sg,
273 .get_tso = ethtool_op_get_tso,
274 .set_tso = enic_set_tso,
275};
276
277static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
278{
279 struct enic *enic = vnic_dev_priv(wq->vdev);
280
281 if (buf->sop)
282 pci_unmap_single(enic->pdev, buf->dma_addr,
283 buf->len, PCI_DMA_TODEVICE);
284 else
285 pci_unmap_page(enic->pdev, buf->dma_addr,
286 buf->len, PCI_DMA_TODEVICE);
287
288 if (buf->os_buf)
289 dev_kfree_skb_any(buf->os_buf);
290}
291
292static void enic_wq_free_buf(struct vnic_wq *wq,
293 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
294{
295 enic_free_wq_buf(wq, buf);
296}
297
298static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
299 u8 type, u16 q_number, u16 completed_index, void *opaque)
300{
301 struct enic *enic = vnic_dev_priv(vdev);
302
303 spin_lock(&enic->wq_lock[q_number]);
304
305 vnic_wq_service(&enic->wq[q_number], cq_desc,
306 completed_index, enic_wq_free_buf,
307 opaque);
308
309 if (netif_queue_stopped(enic->netdev) &&
310 vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
311 netif_wake_queue(enic->netdev);
312
313 spin_unlock(&enic->wq_lock[q_number]);
314
315 return 0;
316}
317
318static void enic_log_q_error(struct enic *enic)
319{
320 unsigned int i;
321 u32 error_status;
322
323 for (i = 0; i < enic->wq_count; i++) {
324 error_status = vnic_wq_error_status(&enic->wq[i]);
325 if (error_status)
326 printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n",
327 enic->netdev->name, i, error_status);
328 }
329
330 for (i = 0; i < enic->rq_count; i++) {
331 error_status = vnic_rq_error_status(&enic->rq[i]);
332 if (error_status)
333 printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n",
334 enic->netdev->name, i, error_status);
335 }
336}
337
338static void enic_link_check(struct enic *enic)
339{
340 int link_status = vnic_dev_link_status(enic->vdev);
341 int carrier_ok = netif_carrier_ok(enic->netdev);
342
343 if (link_status && !carrier_ok) {
344 printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name);
345 netif_carrier_on(enic->netdev);
346 } else if (!link_status && carrier_ok) {
347 printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name);
348 netif_carrier_off(enic->netdev);
349 }
350}
351
/* Track the switch-port MTU reported by the device.  Warn (once per
 * change) when the local interface MTU exceeds it, since such frames
 * presumably get dropped at the switch — the interface MTU itself is
 * not modified here.
 */
static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);

	if (mtu != enic->port_mtu) {
		if (mtu < enic->netdev->mtu)
			printk(KERN_WARNING PFX
				"%s: interface MTU (%d) set higher "
				"than switch port MTU (%d)\n",
				enic->netdev->name, enic->netdev->mtu, mtu);
		enic->port_mtu = mtu;
	}
}
365
366static void enic_msglvl_check(struct enic *enic)
367{
368 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);
369
370 if (msg_enable != enic->msg_enable) {
371 printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n",
372 enic->netdev->name, enic->msg_enable, msg_enable);
373 enic->msg_enable = msg_enable;
374 }
375}
376
/* Process a device notification: refresh msg level, port MTU, and
 * link state.  Called from the notify interrupt and legacy INTx paths.
 */
static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}
383
/* Test interrupt bit i in the posted-status word pba.  Arguments are
 * parenthesized so the macro expands correctly for expression
 * arguments (the original "pba & (1 << i)" mis-binds for operators of
 * lower precedence than &, e.g. ^ or |).
 */
#define ENIC_TEST_INTR(pba, i) ((pba) & (1 << (i)))
385
/* Legacy INTx interrupt handler.  The line is shared, so the posted
 * status (PBA) word is read to decide whether the interrupt is ours;
 * reading it also acks the device (NOTE(review): presumed from the
 * re-unmask on the !pba path — confirm against vnic_intr docs).
 */
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	/* Mask WQ/RQ until NAPI polling (or the not-ours path) unmasks. */
	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
		enic_notify_check(enic);

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		/* Defer WQ/RQ servicing to NAPI; enic_poll unmasks later. */
		if (netif_rx_schedule_prep(netdev, &enic->napi))
			__netif_rx_schedule(netdev, &enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
419
420static irqreturn_t enic_isr_msi(int irq, void *data)
421{
422 struct enic *enic = data;
423
424 /* With MSI, there is no sharing of interrupts, so this is
425 * our interrupt and there is no need to ack it. The device
426 * is not providing per-vector masking, so the OS will not
427 * write to PCI config space to mask/unmask the interrupt.
428 * We're using mask_on_assertion for MSI, so the device
429 * automatically masks the interrupt when the interrupt is
430 * generated. Later, when exiting polling, the interrupt
431 * will be unmasked (see enic_poll).
432 *
433 * Also, the device uses the same PCIe Traffic Class (TC)
434 * for Memory Write data and MSI, so there are no ordering
435 * issues; the MSI will always arrive at the Root Complex
436 * _after_ corresponding Memory Writes (i.e. descriptor
437 * writes).
438 */
439
440 netif_rx_schedule(enic->netdev, &enic->napi);
441
442 return IRQ_HANDLED;
443}
444
445static irqreturn_t enic_isr_msix_rq(int irq, void *data)
446{
447 struct enic *enic = data;
448
449 /* schedule NAPI polling for RQ cleanup */
450 netif_rx_schedule(enic->netdev, &enic->napi);
451
452 return IRQ_HANDLED;
453}
454
455static irqreturn_t enic_isr_msix_wq(int irq, void *data)
456{
457 struct enic *enic = data;
458 unsigned int wq_work_to_do = -1; /* no limit */
459 unsigned int wq_work_done;
460
461 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
462 wq_work_to_do, enic_wq_service, NULL);
463
464 vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ],
465 wq_work_done,
466 1 /* unmask intr */,
467 1 /* reset intr timer */);
468
469 return IRQ_HANDLED;
470}
471
472static irqreturn_t enic_isr_msix_err(int irq, void *data)
473{
474 struct enic *enic = data;
475
476 enic_log_q_error(enic);
477
478 /* schedule recovery from WQ/RQ error */
479 schedule_work(&enic->reset);
480
481 return IRQ_HANDLED;
482}
483
484static irqreturn_t enic_isr_msix_notify(int irq, void *data)
485{
486 struct enic *enic = data;
487
488 enic_notify_check(enic);
489 vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);
490
491 return IRQ_HANDLED;
492}
493
494static inline void enic_queue_wq_skb_cont(struct enic *enic,
495 struct vnic_wq *wq, struct sk_buff *skb,
496 unsigned int len_left)
497{
498 skb_frag_t *frag;
499
500 /* Queue additional data fragments */
501 for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
502 len_left -= frag->size;
503 enic_queue_wq_desc_cont(wq, skb,
504 pci_map_page(enic->pdev, frag->page,
505 frag->page_offset, frag->size,
506 PCI_DMA_TODEVICE),
507 frag->size,
508 (len_left == 0)); /* EOP? */
509 }
510}
511
512static inline void enic_queue_wq_skb_vlan(struct enic *enic,
513 struct vnic_wq *wq, struct sk_buff *skb,
514 int vlan_tag_insert, unsigned int vlan_tag)
515{
516 unsigned int head_len = skb_headlen(skb);
517 unsigned int len_left = skb->len - head_len;
518 int eop = (len_left == 0);
519
520 /* Queue the main skb fragment */
521 enic_queue_wq_desc(wq, skb,
522 pci_map_single(enic->pdev, skb->data,
523 head_len, PCI_DMA_TODEVICE),
524 head_len,
525 vlan_tag_insert, vlan_tag,
526 eop);
527
528 if (!eop)
529 enic_queue_wq_skb_cont(enic, wq, skb, len_left);
530}
531
532static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
533 struct vnic_wq *wq, struct sk_buff *skb,
534 int vlan_tag_insert, unsigned int vlan_tag)
535{
536 unsigned int head_len = skb_headlen(skb);
537 unsigned int len_left = skb->len - head_len;
538 unsigned int hdr_len = skb_transport_offset(skb);
539 unsigned int csum_offset = hdr_len + skb->csum_offset;
540 int eop = (len_left == 0);
541
542 /* Queue the main skb fragment */
543 enic_queue_wq_desc_csum_l4(wq, skb,
544 pci_map_single(enic->pdev, skb->data,
545 head_len, PCI_DMA_TODEVICE),
546 head_len,
547 csum_offset,
548 hdr_len,
549 vlan_tag_insert, vlan_tag,
550 eop);
551
552 if (!eop)
553 enic_queue_wq_skb_cont(enic, wq, skb, len_left);
554}
555
556static inline void enic_queue_wq_skb_tso(struct enic *enic,
557 struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
558 int vlan_tag_insert, unsigned int vlan_tag)
559{
560 unsigned int head_len = skb_headlen(skb);
561 unsigned int len_left = skb->len - head_len;
562 unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
563 int eop = (len_left == 0);
564
565 /* Preload TCP csum field with IP pseudo hdr calculated
566 * with IP length set to zero. HW will later add in length
567 * to each TCP segment resulting from the TSO.
568 */
569
570 if (skb->protocol == __constant_htons(ETH_P_IP)) {
571 ip_hdr(skb)->check = 0;
572 tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
573 ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
574 } else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
575 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
576 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
577 }
578
579 /* Queue the main skb fragment */
580 enic_queue_wq_desc_tso(wq, skb,
581 pci_map_single(enic->pdev, skb->data,
582 head_len, PCI_DMA_TODEVICE),
583 head_len,
584 mss, hdr_len,
585 vlan_tag_insert, vlan_tag,
586 eop);
587
588 if (!eop)
589 enic_queue_wq_skb_cont(enic, wq, skb, len_left);
590}
591
592static inline void enic_queue_wq_skb(struct enic *enic,
593 struct vnic_wq *wq, struct sk_buff *skb)
594{
595 unsigned int mss = skb_shinfo(skb)->gso_size;
596 unsigned int vlan_tag = 0;
597 int vlan_tag_insert = 0;
598
599 if (enic->vlan_group && vlan_tx_tag_present(skb)) {
600 /* VLAN tag from trunking driver */
601 vlan_tag_insert = 1;
602 vlan_tag = vlan_tx_tag_get(skb);
603 }
604
605 if (mss)
606 enic_queue_wq_skb_tso(enic, wq, skb, mss,
607 vlan_tag_insert, vlan_tag);
608 else if (skb->ip_summed == CHECKSUM_PARTIAL)
609 enic_queue_wq_skb_csum_l4(enic, wq, skb,
610 vlan_tag_insert, vlan_tag);
611 else
612 enic_queue_wq_skb_vlan(enic, wq, skb,
613 vlan_tag_insert, vlan_tag);
614}
615
/* netif_tx_lock held, process context with BHs disabled.
 * Transmit entry point: posts the skb to WQ 0 and stops the queue
 * when descriptors run low.
 */
static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely. In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		/* linearize failed (likely -ENOMEM): drop the frame */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* irqsave: the WQ lock is also taken from the completion path */
	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
			"queue awake!\n", netdev->name);
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	/* Stop early so a worst-case (fully fragmented) skb still fits;
	 * the completion path re-wakes the queue (enic_wq_service).
	 */
	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
		netif_stop_queue(netdev);

	netdev->trans_start = jiffies;

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}
662
663/* dev_base_lock rwlock held, nominally process context */
664static struct net_device_stats *enic_get_stats(struct net_device *netdev)
665{
666 struct enic *enic = netdev_priv(netdev);
667 struct net_device_stats *net_stats = &netdev->stats;
668 struct vnic_stats *stats;
669
670 spin_lock(&enic->devcmd_lock);
671 vnic_dev_stats_dump(enic->vdev, &stats);
672 spin_unlock(&enic->devcmd_lock);
673
674 net_stats->tx_packets = stats->tx.tx_frames_ok;
675 net_stats->tx_bytes = stats->tx.tx_bytes_ok;
676 net_stats->tx_errors = stats->tx.tx_errors;
677 net_stats->tx_dropped = stats->tx.tx_drops;
678
679 net_stats->rx_packets = stats->rx.rx_frames_ok;
680 net_stats->rx_bytes = stats->rx.rx_bytes_ok;
681 net_stats->rx_errors = stats->rx.rx_errors;
682 net_stats->multicast = stats->rx.rx_multicast_frames_ok;
683 net_stats->rx_crc_errors = stats->rx.rx_crc_errors;
684 net_stats->rx_dropped = stats->rx.rx_no_bufs;
685
686 return net_stats;
687}
688
689static void enic_reset_mcaddrs(struct enic *enic)
690{
691 enic->mc_count = 0;
692}
693
694static int enic_set_mac_addr(struct net_device *netdev, char *addr)
695{
696 if (!is_valid_ether_addr(addr))
697 return -EADDRNOTAVAIL;
698
699 memcpy(netdev->dev_addr, addr, netdev->addr_len);
700
701 return 0;
702}
703
/* netif_tx_lock held, BHs disabled.
 * Program the device packet filter and sync its multicast perfect
 * filters with netdev->mc_list.  The previously programmed set is kept
 * in enic->mc_addr so only the delta is added/deleted via devcmds.
 */
static void enic_set_multicast_list(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct dev_mc_list *list = netdev->mc_list;
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
	/* Fall back to allmulti when the list overflows the perfect filters */
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
	    (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int mc_count = netdev->mc_count;
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;

	spin_lock(&enic->devcmd_lock);

	vnic_dev_packet_filter(enic->vdev, directed,
		multicast, broadcast, promisc, allmulti);

	/* Is there an easier way? Trying to minimize to
	 * calls to add/del multicast addrs. We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	/* Snapshot the (possibly truncated) new list */
	for (i = 0; list && i < mc_count; i++) {
		memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
		list = list->next;
	}

	/* Delete addrs programmed last time but absent from the new list */
	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_del_multicast_addr(enic, enic->mc_addr[i]);
	}

	/* Add addrs in the new list that weren't programmed last time */
	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_add_multicast_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;

	spin_unlock(&enic->devcmd_lock);
}
766
/* Cache the VLAN group for use on the receive path
 * (enic_rq_indicate_buf).  rtnl lock is held by the caller.
 */
static void enic_vlan_rx_register(struct net_device *netdev,
	struct vlan_group *vlan_group)
{
	struct enic *enic = netdev_priv(netdev);
	enic->vlan_group = vlan_group;
}
774
/* Add VLAN id @vid to the adapter's VLAN filter.  rtnl lock is held
 * by the caller; devcmd_lock serializes the device command.
 */
static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_add_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
784
/* Remove VLAN id @vid from the adapter's VLAN filter.  rtnl lock is
 * held by the caller; devcmd_lock serializes the device command.
 */
static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct enic *enic = netdev_priv(netdev);

	spin_lock(&enic->devcmd_lock);
	enic_del_vlan(enic, vid);
	spin_unlock(&enic->devcmd_lock);
}
794
/* netdev watchdog hook: TX appears hung, so schedule a full adapter
 * reset (enic_reset) to run later in process context.  Called with
 * netif_tx_lock held and BHs disabled, hence no direct reset here.
 */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}
801
802static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
803{
804 struct enic *enic = vnic_dev_priv(rq->vdev);
805
806 if (!buf->os_buf)
807 return;
808
809 pci_unmap_single(enic->pdev, buf->dma_addr,
810 buf->len, PCI_DMA_FROMDEVICE);
811 dev_kfree_skb_any(buf->os_buf);
812}
813
814static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size)
815{
816 struct sk_buff *skb;
817
818 skb = dev_alloc_skb(size + NET_IP_ALIGN);
819
820 if (skb)
821 skb_reserve(skb, NET_IP_ALIGN);
822
823 return skb;
824}
825
/* Allocate, DMA-map, and post one receive buffer sized for an
 * MTU-sized frame plus the Ethernet header.  Returns 0 on success or
 * -ENOMEM if the skb allocation fails.
 */
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	unsigned int len = enic->netdev->mtu + ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = enic_rq_alloc_skb(len);
	if (!skb)
		return -ENOMEM;

	/* NOTE(review): the mapping is posted without a
	 * pci_dma_mapping_error() check -- confirm this is acceptable
	 * on all supported platforms.
	 */
	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
846
/* inet_lro get_skb_header callback: using the already-decoded CQ
 * descriptor passed in @priv, locate the IP and TCP headers in @skb.
 * Returns 0 (with *iphdr, *tcph and *hdr_flags filled in) only for
 * non-fragmented IPv4 TCP packets with complete headers; returns -1
 * otherwise so LRO passes the packet through unaggregated.
 */
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
	void **tcph, u64 *hdr_flags, void *priv)
{
	struct cq_enet_rq_desc *cq_desc = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	/* Decode all completion descriptor fields; only the protocol
	 * flags are used below.
	 */
	cq_enet_rq_desc_dec(cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	/* Only non-fragmented IPv4 TCP is aggregatable */
	if (!(ipv4 && tcp && !ipv4_fragment))
		return -1;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*tcph = tcp_hdr(skb);
	*iphdr = iph;

	return 0;
}
891
/* RQ completion callback: unmap one received buffer and either hand
 * it up the stack (directly, via VLAN acceleration, or via LRO) or
 * drop it on error/overflow.  @skipped entries were consumed by a
 * prior multi-buffer completion and carry no data.
 */
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	/* Decode every field of the completion descriptor */
	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		/* Only log bad-FCS errors that actually carried data */
		if (bytes_written > 0 && !fcs_ok) {
			if (net_ratelimit())
				printk(KERN_ERR PFX
					"%s: packet error: bad FCS\n",
					enic->netdev->name);
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, enic->netdev);

		/* NOTE(review): CHECKSUM_COMPLETE normally carries the
		 * raw unfolded sum; confirm that htons() of the hw
		 * checksum here matches what the stack expects.
		 */
		if (enic->csum_rx_enabled && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = enic->netdev;
		enic->netdev->last_rx = jiffies;

		/* IPv4 frames may be aggregated by LRO; VLAN-stripped
		 * frames go through the VLAN-accel receive variants.
		 */
		if (enic->vlan_group && vlan_stripped) {

			if (ENIC_SETTING(enic, LRO) && ipv4)
				lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
					skb, enic->vlan_group,
					vlan, cq_desc);
			else
				vlan_hwaccel_receive_skb(skb,
					enic->vlan_group, vlan);

		} else {

			if (ENIC_SETTING(enic, LRO) && ipv4)
				lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
			else
				netif_receive_skb(skb);

		}

	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}
982
/* CQ service callback for the receive path: hand one RQ completion to
 * the RQ layer, which invokes enic_rq_indicate_buf for the buffer.
 * Always returns 0 (keep servicing).
 */
static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}
994
995static void enic_rq_drop_buf(struct vnic_rq *rq,
996 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
997 int skipped, void *opaque)
998{
999 struct enic *enic = vnic_dev_priv(rq->vdev);
1000 struct sk_buff *skb = buf->os_buf;
1001
1002 if (skipped)
1003 return;
1004
1005 pci_unmap_single(enic->pdev, buf->dma_addr,
1006 buf->len, PCI_DMA_FROMDEVICE);
1007
1008 dev_kfree_skb_any(skb);
1009}
1010
/* CQ service callback used while stopping the interface: drain RQ
 * completions via enic_rq_drop_buf (free, don't indicate).  Always
 * returns 0 (keep servicing).
 */
static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_drop_buf, opaque);

	return 0;
}
1022
1023static int enic_poll(struct napi_struct *napi, int budget)
1024{
1025 struct enic *enic = container_of(napi, struct enic, napi);
1026 struct net_device *netdev = enic->netdev;
1027 unsigned int rq_work_to_do = budget;
1028 unsigned int wq_work_to_do = -1; /* no limit */
1029 unsigned int work_done, rq_work_done, wq_work_done;
1030
1031 /* Service RQ (first) and WQ
1032 */
1033
1034 rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
1035 rq_work_to_do, enic_rq_service, NULL);
1036
1037 wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
1038 wq_work_to_do, enic_wq_service, NULL);
1039
1040 /* Accumulate intr event credits for this polling
1041 * cycle. An intr event is the completion of a
1042 * a WQ or RQ packet.
1043 */
1044
1045 work_done = rq_work_done + wq_work_done;
1046
1047 if (work_done > 0)
1048 vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
1049 work_done,
1050 0 /* don't unmask intr */,
1051 0 /* don't reset intr timer */);
1052
1053 if (rq_work_done > 0) {
1054
1055 /* Replenish RQ
1056 */
1057
1058 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
1059
1060 } else {
1061
1062 /* If no work done, flush all LROs and exit polling
1063 */
1064
1065 if (ENIC_SETTING(enic, LRO))
1066 lro_flush_all(&enic->lro_mgr);
1067
1068 netif_rx_complete(netdev, napi);
1069 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1070 }
1071
1072 return rq_work_done;
1073}
1074
/* NAPI poll handler used in MSI-X mode: services only the RQ
 * completion queue (the WQ has its own vector and handler).  On work
 * done, replenishes the RQ and returns interrupt event credits;
 * otherwise flushes LRO, exits polling, and unmasks the RQ vector.
 */
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int work_to_do = budget;
	unsigned int work_done;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		work_to_do, enic_rq_service, NULL);

	if (work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

		/* Accumulate intr event credits for this polling
		 * cycle.  An intr event is the completion of a
		 * a WQ or RQ packet.
		 */

		vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);
	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (ENIC_SETTING(enic, LRO))
			lro_flush_all(&enic->lro_mgr);

		netif_rx_complete(netdev, napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return work_done;
}
1118
/* Periodic timer used when notifications have no interrupt of their
 * own (MSI mode; see enic_notify_timer_start): poll the notify area
 * for device events, then re-arm.
 */
static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}
1128
/* Release the IRQ(s) requested by enic_request_intr(), using the same
 * per-mode dev_id cookie: netdev for INTx, enic for MSI, and the
 * per-vector devid for each MSI-X vector that was actually requested.
 */
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		/* only free vectors marked requested; supports partial
		 * setup failure in enic_request_intr()
		 */
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}
1151
/* Request the interrupt(s) for the mode chosen by
 * enic_set_intr_mode().  The dev_id cookie differs per mode and must
 * match enic_free_intr(): netdev for INTx (shared line), enic for
 * MSI, and msix[i].devid for each MSI-X vector.  On MSI-X failure
 * mid-setup, vectors already requested are freed.  Returns 0 or the
 * request_irq() error.
 */
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		/* Name the vectors "<netdev>-rx-0/-tx-0/-err/-notify".
		 * %.11s bounds the copied netdev name; devname must be
		 * sized for 11 chars + longest suffix -- confirm in
		 * enic.h.
		 */
		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				/* frees the vectors requested so far */
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}
1214
1215static int enic_notify_set(struct enic *enic)
1216{
1217 int err;
1218
1219 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1220 case VNIC_DEV_INTR_MODE_INTX:
1221 err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
1222 break;
1223 case VNIC_DEV_INTR_MODE_MSIX:
1224 err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
1225 break;
1226 default:
1227 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
1228 break;
1229 }
1230
1231 return err;
1232}
1233
1234static void enic_notify_timer_start(struct enic *enic)
1235{
1236 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1237 case VNIC_DEV_INTR_MODE_MSI:
1238 mod_timer(&enic->notify_timer, jiffies);
1239 break;
1240 default:
1241 /* Using intr for notification for INTx/MSI-X */
1242 break;
1243 };
1244}
1245
/* rtnl lock is held, process context.  Bring the interface up:
 * request IRQs, set up the notify area, pre-fill the RQs, enable
 * queues, program station/multicast addresses, then start the TX
 * queue, NAPI, the device, interrupts, and (MSI mode) the notify
 * timer.  Unwinds via the err_out_* labels on failure.
 */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	/* Post an initial set of receive buffers on each RQ */
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		if (err) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	enic_add_station_addr(enic);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	vnic_dev_enable(enic->vdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	vnic_dev_notify_unset(enic->vdev);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
1304
/* rtnl lock is held, process context.  Quiesce the interface: stop
 * the notify timer, disable the device/NAPI/TX queue, mask
 * interrupts, disable the WQs and RQs, then drain and clean every
 * queue so no buffers remain posted or DMA-mapped.  Returns 0, or a
 * vnic_wq/rq_disable() error.
 */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	del_timer_sync(&enic->notify_timer);

	vnic_dev_disable(enic->vdev);
	napi_disable(&enic->napi);
	netif_stop_queue(netdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	/* NOTE(review): an early error return below leaves the device
	 * only partially stopped -- confirm callers tolerate this.
	 */
	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	vnic_dev_notify_unset(enic->vdev);
	enic_free_intr(enic);

	/* Drain any outstanding completions, dropping RX buffers */
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		-1, enic_rq_service_drop, NULL);
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		-1, enic_wq_service, NULL);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
1351
1352static int enic_change_mtu(struct net_device *netdev, int new_mtu)
1353{
1354 struct enic *enic = netdev_priv(netdev);
1355 int running = netif_running(netdev);
1356
1357 if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
1358 return -EINVAL;
1359
1360 if (running)
1361 enic_stop(netdev);
1362
1363 netdev->mtu = new_mtu;
1364
1365 if (netdev->mtu > enic->port_mtu)
1366 printk(KERN_WARNING PFX
1367 "%s: interface MTU (%d) set higher "
1368 "than port MTU (%d)\n",
1369 netdev->name, netdev->mtu, enic->port_mtu);
1370
1371 if (running)
1372 enic_open(netdev);
1373
1374 return 0;
1375}
1376
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with normal interrupt delivery unavailable, invoke
 * the ISR(s) for the current interrupt mode directly.
 * NOTE(review): pdev->irq is passed even in MSI-X mode, where the
 * real vectors live in msix_entry[]; confirm the ISRs ignore their
 * irq argument.
 */
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif
1399
1400static int enic_dev_wait(struct vnic_dev *vdev,
1401 int (*start)(struct vnic_dev *, int),
1402 int (*finished)(struct vnic_dev *, int *),
1403 int arg)
1404{
1405 unsigned long time;
1406 int done;
1407 int err;
1408
1409 BUG_ON(in_interrupt());
1410
1411 err = start(vdev, arg);
1412 if (err)
1413 return err;
1414
1415 /* Wait for func to complete...2 seconds max
1416 */
1417
1418 time = jiffies + (HZ * 2);
1419 do {
1420
1421 err = finished(vdev, &done);
1422 if (err)
1423 return err;
1424
1425 if (done)
1426 return 0;
1427
1428 schedule_timeout_uninterruptible(HZ / 10);
1429
1430 } while (time_after(time, jiffies));
1431
1432 return -ETIMEDOUT;
1433}
1434
1435static int enic_dev_open(struct enic *enic)
1436{
1437 int err;
1438
1439 err = enic_dev_wait(enic->vdev, vnic_dev_open,
1440 vnic_dev_open_done, 0);
1441 if (err)
1442 printk(KERN_ERR PFX
1443 "vNIC device open failed, err %d.\n", err);
1444
1445 return err;
1446}
1447
1448static int enic_dev_soft_reset(struct enic *enic)
1449{
1450 int err;
1451
1452 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset,
1453 vnic_dev_soft_reset_done, 0);
1454 if (err)
1455 printk(KERN_ERR PFX
1456 "vNIC soft reset failed, err %d.\n", err);
1457
1458 return err;
1459}
1460
/* Process-context work item scheduled from enic_tx_timeout(): notify
 * the device of the hang, then stop, soft-reset, re-init, and re-open
 * the interface.  rtnl_lock serializes against the regular open/stop
 * paths.
 */
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	spin_lock(&enic->devcmd_lock);
	vnic_dev_hang_notify(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	enic_stop(enic->netdev);
	enic_dev_soft_reset(enic);
	/* presumably the reset wiped the mcast filter state; force a
	 * full re-add on the next enic_set_multicast_list() pass
	 */
	enic_reset_mcaddrs(enic);
	enic_init_vnic_resources(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}
1482
/* Pick the richest interrupt mode the device resources and platform
 * support, trying MSI-X, then MSI, then INTx; clamp the queue/intr
 * counts to the chosen mode's layout and record the mode with the
 * vnic_dev layer.  enic->config.intr_mode caps the attempt
 * (<1 allows MSI-X, <2 MSI, <3 INTx).  Returns 0 or -EINVAL if no
 * mode fits.
 */
static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = ARRAY_SIZE(enic->rq);
	unsigned int m = ARRAY_SIZE(enic->wq);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2 &&
	    !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

		enic->rq_count = n;
		enic->wq_count = m;
		enic->cq_count = n + m;
		enic->intr_count = n + m + 2;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX);

		return 0;
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}
1570
/* Undo enic_set_intr_mode(): disable MSI/MSI-X on the PCI device as
 * appropriate and reset the recorded mode to UNKNOWN.
 */
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
1586
/* Unmap BAR0 if it was mapped; safe to call on early probe failure
 * before the mapping was made (vaddr still NULL).
 */
static void enic_iounmap(struct enic *enic)
{
	if (enic->bar0.vaddr)
		iounmap(enic->bar0.vaddr);
}
1592
/* PCI probe: bring up one vNIC.  Allocates the netdev, enables the
 * PCI device and DMA, maps BAR0, registers with the vnic_dev library,
 * opens and initializes the device, chooses an interrupt mode,
 * allocates queue resources, configures the NIC, and finally
 * registers the net device.  Errors unwind in reverse order via the
 * err_out_* labels.
 */
static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Initial NIC config: RSS disabled, VLAN stripping enabled */
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;

	/* Allocate net device structure and initialize.  Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX
				"No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
		/* 40-bit DMA available: enable NETIF_F_HIGHDMA below */
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0
	 */

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX
			"BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	/* NOTE(review): bar0.len is read here before being assigned
	 * two lines below.  The private area is zeroed by
	 * alloc_etherdev() (see comment above), so pci_iomap() sees
	 * maxlen == 0 -- confirm this ordering is intentional.
	 */
	enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len);
	enic->bar0.bus_addr = pci_resource_start(pdev, 0);
	enic->bar0.len = pci_resource_len(pdev, 0);

	if (!enic->bar0.vaddr) {
		printk(KERN_ERR PFX
			"Cannot memory-map BAR0 res hdr, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
	if (!enic->vdev) {
		printk(KERN_ERR PFX
			"vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX
			"vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Get vNIC configuration failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set intr mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	/* Enable VLAN tag stripping.  RSS not enabled (yet).
	 */

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	/* Setup notification timer, HW reset task, and locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX
			"Invalid MAC address, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	netdev->open = enic_open;
	netdev->stop = enic_stop;
	netdev->hard_start_xmit = enic_hard_start_xmit;
	netdev->get_stats = enic_get_stats;
	netdev->set_multicast_list = enic_set_multicast_list;
	netdev->change_mtu = enic_change_mtu;
	netdev->vlan_rx_register = enic_vlan_rx_register;
	netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid;
	netdev->tx_timeout = enic_tx_timeout;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = enic_poll_controller;
#endif

	/* MSI-X gets its own RQ-only poll handler; INTx/MSI share
	 * the combined WQ/RQ handler.
	 */
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;


	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	/* Set up the inet_lro manager used by the receive path */
	if (ENIC_SETTING(enic, LRO)) {
		enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
		enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
		enic->lro_mgr.lro_arr = enic->lro_desc;
		enic->lro_mgr.get_skb_header = enic_get_skb_header;
		enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
		enic->lro_mgr.dev = netdev;
		enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
		enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	}

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX
			"Cannot register net device, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_vnic_resources(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	enic_clear_intr_mode(enic);
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
1892
/* PCI remove: tear down in reverse order of enic_probe().  The netdev
 * NULL check makes this safe if probe never completed for this pdev.
 */
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		/* finish any pending enic_reset work first */
		flush_scheduled_work();
		unregister_netdev(netdev);
		enic_free_vnic_resources(enic);
		vnic_dev_close(enic->vdev);
		enic_clear_intr_mode(enic);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}
1913
/* PCI driver glue binding enic_id_table devices to probe/remove */
static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};
1920
/* Module load: announce the driver and register with the PCI core. */
static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}
1927
/* Module unload: unregister the PCI driver, invoking enic_remove()
 * for each bound device.
 */
static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}
1932
/* Module entry/exit points */
module_init(enic_init_module);
module_exit(enic_cleanup_module);
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
new file mode 100644
index 000000000000..95184b9108ef
--- /dev/null
+++ b/drivers/net/enic/enic_res.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/netdevice.h>
25
26#include "wq_enet_desc.h"
27#include "rq_enet_desc.h"
28#include "cq_enet_desc.h"
29#include "vnic_resource.h"
30#include "vnic_enet.h"
31#include "vnic_dev.h"
32#include "vnic_wq.h"
33#include "vnic_rq.h"
34#include "vnic_cq.h"
35#include "vnic_intr.h"
36#include "vnic_stats.h"
37#include "vnic_nic.h"
38#include "vnic_rss.h"
39#include "enic_res.h"
40#include "enic.h"
41
42int enic_get_vnic_config(struct enic *enic)
43{
44 struct vnic_enet_config *c = &enic->config;
45 int err;
46
47 err = vnic_dev_mac_addr(enic->vdev, enic->mac_addr);
48 if (err) {
49 printk(KERN_ERR PFX "Error getting MAC addr, %d\n", err);
50 return err;
51 }
52
53#define GET_CONFIG(m) \
54 do { \
55 err = vnic_dev_spec(enic->vdev, \
56 offsetof(struct vnic_enet_config, m), \
57 sizeof(c->m), &c->m); \
58 if (err) { \
59 printk(KERN_ERR PFX \
60 "Error getting %s, %d\n", #m, err); \
61 return err; \
62 } \
63 } while (0)
64
65 GET_CONFIG(flags);
66 GET_CONFIG(wq_desc_count);
67 GET_CONFIG(rq_desc_count);
68 GET_CONFIG(mtu);
69 GET_CONFIG(intr_timer);
70 GET_CONFIG(intr_timer_type);
71 GET_CONFIG(intr_mode);
72
73 c->wq_desc_count =
74 min_t(u32, ENIC_MAX_WQ_DESCS,
75 max_t(u32, ENIC_MIN_WQ_DESCS,
76 c->wq_desc_count));
77 c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
78
79 c->rq_desc_count =
80 min_t(u32, ENIC_MAX_RQ_DESCS,
81 max_t(u32, ENIC_MIN_RQ_DESCS,
82 c->rq_desc_count));
83 c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
84
85 if (c->mtu == 0)
86 c->mtu = 1500;
87 c->mtu = min_t(u16, ENIC_MAX_MTU,
88 max_t(u16, ENIC_MIN_MTU,
89 c->mtu));
90
91 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
92
93 printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
94 "wq/rq %d/%d\n",
95 enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
96 enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
97 c->wq_desc_count, c->rq_desc_count);
98 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
99 "intr timer %d\n",
100 c->mtu, ENIC_SETTING(enic, TXCSUM),
101 ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
102 ENIC_SETTING(enic, LRO), c->intr_timer);
103
104 return 0;
105}
106
107void enic_add_station_addr(struct enic *enic)
108{
109 vnic_dev_add_addr(enic->vdev, enic->mac_addr);
110}
111
112void enic_add_multicast_addr(struct enic *enic, u8 *addr)
113{
114 vnic_dev_add_addr(enic->vdev, addr);
115}
116
117void enic_del_multicast_addr(struct enic *enic, u8 *addr)
118{
119 vnic_dev_del_addr(enic->vdev, addr);
120}
121
122void enic_add_vlan(struct enic *enic, u16 vlanid)
123{
124 u64 a0 = vlanid, a1 = 0;
125 int wait = 1000;
126 int err;
127
128 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_ADD, &a0, &a1, wait);
129 if (err)
130 printk(KERN_ERR PFX "Can't add vlan id, %d\n", err);
131}
132
133void enic_del_vlan(struct enic *enic, u16 vlanid)
134{
135 u64 a0 = vlanid, a1 = 0;
136 int wait = 1000;
137 int err;
138
139 err = vnic_dev_cmd(enic->vdev, CMD_VLAN_DEL, &a0, &a1, wait);
140 if (err)
141 printk(KERN_ERR PFX "Can't delete vlan id, %d\n", err);
142}
143
144int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
145 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
146 u8 ig_vlan_strip_en)
147{
148 u64 a0, a1;
149 u32 nic_cfg;
150 int wait = 1000;
151
152 vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
153 rss_hash_type, rss_hash_bits, rss_base_cpu,
154 rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
155
156 a0 = nic_cfg;
157 a1 = 0;
158
159 return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
160}
161
162void enic_free_vnic_resources(struct enic *enic)
163{
164 unsigned int i;
165
166 for (i = 0; i < enic->wq_count; i++)
167 vnic_wq_free(&enic->wq[i]);
168 for (i = 0; i < enic->rq_count; i++)
169 vnic_rq_free(&enic->rq[i]);
170 for (i = 0; i < enic->cq_count; i++)
171 vnic_cq_free(&enic->cq[i]);
172 for (i = 0; i < enic->intr_count; i++)
173 vnic_intr_free(&enic->intr[i]);
174}
175
176void enic_get_res_counts(struct enic *enic)
177{
178 enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ);
179 enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ);
180 enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ);
181 enic->intr_count = vnic_dev_get_res_count(enic->vdev,
182 RES_TYPE_INTR_CTRL);
183
184 printk(KERN_INFO PFX "vNIC resources avail: "
185 "wq %d rq %d cq %d intr %d\n",
186 enic->wq_count, enic->rq_count,
187 enic->cq_count, enic->intr_count);
188}
189
190void enic_init_vnic_resources(struct enic *enic)
191{
192 enum vnic_dev_intr_mode intr_mode;
193 unsigned int mask_on_assertion;
194 unsigned int interrupt_offset;
195 unsigned int error_interrupt_enable;
196 unsigned int error_interrupt_offset;
197 unsigned int cq_index;
198 unsigned int i;
199
200 intr_mode = vnic_dev_get_intr_mode(enic->vdev);
201
202 /* Init RQ/WQ resources.
203 *
204 * RQ[0 - n-1] point to CQ[0 - n-1]
205 * WQ[0 - m-1] point to CQ[n - n+m-1]
206 *
207 * Error interrupt is not enabled for MSI.
208 */
209
210 switch (intr_mode) {
211 case VNIC_DEV_INTR_MODE_INTX:
212 case VNIC_DEV_INTR_MODE_MSIX:
213 error_interrupt_enable = 1;
214 error_interrupt_offset = enic->intr_count - 2;
215 break;
216 default:
217 error_interrupt_enable = 0;
218 error_interrupt_offset = 0;
219 break;
220 }
221
222 for (i = 0; i < enic->rq_count; i++) {
223 cq_index = i;
224 vnic_rq_init(&enic->rq[i],
225 cq_index,
226 error_interrupt_enable,
227 error_interrupt_offset);
228 }
229
230 for (i = 0; i < enic->wq_count; i++) {
231 cq_index = enic->rq_count + i;
232 vnic_wq_init(&enic->wq[i],
233 cq_index,
234 error_interrupt_enable,
235 error_interrupt_offset);
236 }
237
238 /* Init CQ resources
239 *
240 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
241 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
242 */
243
244 for (i = 0; i < enic->cq_count; i++) {
245
246 switch (intr_mode) {
247 case VNIC_DEV_INTR_MODE_MSIX:
248 interrupt_offset = i;
249 break;
250 default:
251 interrupt_offset = 0;
252 break;
253 }
254
255 vnic_cq_init(&enic->cq[i],
256 0 /* flow_control_enable */,
257 1 /* color_enable */,
258 0 /* cq_head */,
259 0 /* cq_tail */,
260 1 /* cq_tail_color */,
261 1 /* interrupt_enable */,
262 1 /* cq_entry_enable */,
263 0 /* cq_message_enable */,
264 interrupt_offset,
265 0 /* cq_message_addr */);
266 }
267
268 /* Init INTR resources
269 *
270 * mask_on_assertion is not used for INTx due to the level-
271 * triggered nature of INTx
272 */
273
274 switch (intr_mode) {
275 case VNIC_DEV_INTR_MODE_MSI:
276 case VNIC_DEV_INTR_MODE_MSIX:
277 mask_on_assertion = 1;
278 break;
279 default:
280 mask_on_assertion = 0;
281 break;
282 }
283
284 for (i = 0; i < enic->intr_count; i++) {
285 vnic_intr_init(&enic->intr[i],
286 enic->config.intr_timer,
287 enic->config.intr_timer_type,
288 mask_on_assertion);
289 }
290
291 /* Clear LIF stats
292 */
293
294 vnic_dev_stats_clear(enic->vdev);
295}
296
297int enic_alloc_vnic_resources(struct enic *enic)
298{
299 enum vnic_dev_intr_mode intr_mode;
300 unsigned int i;
301 int err;
302
303 intr_mode = vnic_dev_get_intr_mode(enic->vdev);
304
305 printk(KERN_INFO PFX "vNIC resources used: "
306 "wq %d rq %d cq %d intr %d intr mode %s\n",
307 enic->wq_count, enic->rq_count,
308 enic->cq_count, enic->intr_count,
309 intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
310 intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
311 intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
312 "unknown"
313 );
314
315 /* Allocate queue resources
316 */
317
318 for (i = 0; i < enic->wq_count; i++) {
319 err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
320 enic->config.wq_desc_count,
321 sizeof(struct wq_enet_desc));
322 if (err)
323 goto err_out_cleanup;
324 }
325
326 for (i = 0; i < enic->rq_count; i++) {
327 err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
328 enic->config.rq_desc_count,
329 sizeof(struct rq_enet_desc));
330 if (err)
331 goto err_out_cleanup;
332 }
333
334 for (i = 0; i < enic->cq_count; i++) {
335 if (i < enic->rq_count)
336 err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
337 enic->config.rq_desc_count,
338 sizeof(struct cq_enet_rq_desc));
339 else
340 err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
341 enic->config.wq_desc_count,
342 sizeof(struct cq_enet_wq_desc));
343 if (err)
344 goto err_out_cleanup;
345 }
346
347 for (i = 0; i < enic->intr_count; i++) {
348 err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
349 if (err)
350 goto err_out_cleanup;
351 }
352
353 /* Hook remaining resource
354 */
355
356 enic->legacy_pba = vnic_dev_get_res(enic->vdev,
357 RES_TYPE_INTR_PBA_LEGACY, 0);
358 if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
359 printk(KERN_ERR PFX "Failed to hook legacy pba resource\n");
360 err = -ENODEV;
361 goto err_out_cleanup;
362 }
363
364 return 0;
365
366err_out_cleanup:
367 enic_free_vnic_resources(enic);
368
369 return err;
370}
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
new file mode 100644
index 000000000000..68534a29b7ac
--- /dev/null
+++ b/drivers/net/enic/enic_res.h
@@ -0,0 +1,151 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _ENIC_RES_H_
21#define _ENIC_RES_H_
22
23#include "wq_enet_desc.h"
24#include "rq_enet_desc.h"
25#include "vnic_wq.h"
26#include "vnic_rq.h"
27
28#define ENIC_MIN_WQ_DESCS 64
29#define ENIC_MAX_WQ_DESCS 4096
30#define ENIC_MIN_RQ_DESCS 64
31#define ENIC_MAX_RQ_DESCS 4096
32
33#define ENIC_MIN_MTU 576 /* minimum for IPv4 */
34#define ENIC_MAX_MTU 9000
35
36#define ENIC_MULTICAST_PERFECT_FILTERS 32
37
38#define ENIC_NON_TSO_MAX_DESC 16
39
40#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
41
42static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
43 void *os_buf, dma_addr_t dma_addr, unsigned int len,
44 unsigned int mss_or_csum_offset, unsigned int hdr_len,
45 int vlan_tag_insert, unsigned int vlan_tag,
46 int offload_mode, int cq_entry, int sop, int eop)
47{
48 struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
49
50 wq_enet_desc_enc(desc,
51 (u64)dma_addr | VNIC_PADDR_TARGET,
52 (u16)len,
53 (u16)mss_or_csum_offset,
54 (u16)hdr_len, (u8)offload_mode,
55 (u8)eop, (u8)cq_entry,
56 0, /* fcoe_encap */
57 (u8)vlan_tag_insert,
58 (u16)vlan_tag,
59 0 /* loopback */);
60
61 wmb();
62
63 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
64}
65
66static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
67 void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop)
68{
69 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
70 0, 0, 0, 0, 0,
71 eop, 0 /* !SOP */, eop);
72}
73
74static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
75 dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert,
76 unsigned int vlan_tag, int eop)
77{
78 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
79 0, 0, vlan_tag_insert, vlan_tag,
80 WQ_ENET_OFFLOAD_MODE_CSUM,
81 eop, 1 /* SOP */, eop);
82}
83
84static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
85 void *os_buf, dma_addr_t dma_addr, unsigned int len,
86 int ip_csum, int tcpudp_csum, int vlan_tag_insert,
87 unsigned int vlan_tag, int eop)
88{
89 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
90 (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0),
91 0, vlan_tag_insert, vlan_tag,
92 WQ_ENET_OFFLOAD_MODE_CSUM,
93 eop, 1 /* SOP */, eop);
94}
95
96static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
97 void *os_buf, dma_addr_t dma_addr, unsigned int len,
98 unsigned int csum_offset, unsigned int hdr_len,
99 int vlan_tag_insert, unsigned int vlan_tag, int eop)
100{
101 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
102 csum_offset, hdr_len, vlan_tag_insert, vlan_tag,
103 WQ_ENET_OFFLOAD_MODE_CSUM_L4,
104 eop, 1 /* SOP */, eop);
105}
106
107static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
108 void *os_buf, dma_addr_t dma_addr, unsigned int len,
109 unsigned int mss, unsigned int hdr_len, int vlan_tag_insert,
110 unsigned int vlan_tag, int eop)
111{
112 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
113 mss, hdr_len, vlan_tag_insert, vlan_tag,
114 WQ_ENET_OFFLOAD_MODE_TSO,
115 eop, 1 /* SOP */, eop);
116}
117
118static inline void enic_queue_rq_desc(struct vnic_rq *rq,
119 void *os_buf, unsigned int os_buf_index,
120 dma_addr_t dma_addr, unsigned int len)
121{
122 struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
123 u8 type = os_buf_index ?
124 RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
125
126 rq_enet_desc_enc(desc,
127 (u64)dma_addr | VNIC_PADDR_TARGET,
128 type, (u16)len);
129
130 wmb();
131
132 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
133}
134
135struct enic;
136
137int enic_get_vnic_config(struct enic *);
138void enic_add_station_addr(struct enic *enic);
139void enic_add_multicast_addr(struct enic *enic, u8 *addr);
140void enic_del_multicast_addr(struct enic *enic, u8 *addr);
141void enic_add_vlan(struct enic *enic, u16 vlanid);
142void enic_del_vlan(struct enic *enic, u16 vlanid);
143int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
144 u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
145 u8 ig_vlan_strip_en);
146void enic_get_res_counts(struct enic *enic);
147void enic_init_vnic_resources(struct enic *enic);
148int enic_alloc_vnic_resources(struct enic *);
149void enic_free_vnic_resources(struct enic *);
150
151#endif /* _ENIC_RES_H_ */
diff --git a/drivers/net/enic/rq_enet_desc.h b/drivers/net/enic/rq_enet_desc.h
new file mode 100644
index 000000000000..a06e649010ce
--- /dev/null
+++ b/drivers/net/enic/rq_enet_desc.h
@@ -0,0 +1,60 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _RQ_ENET_DESC_H_
21#define _RQ_ENET_DESC_H_
22
23/* Ethernet receive queue descriptor: 16B */
24struct rq_enet_desc {
25 __le64 address;
26 __le16 length_type;
27 u8 reserved[6];
28};
29
30enum rq_enet_type_types {
31 RQ_ENET_TYPE_ONLY_SOP = 0,
32 RQ_ENET_TYPE_NOT_SOP = 1,
33 RQ_ENET_TYPE_RESV2 = 2,
34 RQ_ENET_TYPE_RESV3 = 3,
35};
36
37#define RQ_ENET_ADDR_BITS 64
38#define RQ_ENET_LEN_BITS 14
39#define RQ_ENET_LEN_MASK ((1 << RQ_ENET_LEN_BITS) - 1)
40#define RQ_ENET_TYPE_BITS 2
41#define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
42
43static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
44 u64 address, u8 type, u16 length)
45{
46 desc->address = cpu_to_le64(address);
47 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
48 ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
49}
50
51static inline void rq_enet_desc_dec(struct rq_enet_desc *desc,
52 u64 *address, u8 *type, u16 *length)
53{
54 *address = le64_to_cpu(desc->address);
55 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK;
56 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) &
57 RQ_ENET_TYPE_MASK);
58}
59
60#endif /* _RQ_ENET_DESC_H_ */
diff --git a/drivers/net/enic/vnic_cq.c b/drivers/net/enic/vnic_cq.c
new file mode 100644
index 000000000000..020ae6c3f3d9
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
28void vnic_cq_free(struct vnic_cq *cq)
29{
30 vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
31
32 cq->ctrl = NULL;
33}
34
35int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
36 unsigned int desc_count, unsigned int desc_size)
37{
38 int err;
39
40 cq->index = index;
41 cq->vdev = vdev;
42
43 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
44 if (!cq->ctrl) {
45 printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
46 return -EINVAL;
47 }
48
49 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
50 if (err)
51 return err;
52
53 return 0;
54}
55
56void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
57 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
58 unsigned int cq_tail_color, unsigned int interrupt_enable,
59 unsigned int cq_entry_enable, unsigned int cq_message_enable,
60 unsigned int interrupt_offset, u64 cq_message_addr)
61{
62 u64 paddr;
63
64 paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
65 writeq(paddr, &cq->ctrl->ring_base);
66 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
67 iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
68 iowrite32(color_enable, &cq->ctrl->color_enable);
69 iowrite32(cq_head, &cq->ctrl->cq_head);
70 iowrite32(cq_tail, &cq->ctrl->cq_tail);
71 iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
72 iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
73 iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
74 iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
75 iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
76 writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
77}
78
79void vnic_cq_clean(struct vnic_cq *cq)
80{
81 cq->to_clean = 0;
82 cq->last_color = 0;
83
84 iowrite32(0, &cq->ctrl->cq_head);
85 iowrite32(0, &cq->ctrl->cq_tail);
86 iowrite32(1, &cq->ctrl->cq_tail_color);
87
88 vnic_dev_clear_desc_ring(&cq->ring);
89}
diff --git a/drivers/net/enic/vnic_cq.h b/drivers/net/enic/vnic_cq.h
new file mode 100644
index 000000000000..114763cbc2f8
--- /dev/null
+++ b/drivers/net/enic/vnic_cq.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_CQ_H_
21#define _VNIC_CQ_H_
22
23#include "cq_desc.h"
24#include "vnic_dev.h"
25
26/* Completion queue control */
27struct vnic_cq_ctrl {
28 u64 ring_base; /* 0x00 */
29 u32 ring_size; /* 0x08 */
30 u32 pad0;
31 u32 flow_control_enable; /* 0x10 */
32 u32 pad1;
33 u32 color_enable; /* 0x18 */
34 u32 pad2;
35 u32 cq_head; /* 0x20 */
36 u32 pad3;
37 u32 cq_tail; /* 0x28 */
38 u32 pad4;
39 u32 cq_tail_color; /* 0x30 */
40 u32 pad5;
41 u32 interrupt_enable; /* 0x38 */
42 u32 pad6;
43 u32 cq_entry_enable; /* 0x40 */
44 u32 pad7;
45 u32 cq_message_enable; /* 0x48 */
46 u32 pad8;
47 u32 interrupt_offset; /* 0x50 */
48 u32 pad9;
49 u64 cq_message_addr; /* 0x58 */
50 u32 pad10;
51};
52
53struct vnic_cq {
54 unsigned int index;
55 struct vnic_dev *vdev;
56 struct vnic_cq_ctrl __iomem *ctrl; /* memory-mapped */
57 struct vnic_dev_ring ring;
58 unsigned int to_clean;
59 unsigned int last_color;
60};
61
62static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
63 unsigned int work_to_do,
64 int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
65 u8 type, u16 q_number, u16 completed_index, void *opaque),
66 void *opaque)
67{
68 struct cq_desc *cq_desc;
69 unsigned int work_done = 0;
70 u16 q_number, completed_index;
71 u8 type, color;
72
73 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
74 cq->ring.desc_size * cq->to_clean);
75 cq_desc_dec(cq_desc, &type, &color,
76 &q_number, &completed_index);
77
78 while (color != cq->last_color) {
79
80 if ((*q_service)(cq->vdev, cq_desc, type,
81 q_number, completed_index, opaque))
82 break;
83
84 cq->to_clean++;
85 if (cq->to_clean == cq->ring.desc_count) {
86 cq->to_clean = 0;
87 cq->last_color = cq->last_color ? 0 : 1;
88 }
89
90 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
91 cq->ring.desc_size * cq->to_clean);
92 cq_desc_dec(cq_desc, &type, &color,
93 &q_number, &completed_index);
94
95 work_done++;
96 if (work_done >= work_to_do)
97 break;
98 }
99
100 return work_done;
101}
102
103void vnic_cq_free(struct vnic_cq *cq);
104int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
105 unsigned int desc_count, unsigned int desc_size);
106void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
107 unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
108 unsigned int cq_tail_color, unsigned int interrupt_enable,
109 unsigned int cq_entry_enable, unsigned int message_enable,
110 unsigned int interrupt_offset, u64 message_addr);
111void vnic_cq_clean(struct vnic_cq *cq);
112
113#endif /* _VNIC_CQ_H_ */
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
new file mode 100644
index 000000000000..4d104f5c30f9
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.c
@@ -0,0 +1,674 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25#include <linux/if_ether.h>
26
27#include "vnic_resource.h"
28#include "vnic_devcmd.h"
29#include "vnic_dev.h"
30#include "vnic_stats.h"
31
32struct vnic_res {
33 void __iomem *vaddr;
34 unsigned int count;
35};
36
37struct vnic_dev {
38 void *priv;
39 struct pci_dev *pdev;
40 struct vnic_res res[RES_TYPE_MAX];
41 enum vnic_dev_intr_mode intr_mode;
42 struct vnic_devcmd __iomem *devcmd;
43 struct vnic_devcmd_notify *notify;
44 struct vnic_devcmd_notify notify_copy;
45 dma_addr_t notify_pa;
46 u32 *linkstatus;
47 dma_addr_t linkstatus_pa;
48 struct vnic_stats *stats;
49 dma_addr_t stats_pa;
50 struct vnic_devcmd_fw_info *fw_info;
51 dma_addr_t fw_info_pa;
52};
53
54#define VNIC_MAX_RES_HDR_SIZE \
55 (sizeof(struct vnic_resource_header) + \
56 sizeof(struct vnic_resource) * RES_TYPE_MAX)
57#define VNIC_RES_STRIDE 128
58
59void *vnic_dev_priv(struct vnic_dev *vdev)
60{
61 return vdev->priv;
62}
63
64static int vnic_dev_discover_res(struct vnic_dev *vdev,
65 struct vnic_dev_bar *bar)
66{
67 struct vnic_resource_header __iomem *rh;
68 struct vnic_resource __iomem *r;
69 u8 type;
70
71 if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
72 printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
73 return -EINVAL;
74 }
75
76 rh = bar->vaddr;
77 if (!rh) {
78 printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
79 return -EINVAL;
80 }
81
82 if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
83 ioread32(&rh->version) != VNIC_RES_VERSION) {
84 printk(KERN_ERR "vNIC BAR0 res magic/version error "
85 "exp (%lx/%lx) curr (%x/%x)\n",
86 VNIC_RES_MAGIC, VNIC_RES_VERSION,
87 ioread32(&rh->magic), ioread32(&rh->version));
88 return -EINVAL;
89 }
90
91 r = (struct vnic_resource __iomem *)(rh + 1);
92
93 while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
94
95 u8 bar_num = ioread8(&r->bar);
96 u32 bar_offset = ioread32(&r->bar_offset);
97 u32 count = ioread32(&r->count);
98 u32 len;
99
100 r++;
101
102 if (bar_num != 0) /* only mapping in BAR0 resources */
103 continue;
104
105 switch (type) {
106 case RES_TYPE_WQ:
107 case RES_TYPE_RQ:
108 case RES_TYPE_CQ:
109 case RES_TYPE_INTR_CTRL:
110 /* each count is stride bytes long */
111 len = count * VNIC_RES_STRIDE;
112 if (len + bar_offset > bar->len) {
113 printk(KERN_ERR "vNIC BAR0 resource %d "
114 "out-of-bounds, offset 0x%x + "
115 "size 0x%x > bar len 0x%lx\n",
116 type, bar_offset,
117 len,
118 bar->len);
119 return -EINVAL;
120 }
121 break;
122 case RES_TYPE_INTR_PBA_LEGACY:
123 case RES_TYPE_DEVCMD:
124 len = count;
125 break;
126 default:
127 continue;
128 }
129
130 vdev->res[type].count = count;
131 vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
132 }
133
134 return 0;
135}
136
137unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
138 enum vnic_res_type type)
139{
140 return vdev->res[type].count;
141}
142
143void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
144 unsigned int index)
145{
146 if (!vdev->res[type].vaddr)
147 return NULL;
148
149 switch (type) {
150 case RES_TYPE_WQ:
151 case RES_TYPE_RQ:
152 case RES_TYPE_CQ:
153 case RES_TYPE_INTR_CTRL:
154 return (char __iomem *)vdev->res[type].vaddr +
155 index * VNIC_RES_STRIDE;
156 default:
157 return (char __iomem *)vdev->res[type].vaddr;
158 }
159}
160
161unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
162 unsigned int desc_count, unsigned int desc_size)
163{
164 /* The base address of the desc rings must be 512 byte aligned.
165 * Descriptor count is aligned to groups of 32 descriptors. A
166 * count of 0 means the maximum 4096 descriptors. Descriptor
167 * size is aligned to 16 bytes.
168 */
169
170 unsigned int count_align = 32;
171 unsigned int desc_align = 16;
172
173 ring->base_align = 512;
174
175 if (desc_count == 0)
176 desc_count = 4096;
177
178 ring->desc_count = ALIGN(desc_count, count_align);
179
180 ring->desc_size = ALIGN(desc_size, desc_align);
181
182 ring->size = ring->desc_count * ring->desc_size;
183 ring->size_unaligned = ring->size + ring->base_align;
184
185 return ring->size_unaligned;
186}
187
188void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
189{
190 memset(ring->descs, 0, ring->size);
191}
192
193int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
194 unsigned int desc_count, unsigned int desc_size)
195{
196 vnic_dev_desc_ring_size(ring, desc_count, desc_size);
197
198 ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
199 ring->size_unaligned,
200 &ring->base_addr_unaligned);
201
202 if (!ring->descs_unaligned) {
203 printk(KERN_ERR
204 "Failed to allocate ring (size=%d), aborting\n",
205 (int)ring->size);
206 return -ENOMEM;
207 }
208
209 ring->base_addr = ALIGN(ring->base_addr_unaligned,
210 ring->base_align);
211 ring->descs = (u8 *)ring->descs_unaligned +
212 (ring->base_addr - ring->base_addr_unaligned);
213
214 vnic_dev_clear_desc_ring(ring);
215
216 ring->desc_avail = ring->desc_count - 1;
217
218 return 0;
219}
220
221void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
222{
223 if (ring->descs) {
224 pci_free_consistent(vdev->pdev,
225 ring->size_unaligned,
226 ring->descs_unaligned,
227 ring->base_addr_unaligned);
228 ring->descs = NULL;
229 }
230}
231
232int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
233 u64 *a0, u64 *a1, int wait)
234{
235 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
236 int delay;
237 u32 status;
238 int dev_cmd_err[] = {
239 /* convert from fw's version of error.h to host's version */
240 0, /* ERR_SUCCESS */
241 EINVAL, /* ERR_EINVAL */
242 EFAULT, /* ERR_EFAULT */
243 EPERM, /* ERR_EPERM */
244 EBUSY, /* ERR_EBUSY */
245 };
246 int err;
247
248 status = ioread32(&devcmd->status);
249 if (status & STAT_BUSY) {
250 printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
251 return -EBUSY;
252 }
253
254 if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
255 writeq(*a0, &devcmd->args[0]);
256 writeq(*a1, &devcmd->args[1]);
257 wmb();
258 }
259
260 iowrite32(cmd, &devcmd->cmd);
261
262 if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
263 return 0;
264
265 for (delay = 0; delay < wait; delay++) {
266
267 udelay(100);
268
269 status = ioread32(&devcmd->status);
270 if (!(status & STAT_BUSY)) {
271
272 if (status & STAT_ERROR) {
273 err = dev_cmd_err[(int)readq(&devcmd->args[0])];
274 printk(KERN_ERR "Error %d devcmd %d\n",
275 err, _CMD_N(cmd));
276 return -err;
277 }
278
279 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
280 rmb();
281 *a0 = readq(&devcmd->args[0]);
282 *a1 = readq(&devcmd->args[1]);
283 }
284
285 return 0;
286 }
287 }
288
289 printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
290 return -ETIMEDOUT;
291}
292
293int vnic_dev_fw_info(struct vnic_dev *vdev,
294 struct vnic_devcmd_fw_info **fw_info)
295{
296 u64 a0, a1 = 0;
297 int wait = 1000;
298 int err = 0;
299
300 if (!vdev->fw_info) {
301 vdev->fw_info = pci_alloc_consistent(vdev->pdev,
302 sizeof(struct vnic_devcmd_fw_info),
303 &vdev->fw_info_pa);
304 if (!vdev->fw_info)
305 return -ENOMEM;
306
307 a0 = vdev->fw_info_pa;
308
309 /* only get fw_info once and cache it */
310 err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
311 }
312
313 *fw_info = vdev->fw_info;
314
315 return err;
316}
317
318int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
319 void *value)
320{
321 u64 a0, a1;
322 int wait = 1000;
323 int err;
324
325 a0 = offset;
326 a1 = size;
327
328 err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
329
330 switch (size) {
331 case 1: *(u8 *)value = (u8)a0; break;
332 case 2: *(u16 *)value = (u16)a0; break;
333 case 4: *(u32 *)value = (u32)a0; break;
334 case 8: *(u64 *)value = a0; break;
335 default: BUG(); break;
336 }
337
338 return err;
339}
340
341int vnic_dev_stats_clear(struct vnic_dev *vdev)
342{
343 u64 a0 = 0, a1 = 0;
344 int wait = 1000;
345 return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
346}
347
348int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
349{
350 u64 a0, a1;
351 int wait = 1000;
352
353 if (!vdev->stats) {
354 vdev->stats = pci_alloc_consistent(vdev->pdev,
355 sizeof(struct vnic_stats), &vdev->stats_pa);
356 if (!vdev->stats)
357 return -ENOMEM;
358 }
359
360 *stats = vdev->stats;
361 a0 = vdev->stats_pa;
362 a1 = sizeof(struct vnic_stats);
363
364 return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
365}
366
367int vnic_dev_close(struct vnic_dev *vdev)
368{
369 u64 a0 = 0, a1 = 0;
370 int wait = 1000;
371 return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
372}
373
374int vnic_dev_enable(struct vnic_dev *vdev)
375{
376 u64 a0 = 0, a1 = 0;
377 int wait = 1000;
378 return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
379}
380
381int vnic_dev_disable(struct vnic_dev *vdev)
382{
383 u64 a0 = 0, a1 = 0;
384 int wait = 1000;
385 return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
386}
387
388int vnic_dev_open(struct vnic_dev *vdev, int arg)
389{
390 u64 a0 = (u32)arg, a1 = 0;
391 int wait = 1000;
392 return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
393}
394
395int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
396{
397 u64 a0 = 0, a1 = 0;
398 int wait = 1000;
399 int err;
400
401 *done = 0;
402
403 err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
404 if (err)
405 return err;
406
407 *done = (a0 == 0);
408
409 return 0;
410}
411
412int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
413{
414 u64 a0 = (u32)arg, a1 = 0;
415 int wait = 1000;
416 return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
417}
418
419int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
420{
421 u64 a0 = 0, a1 = 0;
422 int wait = 1000;
423 int err;
424
425 *done = 0;
426
427 err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
428 if (err)
429 return err;
430
431 *done = (a0 == 0);
432
433 return 0;
434}
435
436int vnic_dev_hang_notify(struct vnic_dev *vdev)
437{
438 u64 a0, a1;
439 int wait = 1000;
440 return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
441}
442
443int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
444{
445 u64 a0, a1;
446 int wait = 1000;
447 int err, i;
448
449 for (i = 0; i < ETH_ALEN; i++)
450 mac_addr[i] = 0;
451
452 err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
453 if (err)
454 return err;
455
456 for (i = 0; i < ETH_ALEN; i++)
457 mac_addr[i] = ((u8 *)&a0)[i];
458
459 return 0;
460}
461
462void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
463 int broadcast, int promisc, int allmulti)
464{
465 u64 a0, a1 = 0;
466 int wait = 1000;
467 int err;
468
469 a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
470 (multicast ? CMD_PFILTER_MULTICAST : 0) |
471 (broadcast ? CMD_PFILTER_BROADCAST : 0) |
472 (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
473 (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
474
475 err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
476 if (err)
477 printk(KERN_ERR "Can't set packet filter\n");
478}
479
480void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
481{
482 u64 a0 = 0, a1 = 0;
483 int wait = 1000;
484 int err;
485 int i;
486
487 for (i = 0; i < ETH_ALEN; i++)
488 ((u8 *)&a0)[i] = addr[i];
489
490 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
491 if (err)
492 printk(KERN_ERR
493 "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
494 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
495 err);
496}
497
498void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
499{
500 u64 a0 = 0, a1 = 0;
501 int wait = 1000;
502 int err;
503 int i;
504
505 for (i = 0; i < ETH_ALEN; i++)
506 ((u8 *)&a0)[i] = addr[i];
507
508 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
509 if (err)
510 printk(KERN_ERR
511 "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
512 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
513 err);
514}
515
516int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
517{
518 u64 a0, a1;
519 int wait = 1000;
520
521 if (!vdev->notify) {
522 vdev->notify = pci_alloc_consistent(vdev->pdev,
523 sizeof(struct vnic_devcmd_notify),
524 &vdev->notify_pa);
525 if (!vdev->notify)
526 return -ENOMEM;
527 }
528
529 a0 = vdev->notify_pa;
530 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
531 a1 += sizeof(struct vnic_devcmd_notify);
532
533 return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
534}
535
536void vnic_dev_notify_unset(struct vnic_dev *vdev)
537{
538 u64 a0, a1;
539 int wait = 1000;
540
541 a0 = 0; /* paddr = 0 to unset notify buffer */
542 a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
543 a1 += sizeof(struct vnic_devcmd_notify);
544
545 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
546}
547
548static int vnic_dev_notify_ready(struct vnic_dev *vdev)
549{
550 u32 *words;
551 unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
552 unsigned int i;
553 u32 csum;
554
555 if (!vdev->notify)
556 return 0;
557
558 do {
559 csum = 0;
560 memcpy(&vdev->notify_copy, vdev->notify,
561 sizeof(struct vnic_devcmd_notify));
562 words = (u32 *)&vdev->notify_copy;
563 for (i = 1; i < nwords; i++)
564 csum += words[i];
565 } while (csum != words[0]);
566
567 return 1;
568}
569
570int vnic_dev_init(struct vnic_dev *vdev, int arg)
571{
572 u64 a0 = (u32)arg, a1 = 0;
573 int wait = 1000;
574 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
575}
576
577int vnic_dev_link_status(struct vnic_dev *vdev)
578{
579 if (vdev->linkstatus)
580 return *vdev->linkstatus;
581
582 if (!vnic_dev_notify_ready(vdev))
583 return 0;
584
585 return vdev->notify_copy.link_state;
586}
587
588u32 vnic_dev_port_speed(struct vnic_dev *vdev)
589{
590 if (!vnic_dev_notify_ready(vdev))
591 return 0;
592
593 return vdev->notify_copy.port_speed;
594}
595
596u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
597{
598 if (!vnic_dev_notify_ready(vdev))
599 return 0;
600
601 return vdev->notify_copy.msglvl;
602}
603
604u32 vnic_dev_mtu(struct vnic_dev *vdev)
605{
606 if (!vnic_dev_notify_ready(vdev))
607 return 0;
608
609 return vdev->notify_copy.mtu;
610}
611
612void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
613 enum vnic_dev_intr_mode intr_mode)
614{
615 vdev->intr_mode = intr_mode;
616}
617
618enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
619 struct vnic_dev *vdev)
620{
621 return vdev->intr_mode;
622}
623
624void vnic_dev_unregister(struct vnic_dev *vdev)
625{
626 if (vdev) {
627 if (vdev->notify)
628 pci_free_consistent(vdev->pdev,
629 sizeof(struct vnic_devcmd_notify),
630 vdev->notify,
631 vdev->notify_pa);
632 if (vdev->linkstatus)
633 pci_free_consistent(vdev->pdev,
634 sizeof(u32),
635 vdev->linkstatus,
636 vdev->linkstatus_pa);
637 if (vdev->stats)
638 pci_free_consistent(vdev->pdev,
639 sizeof(struct vnic_dev),
640 vdev->stats, vdev->stats_pa);
641 if (vdev->fw_info)
642 pci_free_consistent(vdev->pdev,
643 sizeof(struct vnic_devcmd_fw_info),
644 vdev->fw_info, vdev->fw_info_pa);
645 kfree(vdev);
646 }
647}
648
649struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
650 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
651{
652 if (!vdev) {
653 vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
654 if (!vdev)
655 return NULL;
656 }
657
658 vdev->priv = priv;
659 vdev->pdev = pdev;
660
661 if (vnic_dev_discover_res(vdev, bar))
662 goto err_out;
663
664 vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
665 if (!vdev->devcmd)
666 goto err_out;
667
668 return vdev;
669
670err_out:
671 vnic_dev_unregister(vdev);
672 return NULL;
673}
674
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
new file mode 100644
index 000000000000..2dcffd3a24bd
--- /dev/null
+++ b/drivers/net/enic/vnic_dev.h
@@ -0,0 +1,106 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_DEV_H_
21#define _VNIC_DEV_H_
22
23#include "vnic_resource.h"
24#include "vnic_devcmd.h"
25
26#ifndef VNIC_PADDR_TARGET
27#define VNIC_PADDR_TARGET 0x0000000000000000ULL
28#endif
29
30enum vnic_dev_intr_mode {
31 VNIC_DEV_INTR_MODE_UNKNOWN,
32 VNIC_DEV_INTR_MODE_INTX,
33 VNIC_DEV_INTR_MODE_MSI,
34 VNIC_DEV_INTR_MODE_MSIX,
35};
36
37struct vnic_dev_bar {
38 void __iomem *vaddr;
39 dma_addr_t bus_addr;
40 unsigned long len;
41};
42
43struct vnic_dev_ring {
44 void *descs;
45 size_t size;
46 dma_addr_t base_addr;
47 size_t base_align;
48 void *descs_unaligned;
49 size_t size_unaligned;
50 dma_addr_t base_addr_unaligned;
51 unsigned int desc_size;
52 unsigned int desc_count;
53 unsigned int desc_avail;
54};
55
56struct vnic_dev;
57struct vnic_stats;
58
59void *vnic_dev_priv(struct vnic_dev *vdev);
60unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
61 enum vnic_res_type type);
62void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
63 unsigned int index);
64unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
65 unsigned int desc_count, unsigned int desc_size);
66void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
67int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
68 unsigned int desc_count, unsigned int desc_size);
69void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
70 struct vnic_dev_ring *ring);
71int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
72 u64 *a0, u64 *a1, int wait);
73int vnic_dev_fw_info(struct vnic_dev *vdev,
74 struct vnic_devcmd_fw_info **fw_info);
75int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
76 void *value);
77int vnic_dev_stats_clear(struct vnic_dev *vdev);
78int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
79int vnic_dev_hang_notify(struct vnic_dev *vdev);
80void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
81 int broadcast, int promisc, int allmulti);
82void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
83void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
84int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
85int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
86void vnic_dev_notify_unset(struct vnic_dev *vdev);
87int vnic_dev_link_status(struct vnic_dev *vdev);
88u32 vnic_dev_port_speed(struct vnic_dev *vdev);
89u32 vnic_dev_msg_lvl(struct vnic_dev *vdev);
90u32 vnic_dev_mtu(struct vnic_dev *vdev);
91int vnic_dev_close(struct vnic_dev *vdev);
92int vnic_dev_enable(struct vnic_dev *vdev);
93int vnic_dev_disable(struct vnic_dev *vdev);
94int vnic_dev_open(struct vnic_dev *vdev, int arg);
95int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
96int vnic_dev_init(struct vnic_dev *vdev, int arg);
97int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
98int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
99void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
100 enum vnic_dev_intr_mode intr_mode);
101enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev);
102void vnic_dev_unregister(struct vnic_dev *vdev);
103struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
104 void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar);
105
106#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
new file mode 100644
index 000000000000..d8617a3373b1
--- /dev/null
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -0,0 +1,282 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_DEVCMD_H_
21#define _VNIC_DEVCMD_H_
22
23#define _CMD_NBITS 14
24#define _CMD_VTYPEBITS 10
25#define _CMD_FLAGSBITS 6
26#define _CMD_DIRBITS 2
27
28#define _CMD_NMASK ((1 << _CMD_NBITS)-1)
29#define _CMD_VTYPEMASK ((1 << _CMD_VTYPEBITS)-1)
30#define _CMD_FLAGSMASK ((1 << _CMD_FLAGSBITS)-1)
31#define _CMD_DIRMASK ((1 << _CMD_DIRBITS)-1)
32
33#define _CMD_NSHIFT 0
34#define _CMD_VTYPESHIFT (_CMD_NSHIFT+_CMD_NBITS)
35#define _CMD_FLAGSSHIFT (_CMD_VTYPESHIFT+_CMD_VTYPEBITS)
36#define _CMD_DIRSHIFT (_CMD_FLAGSSHIFT+_CMD_FLAGSBITS)
37
38/*
39 * Direction bits (from host perspective).
40 */
41#define _CMD_DIR_NONE 0U
42#define _CMD_DIR_WRITE 1U
43#define _CMD_DIR_READ 2U
44#define _CMD_DIR_RW (_CMD_DIR_WRITE | _CMD_DIR_READ)
45
46/*
47 * Flag bits.
48 */
49#define _CMD_FLAGS_NONE 0U
50#define _CMD_FLAGS_NOWAIT 1U
51
52/*
53 * vNIC type bits.
54 */
55#define _CMD_VTYPE_NONE 0U
56#define _CMD_VTYPE_ENET 1U
57#define _CMD_VTYPE_FC 2U
58#define _CMD_VTYPE_SCSI 4U
59#define _CMD_VTYPE_ALL (_CMD_VTYPE_ENET | _CMD_VTYPE_FC | _CMD_VTYPE_SCSI)
60
61/*
62 * Used to create cmds..
63*/
64#define _CMDCF(dir, flags, vtype, nr) \
65 (((dir) << _CMD_DIRSHIFT) | \
66 ((flags) << _CMD_FLAGSSHIFT) | \
67 ((vtype) << _CMD_VTYPESHIFT) | \
68 ((nr) << _CMD_NSHIFT))
69#define _CMDC(dir, vtype, nr) _CMDCF(dir, 0, vtype, nr)
70#define _CMDCNW(dir, vtype, nr) _CMDCF(dir, _CMD_FLAGS_NOWAIT, vtype, nr)
71
72/*
73 * Used to decode cmds..
74*/
75#define _CMD_DIR(cmd) (((cmd) >> _CMD_DIRSHIFT) & _CMD_DIRMASK)
76#define _CMD_FLAGS(cmd) (((cmd) >> _CMD_FLAGSSHIFT) & _CMD_FLAGSMASK)
77#define _CMD_VTYPE(cmd) (((cmd) >> _CMD_VTYPESHIFT) & _CMD_VTYPEMASK)
78#define _CMD_N(cmd) (((cmd) >> _CMD_NSHIFT) & _CMD_NMASK)
79
80enum vnic_devcmd_cmd {
81 CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0),
82
83 /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */
84 CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1),
85
86 /* dev-specific block member:
87 * in: (u16)a0=offset,(u8)a1=size
88 * out: a0=value */
89 CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
90
91 /* stats clear */
92 CMD_STATS_CLEAR = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 3),
93
94 /* stats dump in mem: (u64)a0=paddr to stats area,
95 * (u16)a1=sizeof stats area */
96 CMD_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 4),
97
98 /* set Rx packet filter: (u32)a0=filters (see CMD_PFILTER_*) */
99 CMD_PACKET_FILTER = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 7),
100
101 /* hang detection notification */
102 CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
103
104 /* MAC address in (u48)a0 */
105 CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
106 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
107
108 /* disable/enable promisc mode: (u8)a0=0/1 */
109/***** XXX DEPRECATED *****/
110 CMD_PROMISC_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 10),
111
112 /* disable/enable all-multi mode: (u8)a0=0/1 */
113/***** XXX DEPRECATED *****/
114 CMD_ALLMULTI_MODE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 11),
115
116 /* add addr from (u48)a0 */
117 CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
118 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 12),
119
120 /* del addr from (u48)a0 */
121 CMD_ADDR_DEL = _CMDCNW(_CMD_DIR_WRITE,
122 _CMD_VTYPE_ENET | _CMD_VTYPE_FC, 13),
123
124 /* add VLAN id in (u16)a0 */
125 CMD_VLAN_ADD = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 14),
126
127 /* del VLAN id in (u16)a0 */
128 CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
129
130 /* nic_cfg in (u32)a0 */
131 CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
132
133 /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
134 CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
135
136 /* union vnic_rss_cpu in mem: (u64)a0=paddr, (u16)a1=len */
137 CMD_RSS_CPU = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 18),
138
139 /* initiate softreset */
140 CMD_SOFT_RESET = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 19),
141
142 /* softreset status:
143 * out: a0=0 reset complete, a0=1 reset in progress */
144 CMD_SOFT_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 20),
145
146 /* set struct vnic_devcmd_notify buffer in mem:
147 * in:
148 * (u64)a0=paddr to notify (set paddr=0 to unset)
149 * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify)
150 * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr)
151 * out:
152 * (u32)a1 = effective size
153 */
154 CMD_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 21),
155
156 /* UNDI API: (u64)a0=paddr to s_PXENV_UNDI_ struct,
157 * (u8)a1=PXENV_UNDI_xxx */
158 CMD_UNDI = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 22),
159
160 /* initiate open sequence (u32)a0=flags (see CMD_OPENF_*) */
161 CMD_OPEN = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 23),
162
163 /* open status:
164 * out: a0=0 open complete, a0=1 open in progress */
165 CMD_OPEN_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 24),
166
167 /* close vnic */
168 CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
169
170 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
171 CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
172
173 /* variant of CMD_INIT, with provisioning info
174 * (u64)a0=paddr of vnic_devcmd_provinfo
175 * (u32)a1=sizeof provision info */
176 CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27),
177
178 /* enable virtual link */
179 CMD_ENABLE = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 28),
180
181 /* disable virtual link */
182 CMD_DISABLE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 29),
183
184 /* stats dump all vnics on uplink in mem: (u64)a0=paddr (u32)a1=uif */
185 CMD_STATS_DUMP_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 30),
186
187 /* init status:
188 * out: a0=0 init complete, a0=1 init in progress
189 * if a0=0, a1=errno */
190 CMD_INIT_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 31),
191
192 /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
193 * (u8)a1=INT13_CMD_xxx */
194 CMD_INT13 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_FC, 32),
195
196 /* logical uplink enable/disable: (u64)a0: 0/1=disable/enable */
197 CMD_LOGICAL_UPLINK = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 33),
198
199 /* undo initialize of virtual link */
200 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
201};
202
203/* flags for CMD_OPEN */
204#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
205
206/* flags for CMD_INIT */
207#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
208
209/* flags for CMD_PACKET_FILTER */
210#define CMD_PFILTER_DIRECTED 0x01
211#define CMD_PFILTER_MULTICAST 0x02
212#define CMD_PFILTER_BROADCAST 0x04
213#define CMD_PFILTER_PROMISCUOUS 0x08
214#define CMD_PFILTER_ALL_MULTICAST 0x10
215
216enum vnic_devcmd_status {
217 STAT_NONE = 0,
218 STAT_BUSY = 1 << 0, /* cmd in progress */
219 STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
220};
221
222enum vnic_devcmd_error {
223 ERR_SUCCESS = 0,
224 ERR_EINVAL = 1,
225 ERR_EFAULT = 2,
226 ERR_EPERM = 3,
227 ERR_EBUSY = 4,
228 ERR_ECMDUNKNOWN = 5,
229 ERR_EBADSTATE = 6,
230 ERR_ENOMEM = 7,
231 ERR_ETIMEDOUT = 8,
232 ERR_ELINKDOWN = 9,
233};
234
235struct vnic_devcmd_fw_info {
236 char fw_version[32];
237 char fw_build[32];
238 char hw_version[32];
239 char hw_serial_number[32];
240};
241
242struct vnic_devcmd_notify {
243 u32 csum; /* checksum over following words */
244
245 u32 link_state; /* link up == 1 */
246 u32 port_speed; /* effective port speed (rate limit) */
247 u32 mtu; /* MTU */
248 u32 msglvl; /* requested driver msg lvl */
249 u32 uif; /* uplink interface */
250 u32 status; /* status bits (see VNIC_STF_*) */
251 u32 error; /* error code (see ERR_*) for first ERR */
252};
253#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
254
255struct vnic_devcmd_provinfo {
256 u8 oui[3];
257 u8 type;
258 u8 data[0];
259};
260
261/*
262 * Writing cmd register causes STAT_BUSY to get set in status register.
263 * When cmd completes, STAT_BUSY will be cleared.
264 *
265 * If cmd completed successfully STAT_ERROR will be clear
266 * and args registers contain cmd-specific results.
267 *
268 * If cmd error, STAT_ERROR will be set and args[0] contains error code.
269 *
270 * status register is read-only. While STAT_BUSY is set,
271 * all other register contents are read-only.
272 */
273
274/* Make sizeof(vnic_devcmd) a power-of-2 for I/O BAR. */
275#define VNIC_DEVCMD_NARGS 15
276struct vnic_devcmd {
277 u32 status; /* RO */
278 u32 cmd; /* RW */
279 u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */
280};
281
282#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
new file mode 100644
index 000000000000..6332ac9391b8
--- /dev/null
+++ b/drivers/net/enic/vnic_enet.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_ENIC_H_
21#define _VNIC_ENIC_H_
22
23/* Device-specific region: enet configuration */
24struct vnic_enet_config {
25 u32 flags;
26 u32 wq_desc_count;
27 u32 rq_desc_count;
28 u16 mtu;
29 u16 intr_timer;
30 u8 intr_timer_type;
31 u8 intr_mode;
32 char devname[16];
33};
34
35#define VENETF_TSO 0x1 /* TSO enabled */
36#define VENETF_LRO 0x2 /* LRO enabled */
37#define VENETF_RXCSUM 0x4 /* RX csum enabled */
38#define VENETF_TXCSUM 0x8 /* TX csum enabled */
39#define VENETF_RSS 0x10 /* RSS enabled */
40#define VENETF_RSSHASH_IPV4 0x20 /* Hash on IPv4 fields */
41#define VENETF_RSSHASH_TCPIPV4 0x40 /* Hash on TCP + IPv4 fields */
42#define VENETF_RSSHASH_IPV6 0x80 /* Hash on IPv6 fields */
43#define VENETF_RSSHASH_TCPIPV6 0x100 /* Hash on TCP + IPv6 fields */
44#define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */
45#define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */
46
47#endif /* _VNIC_ENIC_H_ */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
new file mode 100644
index 000000000000..ddc38f8f4656
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_intr.h"
28
29void vnic_intr_free(struct vnic_intr *intr)
30{
31 intr->ctrl = NULL;
32}
33
34int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
35 unsigned int index)
36{
37 intr->index = index;
38 intr->vdev = vdev;
39
40 intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
41 if (!intr->ctrl) {
42 printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
43 index);
44 return -EINVAL;
45 }
46
47 return 0;
48}
49
50void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
51 unsigned int coalescing_type, unsigned int mask_on_assertion)
52{
53 iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
54 iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
55 iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
56 iowrite32(0, &intr->ctrl->int_credits);
57}
58
59void vnic_intr_clean(struct vnic_intr *intr)
60{
61 iowrite32(0, &intr->ctrl->int_credits);
62}
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
new file mode 100644
index 000000000000..ccc408116af8
--- /dev/null
+++ b/drivers/net/enic/vnic_intr.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_INTR_H_
21#define _VNIC_INTR_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26
27#define VNIC_INTR_TIMER_MAX 0xffff
28
29#define VNIC_INTR_TIMER_TYPE_ABS 0
30#define VNIC_INTR_TIMER_TYPE_QUIET 1
31
32/* Interrupt control */
33struct vnic_intr_ctrl {
34 u32 coalescing_timer; /* 0x00 */
35 u32 pad0;
36 u32 coalescing_value; /* 0x08 */
37 u32 pad1;
38 u32 coalescing_type; /* 0x10 */
39 u32 pad2;
40 u32 mask_on_assertion; /* 0x18 */
41 u32 pad3;
42 u32 mask; /* 0x20 */
43 u32 pad4;
44 u32 int_credits; /* 0x28 */
45 u32 pad5;
46 u32 int_credit_return; /* 0x30 */
47 u32 pad6;
48};
49
50struct vnic_intr {
51 unsigned int index;
52 struct vnic_dev *vdev;
53 struct vnic_intr_ctrl __iomem *ctrl; /* memory-mapped */
54};
55
56static inline void vnic_intr_unmask(struct vnic_intr *intr)
57{
58 iowrite32(0, &intr->ctrl->mask);
59}
60
61static inline void vnic_intr_mask(struct vnic_intr *intr)
62{
63 iowrite32(1, &intr->ctrl->mask);
64}
65
66static inline void vnic_intr_return_credits(struct vnic_intr *intr,
67 unsigned int credits, int unmask, int reset_timer)
68{
69#define VNIC_INTR_UNMASK_SHIFT 16
70#define VNIC_INTR_RESET_TIMER_SHIFT 17
71
72 u32 int_credit_return = (credits & 0xffff) |
73 (unmask ? (1 << VNIC_INTR_UNMASK_SHIFT) : 0) |
74 (reset_timer ? (1 << VNIC_INTR_RESET_TIMER_SHIFT) : 0);
75
76 iowrite32(int_credit_return, &intr->ctrl->int_credit_return);
77}
78
79static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
80{
81 /* get and ack interrupt in one read (clear-and-ack-on-read) */
82 return ioread32(legacy_pba);
83}
84
85void vnic_intr_free(struct vnic_intr *intr);
86int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
87 unsigned int index);
88void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
89 unsigned int coalescing_type, unsigned int mask_on_assertion);
90void vnic_intr_clean(struct vnic_intr *intr);
91
92#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
new file mode 100644
index 000000000000..dadf26fae69a
--- /dev/null
+++ b/drivers/net/enic/vnic_nic.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_NIC_H_
21#define _VNIC_NIC_H_
22
23#define NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD 0xffUL
24#define NIC_CFG_RSS_DEFAULT_CPU_SHIFT 0
25#define NIC_CFG_RSS_HASH_TYPE (0xffUL << 8)
26#define NIC_CFG_RSS_HASH_TYPE_MASK_FIELD 0xffUL
27#define NIC_CFG_RSS_HASH_TYPE_SHIFT 8
28#define NIC_CFG_RSS_HASH_BITS (7UL << 16)
29#define NIC_CFG_RSS_HASH_BITS_MASK_FIELD 7UL
30#define NIC_CFG_RSS_HASH_BITS_SHIFT 16
31#define NIC_CFG_RSS_BASE_CPU (7UL << 19)
32#define NIC_CFG_RSS_BASE_CPU_MASK_FIELD 7UL
33#define NIC_CFG_RSS_BASE_CPU_SHIFT 19
34#define NIC_CFG_RSS_ENABLE (1UL << 22)
35#define NIC_CFG_RSS_ENABLE_MASK_FIELD 1UL
36#define NIC_CFG_RSS_ENABLE_SHIFT 22
37#define NIC_CFG_TSO_IPID_SPLIT_EN (1UL << 23)
38#define NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD 1UL
39#define NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT 23
40#define NIC_CFG_IG_VLAN_STRIP_EN (1UL << 24)
41#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
42#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
43
44static inline void vnic_set_nic_cfg(u32 *nic_cfg,
45 u8 rss_default_cpu, u8 rss_hash_type,
46 u8 rss_hash_bits, u8 rss_base_cpu,
47 u8 rss_enable, u8 tso_ipid_split_en,
48 u8 ig_vlan_strip_en)
49{
50 *nic_cfg = (rss_default_cpu & NIC_CFG_RSS_DEFAULT_CPU_MASK_FIELD) |
51 ((rss_hash_type & NIC_CFG_RSS_HASH_TYPE_MASK_FIELD)
52 << NIC_CFG_RSS_HASH_TYPE_SHIFT) |
53 ((rss_hash_bits & NIC_CFG_RSS_HASH_BITS_MASK_FIELD)
54 << NIC_CFG_RSS_HASH_BITS_SHIFT) |
55 ((rss_base_cpu & NIC_CFG_RSS_BASE_CPU_MASK_FIELD)
56 << NIC_CFG_RSS_BASE_CPU_SHIFT) |
57 ((rss_enable & NIC_CFG_RSS_ENABLE_MASK_FIELD)
58 << NIC_CFG_RSS_ENABLE_SHIFT) |
59 ((tso_ipid_split_en & NIC_CFG_TSO_IPID_SPLIT_EN_MASK_FIELD)
60 << NIC_CFG_TSO_IPID_SPLIT_EN_SHIFT) |
61 ((ig_vlan_strip_en & NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD)
62 << NIC_CFG_IG_VLAN_STRIP_EN_SHIFT);
63}
64
65#endif /* _VNIC_NIC_H_ */
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
new file mode 100644
index 000000000000..144d2812f081
--- /dev/null
+++ b/drivers/net/enic/vnic_resource.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_RESOURCE_H_
21#define _VNIC_RESOURCE_H_
22
23#define VNIC_RES_MAGIC 0x766E6963L /* 'vnic' */
24#define VNIC_RES_VERSION 0x00000000L
25
26/* vNIC resource types */
27enum vnic_res_type {
28 RES_TYPE_EOL, /* End-of-list */
29 RES_TYPE_WQ, /* Work queues */
30 RES_TYPE_RQ, /* Receive queues */
31 RES_TYPE_CQ, /* Completion queues */
32 RES_TYPE_RSVD1,
33 RES_TYPE_NIC_CFG, /* Enet NIC config registers */
34 RES_TYPE_RSVD2,
35 RES_TYPE_RSVD3,
36 RES_TYPE_RSVD4,
37 RES_TYPE_RSVD5,
38 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
39 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
40 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
41 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status, r2c */
42 RES_TYPE_RSVD6,
43 RES_TYPE_RSVD7,
44 RES_TYPE_DEVCMD, /* Device command region */
45 RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */
46
47 RES_TYPE_MAX, /* Count of resource types */
48};
49
50struct vnic_resource_header {
51 u32 magic;
52 u32 version;
53};
54
55struct vnic_resource {
56 u8 type;
57 u8 bar;
58 u8 pad[2];
59 u32 bar_offset;
60 u32 count;
61};
62
63#endif /* _VNIC_RESOURCE_H_ */
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
new file mode 100644
index 000000000000..9365e63e821a
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_rq.h"
28
29static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
30{
31 struct vnic_rq_buf *buf;
32 struct vnic_dev *vdev;
33 unsigned int i, j, count = rq->ring.desc_count;
34 unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
35
36 vdev = rq->vdev;
37
38 for (i = 0; i < blks; i++) {
39 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
40 if (!rq->bufs[i]) {
41 printk(KERN_ERR "Failed to alloc rq_bufs\n");
42 return -ENOMEM;
43 }
44 }
45
46 for (i = 0; i < blks; i++) {
47 buf = rq->bufs[i];
48 for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
49 buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
50 buf->desc = (u8 *)rq->ring.descs +
51 rq->ring.desc_size * buf->index;
52 if (buf->index + 1 == count) {
53 buf->next = rq->bufs[0];
54 break;
55 } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
56 buf->next = rq->bufs[i + 1];
57 } else {
58 buf->next = buf + 1;
59 buf++;
60 }
61 }
62 }
63
64 rq->to_use = rq->to_clean = rq->bufs[0];
65 rq->buf_index = 0;
66
67 return 0;
68}
69
70void vnic_rq_free(struct vnic_rq *rq)
71{
72 struct vnic_dev *vdev;
73 unsigned int i;
74
75 vdev = rq->vdev;
76
77 vnic_dev_free_desc_ring(vdev, &rq->ring);
78
79 for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
80 kfree(rq->bufs[i]);
81 rq->bufs[i] = NULL;
82 }
83
84 rq->ctrl = NULL;
85}
86
87int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
88 unsigned int desc_count, unsigned int desc_size)
89{
90 int err;
91
92 rq->index = index;
93 rq->vdev = vdev;
94
95 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
96 if (!rq->ctrl) {
97 printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
98 return -EINVAL;
99 }
100
101 vnic_rq_disable(rq);
102
103 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
104 if (err)
105 return err;
106
107 err = vnic_rq_alloc_bufs(rq);
108 if (err) {
109 vnic_rq_free(rq);
110 return err;
111 }
112
113 return 0;
114}
115
116void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
117 unsigned int error_interrupt_enable,
118 unsigned int error_interrupt_offset)
119{
120 u64 paddr;
121 u32 fetch_index;
122
123 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
124 writeq(paddr, &rq->ctrl->ring_base);
125 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
126 iowrite32(cq_index, &rq->ctrl->cq_index);
127 iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
128 iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
129 iowrite32(0, &rq->ctrl->dropped_packet_count);
130 iowrite32(0, &rq->ctrl->error_status);
131
132 /* Use current fetch_index as the ring starting point */
133 fetch_index = ioread32(&rq->ctrl->fetch_index);
134 rq->to_use = rq->to_clean =
135 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
136 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
137 iowrite32(fetch_index, &rq->ctrl->posted_index);
138
139 rq->buf_index = 0;
140}
141
142unsigned int vnic_rq_error_status(struct vnic_rq *rq)
143{
144 return ioread32(&rq->ctrl->error_status);
145}
146
147void vnic_rq_enable(struct vnic_rq *rq)
148{
149 iowrite32(1, &rq->ctrl->enable);
150}
151
152int vnic_rq_disable(struct vnic_rq *rq)
153{
154 unsigned int wait;
155
156 iowrite32(0, &rq->ctrl->enable);
157
158 /* Wait for HW to ACK disable request */
159 for (wait = 0; wait < 100; wait++) {
160 if (!(ioread32(&rq->ctrl->running)))
161 return 0;
162 udelay(1);
163 }
164
165 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
166
167 return -ETIMEDOUT;
168}
169
170void vnic_rq_clean(struct vnic_rq *rq,
171 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
172{
173 struct vnic_rq_buf *buf;
174 u32 fetch_index;
175
176 BUG_ON(ioread32(&rq->ctrl->enable));
177
178 buf = rq->to_clean;
179
180 while (vnic_rq_desc_used(rq) > 0) {
181
182 (*buf_clean)(rq, buf);
183
184 buf = rq->to_clean = buf->next;
185 rq->ring.desc_avail++;
186 }
187
188 /* Use current fetch_index as the ring starting point */
189 fetch_index = ioread32(&rq->ctrl->fetch_index);
190 rq->to_use = rq->to_clean =
191 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
192 [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
193 iowrite32(fetch_index, &rq->ctrl->posted_index);
194
195 rq->buf_index = 0;
196
197 vnic_dev_clear_desc_ring(&rq->ring);
198}
199
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
new file mode 100644
index 000000000000..82bfca67cc4d
--- /dev/null
+++ b/drivers/net/enic/vnic_rq.h
@@ -0,0 +1,204 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_RQ_H_
21#define _VNIC_RQ_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
28/* Receive queue control */
29struct vnic_rq_ctrl {
30 u64 ring_base; /* 0x00 */
31 u32 ring_size; /* 0x08 */
32 u32 pad0;
33 u32 posted_index; /* 0x10 */
34 u32 pad1;
35 u32 cq_index; /* 0x18 */
36 u32 pad2;
37 u32 enable; /* 0x20 */
38 u32 pad3;
39 u32 running; /* 0x28 */
40 u32 pad4;
41 u32 fetch_index; /* 0x30 */
42 u32 pad5;
43 u32 error_interrupt_enable; /* 0x38 */
44 u32 pad6;
45 u32 error_interrupt_offset; /* 0x40 */
46 u32 pad7;
47 u32 error_status; /* 0x48 */
48 u32 pad8;
49 u32 dropped_packet_count; /* 0x50 */
50 u32 pad9;
51 u32 dropped_packet_count_rc; /* 0x58 */
52 u32 pad10;
53};
54
55/* Break the vnic_rq_buf allocations into blocks of 64 entries */
56#define VNIC_RQ_BUF_BLK_ENTRIES 64
57#define VNIC_RQ_BUF_BLK_SZ \
58 (VNIC_RQ_BUF_BLK_ENTRIES * sizeof(struct vnic_rq_buf))
59#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
60 DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES)
61#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
62
63struct vnic_rq_buf {
64 struct vnic_rq_buf *next;
65 dma_addr_t dma_addr;
66 void *os_buf;
67 unsigned int os_buf_index;
68 unsigned int len;
69 unsigned int index;
70 void *desc;
71};
72
73struct vnic_rq {
74 unsigned int index;
75 struct vnic_dev *vdev;
76 struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
77 struct vnic_dev_ring ring;
78 struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
79 struct vnic_rq_buf *to_use;
80 struct vnic_rq_buf *to_clean;
81 void *os_buf_head;
82 unsigned int buf_index;
83 unsigned int pkts_outstanding;
84};
85
86static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
87{
88 /* how many does SW own? */
89 return rq->ring.desc_avail;
90}
91
92static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
93{
94 /* how many does HW own? */
95 return rq->ring.desc_count - rq->ring.desc_avail - 1;
96}
97
98static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
99{
100 return rq->to_use->desc;
101}
102
103static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
104{
105 return rq->to_use->index;
106}
107
108static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq)
109{
110 return rq->buf_index++;
111}
112
113static inline void vnic_rq_post(struct vnic_rq *rq,
114 void *os_buf, unsigned int os_buf_index,
115 dma_addr_t dma_addr, unsigned int len)
116{
117 struct vnic_rq_buf *buf = rq->to_use;
118
119 buf->os_buf = os_buf;
120 buf->os_buf_index = os_buf_index;
121 buf->dma_addr = dma_addr;
122 buf->len = len;
123
124 buf = buf->next;
125 rq->to_use = buf;
126 rq->ring.desc_avail--;
127
128 /* Move the posted_index every nth descriptor
129 */
130
131#ifndef VNIC_RQ_RETURN_RATE
132#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
133#endif
134
135 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
136 iowrite32(buf->index, &rq->ctrl->posted_index);
137}
138
139static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
140{
141 rq->ring.desc_avail += count;
142}
143
144enum desc_return_options {
145 VNIC_RQ_RETURN_DESC,
146 VNIC_RQ_DEFER_RETURN_DESC,
147};
148
149static inline void vnic_rq_service(struct vnic_rq *rq,
150 struct cq_desc *cq_desc, u16 completed_index,
151 int desc_return, void (*buf_service)(struct vnic_rq *rq,
152 struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
153 int skipped, void *opaque), void *opaque)
154{
155 struct vnic_rq_buf *buf;
156 int skipped;
157
158 buf = rq->to_clean;
159 while (1) {
160
161 skipped = (buf->index != completed_index);
162
163 (*buf_service)(rq, cq_desc, buf, skipped, opaque);
164
165 if (desc_return == VNIC_RQ_RETURN_DESC)
166 rq->ring.desc_avail++;
167
168 rq->to_clean = buf->next;
169
170 if (!skipped)
171 break;
172
173 buf = rq->to_clean;
174 }
175}
176
177static inline int vnic_rq_fill(struct vnic_rq *rq,
178 int (*buf_fill)(struct vnic_rq *rq))
179{
180 int err;
181
182 while (vnic_rq_desc_avail(rq) > 1) {
183
184 err = (*buf_fill)(rq);
185 if (err)
186 return err;
187 }
188
189 return 0;
190}
191
192void vnic_rq_free(struct vnic_rq *rq);
193int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
194 unsigned int desc_count, unsigned int desc_size);
195void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
196 unsigned int error_interrupt_enable,
197 unsigned int error_interrupt_offset);
198unsigned int vnic_rq_error_status(struct vnic_rq *rq);
199void vnic_rq_enable(struct vnic_rq *rq);
200int vnic_rq_disable(struct vnic_rq *rq);
201void vnic_rq_clean(struct vnic_rq *rq,
202 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
203
204#endif /* _VNIC_RQ_H_ */
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
new file mode 100644
index 000000000000..e325d65d7c34
--- /dev/null
+++ b/drivers/net/enic/vnic_rss.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 */
5
6#ifndef _VNIC_RSS_H_
7#define _VNIC_RSS_H_
8
9/* RSS key array */
10union vnic_rss_key {
11 struct {
12 u8 b[10];
13 u8 b_pad[6];
14 } key[4];
15 u64 raw[8];
16};
17
18/* RSS cpu array */
19union vnic_rss_cpu {
20 struct {
21 u8 b[4] ;
22 u8 b_pad[4];
23 } cpu[32];
24 u64 raw[32];
25};
26
27void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
28void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
29void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
30void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
31
32#endif /* _VNIC_RSS_H_ */
diff --git a/drivers/net/enic/vnic_stats.h b/drivers/net/enic/vnic_stats.h
new file mode 100644
index 000000000000..9ff9614d89b1
--- /dev/null
+++ b/drivers/net/enic/vnic_stats.h
@@ -0,0 +1,70 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_STATS_H_
21#define _VNIC_STATS_H_
22
23/* Tx statistics */
24struct vnic_tx_stats {
25 u64 tx_frames_ok;
26 u64 tx_unicast_frames_ok;
27 u64 tx_multicast_frames_ok;
28 u64 tx_broadcast_frames_ok;
29 u64 tx_bytes_ok;
30 u64 tx_unicast_bytes_ok;
31 u64 tx_multicast_bytes_ok;
32 u64 tx_broadcast_bytes_ok;
33 u64 tx_drops;
34 u64 tx_errors;
35 u64 tx_tso;
36 u64 rsvd[16];
37};
38
39/* Rx statistics */
40struct vnic_rx_stats {
41 u64 rx_frames_ok;
42 u64 rx_frames_total;
43 u64 rx_unicast_frames_ok;
44 u64 rx_multicast_frames_ok;
45 u64 rx_broadcast_frames_ok;
46 u64 rx_bytes_ok;
47 u64 rx_unicast_bytes_ok;
48 u64 rx_multicast_bytes_ok;
49 u64 rx_broadcast_bytes_ok;
50 u64 rx_drop;
51 u64 rx_no_bufs;
52 u64 rx_errors;
53 u64 rx_rss;
54 u64 rx_crc_errors;
55 u64 rx_frames_64;
56 u64 rx_frames_127;
57 u64 rx_frames_255;
58 u64 rx_frames_511;
59 u64 rx_frames_1023;
60 u64 rx_frames_1518;
61 u64 rx_frames_to_max;
62 u64 rsvd[16];
63};
64
65struct vnic_stats {
66 struct vnic_tx_stats tx;
67 struct vnic_rx_stats rx;
68};
69
70#endif /* _VNIC_STATS_H_ */
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
new file mode 100644
index 000000000000..a576d04708ef
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/errno.h>
22#include <linux/types.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25
26#include "vnic_dev.h"
27#include "vnic_wq.h"
28
29static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
30{
31 struct vnic_wq_buf *buf;
32 struct vnic_dev *vdev;
33 unsigned int i, j, count = wq->ring.desc_count;
34 unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
35
36 vdev = wq->vdev;
37
38 for (i = 0; i < blks; i++) {
39 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
40 if (!wq->bufs[i]) {
41 printk(KERN_ERR "Failed to alloc wq_bufs\n");
42 return -ENOMEM;
43 }
44 }
45
46 for (i = 0; i < blks; i++) {
47 buf = wq->bufs[i];
48 for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
49 buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
50 buf->desc = (u8 *)wq->ring.descs +
51 wq->ring.desc_size * buf->index;
52 if (buf->index + 1 == count) {
53 buf->next = wq->bufs[0];
54 break;
55 } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
56 buf->next = wq->bufs[i + 1];
57 } else {
58 buf->next = buf + 1;
59 buf++;
60 }
61 }
62 }
63
64 wq->to_use = wq->to_clean = wq->bufs[0];
65
66 return 0;
67}
68
69void vnic_wq_free(struct vnic_wq *wq)
70{
71 struct vnic_dev *vdev;
72 unsigned int i;
73
74 vdev = wq->vdev;
75
76 vnic_dev_free_desc_ring(vdev, &wq->ring);
77
78 for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
79 kfree(wq->bufs[i]);
80 wq->bufs[i] = NULL;
81 }
82
83 wq->ctrl = NULL;
84}
85
86int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
87 unsigned int desc_count, unsigned int desc_size)
88{
89 int err;
90
91 wq->index = index;
92 wq->vdev = vdev;
93
94 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
95 if (!wq->ctrl) {
96 printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
97 return -EINVAL;
98 }
99
100 vnic_wq_disable(wq);
101
102 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
103 if (err)
104 return err;
105
106 err = vnic_wq_alloc_bufs(wq);
107 if (err) {
108 vnic_wq_free(wq);
109 return err;
110 }
111
112 return 0;
113}
114
115void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
116 unsigned int error_interrupt_enable,
117 unsigned int error_interrupt_offset)
118{
119 u64 paddr;
120
121 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
122 writeq(paddr, &wq->ctrl->ring_base);
123 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
124 iowrite32(0, &wq->ctrl->fetch_index);
125 iowrite32(0, &wq->ctrl->posted_index);
126 iowrite32(cq_index, &wq->ctrl->cq_index);
127 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
128 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
129 iowrite32(0, &wq->ctrl->error_status);
130}
131
132unsigned int vnic_wq_error_status(struct vnic_wq *wq)
133{
134 return ioread32(&wq->ctrl->error_status);
135}
136
137void vnic_wq_enable(struct vnic_wq *wq)
138{
139 iowrite32(1, &wq->ctrl->enable);
140}
141
142int vnic_wq_disable(struct vnic_wq *wq)
143{
144 unsigned int wait;
145
146 iowrite32(0, &wq->ctrl->enable);
147
148 /* Wait for HW to ACK disable request */
149 for (wait = 0; wait < 100; wait++) {
150 if (!(ioread32(&wq->ctrl->running)))
151 return 0;
152 udelay(1);
153 }
154
155 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
156
157 return -ETIMEDOUT;
158}
159
160void vnic_wq_clean(struct vnic_wq *wq,
161 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
162{
163 struct vnic_wq_buf *buf;
164
165 BUG_ON(ioread32(&wq->ctrl->enable));
166
167 buf = wq->to_clean;
168
169 while (vnic_wq_desc_used(wq) > 0) {
170
171 (*buf_clean)(wq, buf);
172
173 buf = wq->to_clean = buf->next;
174 wq->ring.desc_avail++;
175 }
176
177 wq->to_use = wq->to_clean = wq->bufs[0];
178
179 iowrite32(0, &wq->ctrl->fetch_index);
180 iowrite32(0, &wq->ctrl->posted_index);
181 iowrite32(0, &wq->ctrl->error_status);
182
183 vnic_dev_clear_desc_ring(&wq->ring);
184}
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
new file mode 100644
index 000000000000..7081828d8a42
--- /dev/null
+++ b/drivers/net/enic/vnic_wq.h
@@ -0,0 +1,154 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _VNIC_WQ_H_
21#define _VNIC_WQ_H_
22
23#include <linux/pci.h>
24
25#include "vnic_dev.h"
26#include "vnic_cq.h"
27
28/* Work queue control */
29struct vnic_wq_ctrl {
30 u64 ring_base; /* 0x00 */
31 u32 ring_size; /* 0x08 */
32 u32 pad0;
33 u32 posted_index; /* 0x10 */
34 u32 pad1;
35 u32 cq_index; /* 0x18 */
36 u32 pad2;
37 u32 enable; /* 0x20 */
38 u32 pad3;
39 u32 running; /* 0x28 */
40 u32 pad4;
41 u32 fetch_index; /* 0x30 */
42 u32 pad5;
43 u32 dca_value; /* 0x38 */
44 u32 pad6;
45 u32 error_interrupt_enable; /* 0x40 */
46 u32 pad7;
47 u32 error_interrupt_offset; /* 0x48 */
48 u32 pad8;
49 u32 error_status; /* 0x50 */
50 u32 pad9;
51};
52
53struct vnic_wq_buf {
54 struct vnic_wq_buf *next;
55 dma_addr_t dma_addr;
56 void *os_buf;
57 unsigned int len;
58 unsigned int index;
59 int sop;
60 void *desc;
61};
62
63/* Break the vnic_wq_buf allocations into blocks of 64 entries */
64#define VNIC_WQ_BUF_BLK_ENTRIES 64
65#define VNIC_WQ_BUF_BLK_SZ \
66 (VNIC_WQ_BUF_BLK_ENTRIES * sizeof(struct vnic_wq_buf))
67#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \
68 DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES)
69#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096)
70
71struct vnic_wq {
72 unsigned int index;
73 struct vnic_dev *vdev;
74 struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
75 struct vnic_dev_ring ring;
76 struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX];
77 struct vnic_wq_buf *to_use;
78 struct vnic_wq_buf *to_clean;
79 unsigned int pkts_outstanding;
80};
81
82static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
83{
84 /* how many does SW own? */
85 return wq->ring.desc_avail;
86}
87
88static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
89{
90 /* how many does HW own? */
91 return wq->ring.desc_count - wq->ring.desc_avail - 1;
92}
93
94static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
95{
96 return wq->to_use->desc;
97}
98
99static inline void vnic_wq_post(struct vnic_wq *wq,
100 void *os_buf, dma_addr_t dma_addr,
101 unsigned int len, int sop, int eop)
102{
103 struct vnic_wq_buf *buf = wq->to_use;
104
105 buf->sop = sop;
106 buf->os_buf = eop ? os_buf : NULL;
107 buf->dma_addr = dma_addr;
108 buf->len = len;
109
110 buf = buf->next;
111 if (eop)
112 iowrite32(buf->index, &wq->ctrl->posted_index);
113 wq->to_use = buf;
114
115 wq->ring.desc_avail--;
116}
117
118static inline void vnic_wq_service(struct vnic_wq *wq,
119 struct cq_desc *cq_desc, u16 completed_index,
120 void (*buf_service)(struct vnic_wq *wq,
121 struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque),
122 void *opaque)
123{
124 struct vnic_wq_buf *buf;
125
126 buf = wq->to_clean;
127 while (1) {
128
129 (*buf_service)(wq, cq_desc, buf, opaque);
130
131 wq->ring.desc_avail++;
132
133 wq->to_clean = buf->next;
134
135 if (buf->index == completed_index)
136 break;
137
138 buf = wq->to_clean;
139 }
140}
141
142void vnic_wq_free(struct vnic_wq *wq);
143int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
144 unsigned int desc_count, unsigned int desc_size);
145void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
146 unsigned int error_interrupt_enable,
147 unsigned int error_interrupt_offset);
148unsigned int vnic_wq_error_status(struct vnic_wq *wq);
149void vnic_wq_enable(struct vnic_wq *wq);
150int vnic_wq_disable(struct vnic_wq *wq);
151void vnic_wq_clean(struct vnic_wq *wq,
152 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
153
154#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/enic/wq_enet_desc.h b/drivers/net/enic/wq_enet_desc.h
new file mode 100644
index 000000000000..483596c2d8bf
--- /dev/null
+++ b/drivers/net/enic/wq_enet_desc.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
17 *
18 */
19
20#ifndef _WQ_ENET_DESC_H_
21#define _WQ_ENET_DESC_H_
22
23/* Ethernet work queue descriptor: 16B */
24struct wq_enet_desc {
25 __le64 address;
26 __le16 length;
27 __le16 mss_loopback;
28 __le16 header_length_flags;
29 __le16 vlan_tag;
30};
31
32#define WQ_ENET_ADDR_BITS 64
33#define WQ_ENET_LEN_BITS 14
34#define WQ_ENET_LEN_MASK ((1 << WQ_ENET_LEN_BITS) - 1)
35#define WQ_ENET_MSS_BITS 14
36#define WQ_ENET_MSS_MASK ((1 << WQ_ENET_MSS_BITS) - 1)
37#define WQ_ENET_MSS_SHIFT 2
38#define WQ_ENET_LOOPBACK_SHIFT 1
39#define WQ_ENET_HDRLEN_BITS 10
40#define WQ_ENET_HDRLEN_MASK ((1 << WQ_ENET_HDRLEN_BITS) - 1)
41#define WQ_ENET_FLAGS_OM_BITS 2
42#define WQ_ENET_FLAGS_OM_MASK ((1 << WQ_ENET_FLAGS_OM_BITS) - 1)
43#define WQ_ENET_FLAGS_EOP_SHIFT 12
44#define WQ_ENET_FLAGS_CQ_ENTRY_SHIFT 13
45#define WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT 14
46#define WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT 15
47
48#define WQ_ENET_OFFLOAD_MODE_CSUM 0
49#define WQ_ENET_OFFLOAD_MODE_RESERVED 1
50#define WQ_ENET_OFFLOAD_MODE_CSUM_L4 2
51#define WQ_ENET_OFFLOAD_MODE_TSO 3
52
53static inline void wq_enet_desc_enc(struct wq_enet_desc *desc,
54 u64 address, u16 length, u16 mss, u16 header_length,
55 u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,
56 u8 vlan_tag_insert, u16 vlan_tag, u8 loopback)
57{
58 desc->address = cpu_to_le64(address);
59 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK);
60 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) <<
61 WQ_ENET_MSS_SHIFT | (loopback & 1) << WQ_ENET_LOOPBACK_SHIFT);
62 desc->header_length_flags = cpu_to_le16(
63 (header_length & WQ_ENET_HDRLEN_MASK) |
64 (offload_mode & WQ_ENET_FLAGS_OM_MASK) << WQ_ENET_HDRLEN_BITS |
65 (eop & 1) << WQ_ENET_FLAGS_EOP_SHIFT |
66 (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |
67 (fcoe_encap & 1) << WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT |
68 (vlan_tag_insert & 1) << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT);
69 desc->vlan_tag = cpu_to_le16(vlan_tag);
70}
71
72static inline void wq_enet_desc_dec(struct wq_enet_desc *desc,
73 u64 *address, u16 *length, u16 *mss, u16 *header_length,
74 u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,
75 u8 *vlan_tag_insert, u16 *vlan_tag, u8 *loopback)
76{
77 *address = le64_to_cpu(desc->address);
78 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK;
79 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) &
80 WQ_ENET_MSS_MASK;
81 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >>
82 WQ_ENET_LOOPBACK_SHIFT) & 1);
83 *header_length = le16_to_cpu(desc->header_length_flags) &
84 WQ_ENET_HDRLEN_MASK;
85 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >>
86 WQ_ENET_HDRLEN_BITS) & WQ_ENET_FLAGS_OM_MASK);
87 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >>
88 WQ_ENET_FLAGS_EOP_SHIFT) & 1);
89 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>
90 WQ_ENET_FLAGS_CQ_ENTRY_SHIFT) & 1);
91 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >>
92 WQ_ENET_FLAGS_FCOE_ENCAP_SHIFT) & 1);
93 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >>
94 WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT) & 1);
95 *vlan_tag = le16_to_cpu(desc->vlan_tag);
96}
97
98#endif /* _WQ_ENET_DESC_H_ */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index eeb55ed2152d..cc7328b15521 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -337,7 +337,7 @@ enum {
337 NvRegMSIXIrqStatus = 0x3f0, 337 NvRegMSIXIrqStatus = 0x3f0,
338 338
339 NvRegPowerState2 = 0x600, 339 NvRegPowerState2 = 0x600,
340#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F11 340#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
341#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001 341#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
342#define NVREG_POWERSTATE2_PHY_RESET 0x0004 342#define NVREG_POWERSTATE2_PHY_RESET 0x0004
343}; 343};
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 9d461825bf4c..cb51c1fb0338 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -664,23 +664,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
664 return NETDEV_TX_OK; 664 return NETDEV_TX_OK;
665} 665}
666 666
667static int fs_request_irq(struct net_device *dev, int irq, const char *name,
668 irq_handler_t irqf)
669{
670 struct fs_enet_private *fep = netdev_priv(dev);
671
672 (*fep->ops->pre_request_irq)(dev, irq);
673 return request_irq(irq, irqf, IRQF_SHARED, name, dev);
674}
675
676static void fs_free_irq(struct net_device *dev, int irq)
677{
678 struct fs_enet_private *fep = netdev_priv(dev);
679
680 free_irq(irq, dev);
681 (*fep->ops->post_free_irq)(dev, irq);
682}
683
684static void fs_timeout(struct net_device *dev) 667static void fs_timeout(struct net_device *dev)
685{ 668{
686 struct fs_enet_private *fep = netdev_priv(dev); 669 struct fs_enet_private *fep = netdev_priv(dev);
@@ -800,7 +783,8 @@ static int fs_enet_open(struct net_device *dev)
800 napi_enable(&fep->napi); 783 napi_enable(&fep->napi);
801 784
802 /* Install our interrupt handler. */ 785 /* Install our interrupt handler. */
803 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); 786 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
787 "fs_enet-mac", dev);
804 if (r != 0) { 788 if (r != 0) {
805 printk(KERN_ERR DRV_MODULE_NAME 789 printk(KERN_ERR DRV_MODULE_NAME
806 ": %s Could not allocate FS_ENET IRQ!", dev->name); 790 ": %s Could not allocate FS_ENET IRQ!", dev->name);
@@ -842,7 +826,7 @@ static int fs_enet_close(struct net_device *dev)
842 /* release any irqs */ 826 /* release any irqs */
843 phy_disconnect(fep->phydev); 827 phy_disconnect(fep->phydev);
844 fep->phydev = NULL; 828 fep->phydev = NULL;
845 fs_free_irq(dev, fep->interrupt); 829 free_irq(fep->interrupt, dev);
846 830
847 return 0; 831 return 0;
848} 832}
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index db46d2e72329..85a4bab7f630 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -34,8 +34,6 @@ struct fs_ops {
34 void (*adjust_link)(struct net_device *dev); 34 void (*adjust_link)(struct net_device *dev);
35 void (*restart)(struct net_device *dev); 35 void (*restart)(struct net_device *dev);
36 void (*stop)(struct net_device *dev); 36 void (*stop)(struct net_device *dev);
37 void (*pre_request_irq)(struct net_device *dev, int irq);
38 void (*post_free_irq)(struct net_device *dev, int irq);
39 void (*napi_clear_rx_event)(struct net_device *dev); 37 void (*napi_clear_rx_event)(struct net_device *dev);
40 void (*napi_enable_rx)(struct net_device *dev); 38 void (*napi_enable_rx)(struct net_device *dev);
41 void (*napi_disable_rx)(struct net_device *dev); 39 void (*napi_disable_rx)(struct net_device *dev);
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 1c7ef812a8e3..22e5a847a588 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -421,16 +421,6 @@ static void stop(struct net_device *dev)
421 fs_cleanup_bds(dev); 421 fs_cleanup_bds(dev);
422} 422}
423 423
424static void pre_request_irq(struct net_device *dev, int irq)
425{
426 /* nothing */
427}
428
429static void post_free_irq(struct net_device *dev, int irq)
430{
431 /* nothing */
432}
433
434static void napi_clear_rx_event(struct net_device *dev) 424static void napi_clear_rx_event(struct net_device *dev)
435{ 425{
436 struct fs_enet_private *fep = netdev_priv(dev); 426 struct fs_enet_private *fep = netdev_priv(dev);
@@ -540,8 +530,6 @@ const struct fs_ops fs_fcc_ops = {
540 .set_multicast_list = set_multicast_list, 530 .set_multicast_list = set_multicast_list,
541 .restart = restart, 531 .restart = restart,
542 .stop = stop, 532 .stop = stop,
543 .pre_request_irq = pre_request_irq,
544 .post_free_irq = post_free_irq,
545 .napi_clear_rx_event = napi_clear_rx_event, 533 .napi_clear_rx_event = napi_clear_rx_event,
546 .napi_enable_rx = napi_enable_rx, 534 .napi_enable_rx = napi_enable_rx,
547 .napi_disable_rx = napi_disable_rx, 535 .napi_disable_rx = napi_disable_rx,
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 0a7d1c5c6524..14e575313c89 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -313,11 +313,7 @@ static void restart(struct net_device *dev)
313 * Clear any outstanding interrupt. 313 * Clear any outstanding interrupt.
314 */ 314 */
315 FW(fecp, ievent, 0xffc0); 315 FW(fecp, ievent, 0xffc0);
316#ifndef CONFIG_PPC_MERGE
317 FW(fecp, ivec, (fep->interrupt / 2) << 29);
318#else
319 FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); 316 FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
320#endif
321 317
322 /* 318 /*
323 * adjust to speed (only for DUET & RMII) 319 * adjust to speed (only for DUET & RMII)
@@ -413,30 +409,6 @@ static void stop(struct net_device *dev)
413 } 409 }
414} 410}
415 411
416static void pre_request_irq(struct net_device *dev, int irq)
417{
418#ifndef CONFIG_PPC_MERGE
419 immap_t *immap = fs_enet_immap;
420 u32 siel;
421
422 /* SIU interrupt */
423 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
424
425 siel = in_be32(&immap->im_siu_conf.sc_siel);
426 if ((irq & 1) == 0)
427 siel |= (0x80000000 >> irq);
428 else
429 siel &= ~(0x80000000 >> (irq & ~1));
430 out_be32(&immap->im_siu_conf.sc_siel, siel);
431 }
432#endif
433}
434
435static void post_free_irq(struct net_device *dev, int irq)
436{
437 /* nothing */
438}
439
440static void napi_clear_rx_event(struct net_device *dev) 412static void napi_clear_rx_event(struct net_device *dev)
441{ 413{
442 struct fs_enet_private *fep = netdev_priv(dev); 414 struct fs_enet_private *fep = netdev_priv(dev);
@@ -529,8 +501,6 @@ const struct fs_ops fs_fec_ops = {
529 .set_multicast_list = set_multicast_list, 501 .set_multicast_list = set_multicast_list,
530 .restart = restart, 502 .restart = restart,
531 .stop = stop, 503 .stop = stop,
532 .pre_request_irq = pre_request_irq,
533 .post_free_irq = post_free_irq,
534 .napi_clear_rx_event = napi_clear_rx_event, 504 .napi_clear_rx_event = napi_clear_rx_event,
535 .napi_enable_rx = napi_enable_rx, 505 .napi_enable_rx = napi_enable_rx,
536 .napi_disable_rx = napi_disable_rx, 506 .napi_disable_rx = napi_disable_rx,
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 22f50dd8b277..008cdd9cc536 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -377,30 +377,6 @@ static void stop(struct net_device *dev)
377 fs_cleanup_bds(dev); 377 fs_cleanup_bds(dev);
378} 378}
379 379
380static void pre_request_irq(struct net_device *dev, int irq)
381{
382#ifndef CONFIG_PPC_MERGE
383 immap_t *immap = fs_enet_immap;
384 u32 siel;
385
386 /* SIU interrupt */
387 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
388
389 siel = in_be32(&immap->im_siu_conf.sc_siel);
390 if ((irq & 1) == 0)
391 siel |= (0x80000000 >> irq);
392 else
393 siel &= ~(0x80000000 >> (irq & ~1));
394 out_be32(&immap->im_siu_conf.sc_siel, siel);
395 }
396#endif
397}
398
399static void post_free_irq(struct net_device *dev, int irq)
400{
401 /* nothing */
402}
403
404static void napi_clear_rx_event(struct net_device *dev) 380static void napi_clear_rx_event(struct net_device *dev)
405{ 381{
406 struct fs_enet_private *fep = netdev_priv(dev); 382 struct fs_enet_private *fep = netdev_priv(dev);
@@ -494,8 +470,6 @@ const struct fs_ops fs_scc_ops = {
494 .set_multicast_list = set_multicast_list, 470 .set_multicast_list = set_multicast_list,
495 .restart = restart, 471 .restart = restart,
496 .stop = stop, 472 .stop = stop,
497 .pre_request_irq = pre_request_irq,
498 .post_free_irq = post_free_irq,
499 .napi_clear_rx_event = napi_clear_rx_event, 473 .napi_clear_rx_event = napi_clear_rx_event,
500 .napi_enable_rx = napi_enable_rx, 474 .napi_enable_rx = napi_enable_rx,
501 .napi_disable_rx = napi_disable_rx, 475 .napi_disable_rx = napi_disable_rx,
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index ebcfb27a904e..678f48c69119 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -136,12 +136,12 @@ static int gfar_mdio_reset(struct mii_bus *bus)
136 136
137 /* Wait until the bus is free */ 137 /* Wait until the bus is free */
138 while ((gfar_read(&regs->miimind) & MIIMIND_BUSY) && 138 while ((gfar_read(&regs->miimind) & MIIMIND_BUSY) &&
139 timeout--) 139 --timeout)
140 cpu_relax(); 140 cpu_relax();
141 141
142 mutex_unlock(&bus->mdio_lock); 142 mutex_unlock(&bus->mdio_lock);
143 143
144 if(timeout <= 0) { 144 if(timeout == 0) {
145 printk(KERN_ERR "%s: The MII Bus is stuck!\n", 145 printk(KERN_ERR "%s: The MII Bus is stuck!\n",
146 bus->name); 146 bus->name);
147 return -EBUSY; 147 return -EBUSY;
@@ -211,19 +211,21 @@ static int gfar_mdio_probe(struct device *dev)
211 gfar_write(&enet_regs->tbipa, 0); 211 gfar_write(&enet_regs->tbipa, 0);
212 for (i = PHY_MAX_ADDR; i > 0; i--) { 212 for (i = PHY_MAX_ADDR; i > 0; i--) {
213 u32 phy_id; 213 u32 phy_id;
214 int r;
215 214
216 r = get_phy_id(new_bus, i, &phy_id); 215 err = get_phy_id(new_bus, i, &phy_id);
217 if (r) 216 if (err)
218 return r; 217 goto bus_register_fail;
219 218
220 if (phy_id == 0xffffffff) 219 if (phy_id == 0xffffffff)
221 break; 220 break;
222 } 221 }
223 222
224 /* The bus is full. We don't support using 31 PHYs, sorry */ 223 /* The bus is full. We don't support using 31 PHYs, sorry */
225 if (i == 0) 224 if (i == 0) {
226 return -EBUSY; 225 err = -EBUSY;
226
227 goto bus_register_fail;
228 }
227 229
228 gfar_write(&enet_regs->tbipa, i); 230 gfar_write(&enet_regs->tbipa, i);
229 231
diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig
index 70a3272ee998..bcec7320895c 100644
--- a/drivers/net/ibm_newemac/Kconfig
+++ b/drivers/net/ibm_newemac/Kconfig
@@ -1,6 +1,6 @@
1config IBM_NEW_EMAC 1config IBM_NEW_EMAC
2 tristate "IBM EMAC Ethernet support" 2 tristate "IBM EMAC Ethernet support"
3 depends on PPC_DCR && PPC_MERGE 3 depends on PPC_DCR
4 select CRC32 4 select CRC32
5 help 5 help
6 This driver supports the IBM EMAC family of Ethernet controllers 6 This driver supports the IBM EMAC family of Ethernet controllers
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index eaa7262dc079..717dc38b6858 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -102,7 +102,7 @@
102/* MAL V1 IER bits */ 102/* MAL V1 IER bits */
103#define MAL1_IER_NWE 0x00000008 103#define MAL1_IER_NWE 0x00000008
104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE 104#define MAL1_IER_SOC_EVENTS MAL1_IER_NWE
105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_OTE | \ 105#define MAL1_IER_EVENTS (MAL1_IER_SOC_EVENTS | MAL_IER_DE | \
106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) 106 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
107 107
108/* MAL V2 IER bits */ 108/* MAL V2 IER bits */
@@ -110,7 +110,7 @@
110#define MAL2_IER_PRE 0x00000040 110#define MAL2_IER_PRE 0x00000040
111#define MAL2_IER_PWE 0x00000020 111#define MAL2_IER_PWE 0x00000020
112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE) 112#define MAL2_IER_SOC_EVENTS (MAL2_IER_PT | MAL2_IER_PRE | MAL2_IER_PWE)
113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_OTE | \ 113#define MAL2_IER_EVENTS (MAL2_IER_SOC_EVENTS | MAL_IER_DE | \
114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE) 114 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
115 115
116 116
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index 37bfeea8788a..9164abb72d9b 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -321,7 +321,7 @@ static struct mii_phy_def bcm5248_phy_def = {
321 321
322static int m88e1111_init(struct mii_phy *phy) 322static int m88e1111_init(struct mii_phy *phy)
323{ 323{
324 pr_debug("%s: Marvell 88E1111 Ethernet\n", __FUNCTION__); 324 pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
325 phy_write(phy, 0x14, 0x0ce3); 325 phy_write(phy, 0x14, 0x0ce3);
326 phy_write(phy, 0x18, 0x4101); 326 phy_write(phy, 0x18, 0x4101);
327 phy_write(phy, 0x09, 0x0e00); 327 phy_write(phy, 0x09, 0x0e00);
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 634c4c9d87be..93d02efa9a0a 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -3563,10 +3563,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3563 struct net_device *netdev = adapter->netdev; 3563 struct net_device *netdev = adapter->netdev;
3564 int work_done = 0; 3564 int work_done = 0;
3565 3565
3566 /* Keep link state information with original netdev */
3567 if (!netif_carrier_ok(netdev))
3568 goto quit_polling;
3569
3570#ifdef CONFIG_DCA 3566#ifdef CONFIG_DCA
3571 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3567 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3572 igb_update_rx_dca(rx_ring); 3568 igb_update_rx_dca(rx_ring);
@@ -3576,7 +3572,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3576 3572
3577 /* If not enough Rx work done, exit the polling mode */ 3573 /* If not enough Rx work done, exit the polling mode */
3578 if ((work_done == 0) || !netif_running(netdev)) { 3574 if ((work_done == 0) || !netif_running(netdev)) {
3579quit_polling:
3580 netif_rx_complete(netdev, napi); 3575 netif_rx_complete(netdev, napi);
3581 3576
3582 if (adapter->itr_setting & 3) { 3577 if (adapter->itr_setting & 3) {
@@ -3617,16 +3612,14 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3617 unsigned int i; 3612 unsigned int i;
3618 u32 head, oldhead; 3613 u32 head, oldhead;
3619 unsigned int count = 0; 3614 unsigned int count = 0;
3620 bool cleaned = false;
3621 bool retval = true;
3622 unsigned int total_bytes = 0, total_packets = 0; 3615 unsigned int total_bytes = 0, total_packets = 0;
3616 bool retval = true;
3623 3617
3624 rmb(); 3618 rmb();
3625 head = get_head(tx_ring); 3619 head = get_head(tx_ring);
3626 i = tx_ring->next_to_clean; 3620 i = tx_ring->next_to_clean;
3627 while (1) { 3621 while (1) {
3628 while (i != head) { 3622 while (i != head) {
3629 cleaned = true;
3630 tx_desc = E1000_TX_DESC(*tx_ring, i); 3623 tx_desc = E1000_TX_DESC(*tx_ring, i);
3631 buffer_info = &tx_ring->buffer_info[i]; 3624 buffer_info = &tx_ring->buffer_info[i];
3632 skb = buffer_info->skb; 3625 skb = buffer_info->skb;
@@ -3643,7 +3636,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3643 } 3636 }
3644 3637
3645 igb_unmap_and_free_tx_resource(adapter, buffer_info); 3638 igb_unmap_and_free_tx_resource(adapter, buffer_info);
3646 tx_desc->upper.data = 0;
3647 3639
3648 i++; 3640 i++;
3649 if (i == tx_ring->count) 3641 if (i == tx_ring->count)
@@ -3665,7 +3657,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3665done_cleaning: 3657done_cleaning:
3666 tx_ring->next_to_clean = i; 3658 tx_ring->next_to_clean = i;
3667 3659
3668 if (unlikely(cleaned && 3660 if (unlikely(count &&
3669 netif_carrier_ok(netdev) && 3661 netif_carrier_ok(netdev) &&
3670 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) { 3662 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
3671 /* Make sure that anybody stopping the queue after this 3663 /* Make sure that anybody stopping the queue after this
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 18f4b3a96aed..9c926d205de9 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -165,7 +165,7 @@ static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
165 unsigned iobase = pci_resource_start(pdev, 0); 165 unsigned iobase = pci_resource_start(pdev, 0);
166 unsigned i; 166 unsigned i;
167 167
168 seq_printf(seq, "\n%s (vid/did: %04x/%04x)\n", 168 seq_printf(seq, "\n%s (vid/did: [%04x:%04x])\n",
169 pci_name(pdev), (int)pdev->vendor, (int)pdev->device); 169 pci_name(pdev), (int)pdev->vendor, (int)pdev->device);
170 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state); 170 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
171 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n", 171 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 804698fc6a8f..d85717e3022a 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -85,7 +85,7 @@ struct ixgb_adapter;
85#define DPRINTK(nlevel, klevel, fmt, args...) \ 85#define DPRINTK(nlevel, klevel, fmt, args...) \
86 (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 86 (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
87 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ 87 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
88 __FUNCTION__ , ## args)) 88 __func__ , ## args))
89 89
90 90
91/* TX/RX descriptor defines */ 91/* TX/RX descriptor defines */
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 956914a5028d..2198b77c53ed 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -37,17 +36,15 @@
37#include "ixgbe_type.h" 36#include "ixgbe_type.h"
38#include "ixgbe_common.h" 37#include "ixgbe_common.h"
39 38
40#ifdef CONFIG_DCA 39#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
41#include <linux/dca.h> 40#include <linux/dca.h>
42#endif 41#endif
43 42
44#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
45
46#define PFX "ixgbe: " 43#define PFX "ixgbe: "
47#define DPRINTK(nlevel, klevel, fmt, args...) \ 44#define DPRINTK(nlevel, klevel, fmt, args...) \
48 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 45 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
49 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ 46 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
50 __FUNCTION__ , ## args))) 47 __func__ , ## args)))
51 48
52/* TX/RX descriptor defines */ 49/* TX/RX descriptor defines */
53#define IXGBE_DEFAULT_TXD 1024 50#define IXGBE_DEFAULT_TXD 1024
@@ -58,23 +55,14 @@
58#define IXGBE_MAX_RXD 4096 55#define IXGBE_MAX_RXD 4096
59#define IXGBE_MIN_RXD 64 56#define IXGBE_MIN_RXD 64
60 57
61#define IXGBE_DEFAULT_RXQ 1
62#define IXGBE_MAX_RXQ 1
63#define IXGBE_MIN_RXQ 1
64
65#define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */
66#define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */
67#define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */
68#define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */
69
70/* flow control */ 58/* flow control */
71#define IXGBE_DEFAULT_FCRTL 0x10000 59#define IXGBE_DEFAULT_FCRTL 0x10000
72#define IXGBE_MIN_FCRTL 0 60#define IXGBE_MIN_FCRTL 0x40
73#define IXGBE_MAX_FCRTL 0x7FF80 61#define IXGBE_MAX_FCRTL 0x7FF80
74#define IXGBE_DEFAULT_FCRTH 0x20000 62#define IXGBE_DEFAULT_FCRTH 0x20000
75#define IXGBE_MIN_FCRTH 0 63#define IXGBE_MIN_FCRTH 0x600
76#define IXGBE_MAX_FCRTH 0x7FFF0 64#define IXGBE_MAX_FCRTH 0x7FFF0
77#define IXGBE_DEFAULT_FCPAUSE 0x6800 /* may be too long */ 65#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
78#define IXGBE_MIN_FCPAUSE 0 66#define IXGBE_MIN_FCPAUSE 0
79#define IXGBE_MAX_FCPAUSE 0xFFFF 67#define IXGBE_MAX_FCPAUSE 0xFFFF
80 68
@@ -88,9 +76,6 @@
88 76
89#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 77#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
90 78
91/* How many Tx Descriptors do we need to call netif_wake_queue? */
92#define IXGBE_TX_QUEUE_WAKE 16
93
94/* How many Rx Buffers do we bundle into one write to the hardware ? */ 79/* How many Rx Buffers do we bundle into one write to the hardware ? */
95#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 80#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
96 81
@@ -119,6 +104,7 @@ struct ixgbe_rx_buffer {
119 dma_addr_t dma; 104 dma_addr_t dma;
120 struct page *page; 105 struct page *page;
121 dma_addr_t page_dma; 106 dma_addr_t page_dma;
107 unsigned int page_offset;
122}; 108};
123 109
124struct ixgbe_queue_stats { 110struct ixgbe_queue_stats {
@@ -150,22 +136,20 @@ struct ixgbe_ring {
150 * offset associated with this ring, which is different 136 * offset associated with this ring, which is different
151 * for DCE and RSS modes */ 137 * for DCE and RSS modes */
152 138
153#ifdef CONFIG_DCA 139#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
154 /* cpu for tx queue */ 140 /* cpu for tx queue */
155 int cpu; 141 int cpu;
156#endif 142#endif
157 struct net_lro_mgr lro_mgr; 143 struct net_lro_mgr lro_mgr;
158 bool lro_used; 144 bool lro_used;
159 struct ixgbe_queue_stats stats; 145 struct ixgbe_queue_stats stats;
160 u8 v_idx; /* maps directly to the index for this ring in the hardware 146 u16 v_idx; /* maps directly to the index for this ring in the hardware
161 * vector array, can also be used for finding the bit in EICR 147 * vector array, can also be used for finding the bit in EICR
162 * and friends that represents the vector for this ring */ 148 * and friends that represents the vector for this ring */
163 149
164 u32 eims_value;
165 u16 itr_register;
166 150
167 char name[IFNAMSIZ + 5];
168 u16 work_limit; /* max work per interrupt */ 151 u16 work_limit; /* max work per interrupt */
152 u16 rx_buf_len;
169}; 153};
170 154
171#define RING_F_VMDQ 1 155#define RING_F_VMDQ 1
@@ -190,8 +174,8 @@ struct ixgbe_q_vector {
190 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 174 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
191 u8 rxr_count; /* Rx ring count assigned to this vector */ 175 u8 rxr_count; /* Rx ring count assigned to this vector */
192 u8 txr_count; /* Tx ring count assigned to this vector */ 176 u8 txr_count; /* Tx ring count assigned to this vector */
193 u8 tx_eitr; 177 u8 tx_itr;
194 u8 rx_eitr; 178 u8 rx_itr;
195 u32 eitr; 179 u32 eitr;
196}; 180};
197 181
@@ -228,7 +212,6 @@ struct ixgbe_adapter {
228 struct timer_list watchdog_timer; 212 struct timer_list watchdog_timer;
229 struct vlan_group *vlgrp; 213 struct vlan_group *vlgrp;
230 u16 bd_number; 214 u16 bd_number;
231 u16 rx_buf_len;
232 struct work_struct reset_task; 215 struct work_struct reset_task;
233 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; 216 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
234 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5]; 217 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
@@ -240,7 +223,9 @@ struct ixgbe_adapter {
240 223
241 /* TX */ 224 /* TX */
242 struct ixgbe_ring *tx_ring; /* One per active queue */ 225 struct ixgbe_ring *tx_ring; /* One per active queue */
226 int num_tx_queues;
243 u64 restart_queue; 227 u64 restart_queue;
228 u64 hw_csum_tx_good;
244 u64 lsc_int; 229 u64 lsc_int;
245 u64 hw_tso_ctxt; 230 u64 hw_tso_ctxt;
246 u64 hw_tso6_ctxt; 231 u64 hw_tso6_ctxt;
@@ -249,12 +234,10 @@ struct ixgbe_adapter {
249 234
250 /* RX */ 235 /* RX */
251 struct ixgbe_ring *rx_ring; /* One per active queue */ 236 struct ixgbe_ring *rx_ring; /* One per active queue */
252 u64 hw_csum_tx_good; 237 int num_rx_queues;
253 u64 hw_csum_rx_error; 238 u64 hw_csum_rx_error;
254 u64 hw_csum_rx_good; 239 u64 hw_csum_rx_good;
255 u64 non_eop_descs; 240 u64 non_eop_descs;
256 int num_tx_queues;
257 int num_rx_queues;
258 int num_msix_vectors; 241 int num_msix_vectors;
259 struct ixgbe_ring_feature ring_feature[3]; 242 struct ixgbe_ring_feature ring_feature[3];
260 struct msix_entry *msix_entries; 243 struct msix_entry *msix_entries;
@@ -267,15 +250,28 @@ struct ixgbe_adapter {
267 * thus the additional *_CAPABLE flags. 250 * thus the additional *_CAPABLE flags.
268 */ 251 */
269 u32 flags; 252 u32 flags;
270#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0) 253#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
271#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) 254#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
272#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2) 255#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
273#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) 256#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
274#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) 257#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
275#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5) 258#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
276#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6) 259#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
277#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7) 260#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
278#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) 261#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
262#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
263#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
264#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
265#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
266#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
267#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
268#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
269#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
270#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
271#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
272
273/* default to trying for four seconds */
274#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
279 275
280 /* OS defined structs */ 276 /* OS defined structs */
281 struct net_device *netdev; 277 struct net_device *netdev;
@@ -288,14 +284,21 @@ struct ixgbe_adapter {
288 struct ixgbe_hw_stats stats; 284 struct ixgbe_hw_stats stats;
289 285
290 /* Interrupt Throttle Rate */ 286 /* Interrupt Throttle Rate */
291 u32 rx_eitr; 287 u32 eitr_param;
292 u32 tx_eitr;
293 288
294 unsigned long state; 289 unsigned long state;
295 u64 tx_busy; 290 u64 tx_busy;
296 u64 lro_aggregated; 291 u64 lro_aggregated;
297 u64 lro_flushed; 292 u64 lro_flushed;
298 u64 lro_no_desc; 293 u64 lro_no_desc;
294 unsigned int tx_ring_count;
295 unsigned int rx_ring_count;
296
297 u32 link_speed;
298 bool link_up;
299 unsigned long link_check_timeout;
300
301 struct work_struct watchdog_task;
299}; 302};
300 303
301enum ixbge_state_t { 304enum ixbge_state_t {
@@ -317,11 +320,11 @@ extern int ixgbe_up(struct ixgbe_adapter *adapter);
317extern void ixgbe_down(struct ixgbe_adapter *adapter); 320extern void ixgbe_down(struct ixgbe_adapter *adapter);
318extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 321extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
319extern void ixgbe_reset(struct ixgbe_adapter *adapter); 322extern void ixgbe_reset(struct ixgbe_adapter *adapter);
320extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
321extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 323extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
322extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 324extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
323 struct ixgbe_ring *rxdr); 325extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
324extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 326extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
325 struct ixgbe_ring *txdr); 327extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
328extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
326 329
327#endif /* _IXGBE_H_ */ 330#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index f96358b641af..7cddcfba809e 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -36,67 +35,62 @@
36#define IXGBE_82598_MAX_TX_QUEUES 32 35#define IXGBE_82598_MAX_TX_QUEUES 32
37#define IXGBE_82598_MAX_RX_QUEUES 64 36#define IXGBE_82598_MAX_RX_QUEUES 64
38#define IXGBE_82598_RAR_ENTRIES 16 37#define IXGBE_82598_RAR_ENTRIES 16
38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128
39 40
40static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); 41static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
41static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, 42 ixgbe_link_speed *speed,
42 bool *autoneg); 43 bool *autoneg);
43static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
44 u32 *speed, bool *autoneg);
45static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
46static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw);
47static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
48 bool *link_up);
49static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
50 bool autoneg,
51 bool autoneg_wait_to_complete);
52static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); 44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw);
53static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, 45static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
54 bool autoneg, 46 ixgbe_link_speed speed,
55 bool autoneg_wait_to_complete); 47 bool autoneg,
56static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); 48 bool autoneg_wait_to_complete);
57
58 49
50/**
51 */
59static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 52static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
60{ 53{
61 hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 54 struct ixgbe_mac_info *mac = &hw->mac;
62 hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 55 struct ixgbe_phy_info *phy = &hw->phy;
63 hw->mac.num_rx_addrs = IXGBE_82598_RAR_ENTRIES; 56
64 57 /* Call PHY identify routine to get the phy type */
65 /* PHY ops are filled in by default properly for Fiber only */ 58 ixgbe_identify_phy_generic(hw);
66 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 59
67 hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598; 60 /* PHY Init */
68 hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598; 61 switch (phy->type) {
69 hw->mac.ops.get_link_settings = 62 default:
70 &ixgbe_get_copper_link_settings_82598; 63 break;
71
72 /* Call PHY identify routine to get the phy type */
73 ixgbe_identify_phy(hw);
74
75 switch (hw->phy.type) {
76 case ixgbe_phy_tn:
77 hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link;
78 hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link;
79 hw->phy.ops.setup_link_speed =
80 &ixgbe_setup_tnx_phy_link_speed;
81 break;
82 default:
83 break;
84 }
85 } 64 }
86 65
66 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
67 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
68 mac->ops.setup_link_speed =
69 &ixgbe_setup_copper_link_speed_82598;
70 mac->ops.get_link_capabilities =
71 &ixgbe_get_copper_link_capabilities_82598;
72 }
73
74 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
75 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
76 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
77 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
78 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
79
87 return 0; 80 return 0;
88} 81}
89 82
90/** 83/**
91 * ixgbe_get_link_settings_82598 - Determines default link settings 84 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
92 * @hw: pointer to hardware structure 85 * @hw: pointer to hardware structure
93 * @speed: pointer to link speed 86 * @speed: pointer to link speed
94 * @autoneg: boolean auto-negotiation value 87 * @autoneg: boolean auto-negotiation value
95 * 88 *
96 * Determines the default link settings by reading the AUTOC register. 89 * Determines the link capabilities by reading the AUTOC register.
97 **/ 90 **/
98static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, 91static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
99 bool *autoneg) 92 ixgbe_link_speed *speed,
93 bool *autoneg)
100{ 94{
101 s32 status = 0; 95 s32 status = 0;
102 s32 autoc_reg; 96 s32 autoc_reg;
@@ -145,15 +139,16 @@ static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed,
145} 139}
146 140
147/** 141/**
148 * ixgbe_get_copper_link_settings_82598 - Determines default link settings 142 * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
149 * @hw: pointer to hardware structure 143 * @hw: pointer to hardware structure
150 * @speed: pointer to link speed 144 * @speed: pointer to link speed
151 * @autoneg: boolean auto-negotiation value 145 * @autoneg: boolean auto-negotiation value
152 * 146 *
153 * Determines the default link settings by reading the AUTOC register. 147 * Determines the link capabilities by reading the AUTOC register.
154 **/ 148 **/
155static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, 149s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
156 u32 *speed, bool *autoneg) 150 ixgbe_link_speed *speed,
151 bool *autoneg)
157{ 152{
158 s32 status = IXGBE_ERR_LINK_SETUP; 153 s32 status = IXGBE_ERR_LINK_SETUP;
159 u16 speed_ability; 154 u16 speed_ability;
@@ -161,9 +156,9 @@ static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw,
161 *speed = 0; 156 *speed = 0;
162 *autoneg = true; 157 *autoneg = true;
163 158
164 status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, 159 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
165 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 160 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
166 &speed_ability); 161 &speed_ability);
167 162
168 if (status == 0) { 163 if (status == 0) {
169 if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) 164 if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
@@ -191,11 +186,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
191 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 186 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
192 case IXGBE_DEV_ID_82598EB_CX4: 187 case IXGBE_DEV_ID_82598EB_CX4:
193 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 188 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
189 case IXGBE_DEV_ID_82598EB_XF_LR:
194 media_type = ixgbe_media_type_fiber; 190 media_type = ixgbe_media_type_fiber;
195 break; 191 break;
196 case IXGBE_DEV_ID_82598AT_DUAL_PORT:
197 media_type = ixgbe_media_type_copper;
198 break;
199 default: 192 default:
200 media_type = ixgbe_media_type_unknown; 193 media_type = ixgbe_media_type_unknown;
201 break; 194 break;
@@ -205,6 +198,122 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
205} 198}
206 199
207/** 200/**
201 * ixgbe_setup_fc_82598 - Configure flow control settings
202 * @hw: pointer to hardware structure
203 * @packetbuf_num: packet buffer number (0-7)
204 *
205 * Configures the flow control settings based on SW configuration. This
206 * function is used for 802.3x flow control configuration only.
207 **/
208s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
209{
210 u32 frctl_reg;
211 u32 rmcs_reg;
212
213 if (packetbuf_num < 0 || packetbuf_num > 7) {
214 hw_dbg(hw, "Invalid packet buffer number [%d], expected range is"
215 " 0-7\n", packetbuf_num);
216 }
217
218 frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
219 frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
220
221 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
222 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
223
224 /*
225 * 10 gig parts do not have a word in the EEPROM to determine the
226 * default flow control setting, so we explicitly set it to full.
227 */
228 if (hw->fc.type == ixgbe_fc_default)
229 hw->fc.type = ixgbe_fc_full;
230
231 /*
232 * We want to save off the original Flow Control configuration just in
233 * case we get disconnected and then reconnected into a different hub
234 * or switch with different Flow Control capabilities.
235 */
236 hw->fc.original_type = hw->fc.type;
237
238 /*
239 * The possible values of the "flow_control" parameter are:
240 * 0: Flow control is completely disabled
241 * 1: Rx flow control is enabled (we can receive pause frames but not
242 * send pause frames).
243 * 2: Tx flow control is enabled (we can send pause frames but we do not
244 * support receiving pause frames)
245 * 3: Both Rx and Tx flow control (symmetric) are enabled.
246 * other: Invalid.
247 */
248 switch (hw->fc.type) {
249 case ixgbe_fc_none:
250 break;
251 case ixgbe_fc_rx_pause:
252 /*
253 * Rx Flow control is enabled,
254 * and Tx Flow control is disabled.
255 */
256 frctl_reg |= IXGBE_FCTRL_RFCE;
257 break;
258 case ixgbe_fc_tx_pause:
259 /*
260 * Tx Flow control is enabled, and Rx Flow control is disabled,
261 * by a software over-ride.
262 */
263 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
264 break;
265 case ixgbe_fc_full:
266 /*
267 * Flow control (both Rx and Tx) is enabled by a software
268 * over-ride.
269 */
270 frctl_reg |= IXGBE_FCTRL_RFCE;
271 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
272 break;
273 default:
274 /* We should never get here. The value should be 0-3. */
275 hw_dbg(hw, "Flow control param set incorrectly\n");
276 break;
277 }
278
279 /* Enable 802.3x based flow control settings. */
280 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
281 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
282
283 /*
284 * Check for invalid software configuration, zeros are completely
285 * invalid for all parameters used past this point, and if we enable
286 * flow control with zero water marks, we blast flow control packets.
287 */
288 if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
289 hw_dbg(hw, "Flow control structure initialized incorrectly\n");
290 return IXGBE_ERR_INVALID_LINK_SETTINGS;
291 }
292
293 /*
294 * We need to set up the Receive Threshold high and low water
295 * marks as well as (optionally) enabling the transmission of
296 * XON frames.
297 */
298 if (hw->fc.type & ixgbe_fc_tx_pause) {
299 if (hw->fc.send_xon) {
300 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
301 (hw->fc.low_water | IXGBE_FCRTL_XONE));
302 } else {
303 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
304 hw->fc.low_water);
305 }
306 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
307 (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
308 }
309
310 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time);
311 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
312
313 return 0;
314}
315
316/**
208 * ixgbe_setup_mac_link_82598 - Configures MAC link settings 317 * ixgbe_setup_mac_link_82598 - Configures MAC link settings
209 * @hw: pointer to hardware structure 318 * @hw: pointer to hardware structure
210 * 319 *
@@ -248,8 +357,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
248 } 357 }
249 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 358 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
250 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 359 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
251 hw_dbg(hw, 360 hw_dbg(hw, "Autonegotiation did not complete.\n");
252 "Autonegotiation did not complete.\n");
253 } 361 }
254 } 362 }
255 } 363 }
@@ -259,8 +367,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
259 * case we get disconnected and then reconnected into a different hub 367 * case we get disconnected and then reconnected into a different hub
260 * or switch with different Flow Control capabilities. 368 * or switch with different Flow Control capabilities.
261 */ 369 */
262 hw->fc.type = hw->fc.original_type; 370 hw->fc.original_type = hw->fc.type;
263 ixgbe_setup_fc(hw, 0); 371 ixgbe_setup_fc_82598(hw, 0);
264 372
265 /* Add delay to filter out noises during initial link setup */ 373 /* Add delay to filter out noises during initial link setup */
266 msleep(50); 374 msleep(50);
@@ -273,20 +381,35 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw)
273 * @hw: pointer to hardware structure 381 * @hw: pointer to hardware structure
274 * @speed: pointer to link speed 382 * @speed: pointer to link speed
275 * @link_up: true is link is up, false otherwise 383 * @link_up: true is link is up, false otherwise
384 * @link_up_wait_to_complete: bool used to wait for link up or not
276 * 385 *
277 * Reads the links register to determine if link is up and the current speed 386 * Reads the links register to determine if link is up and the current speed
278 **/ 387 **/
279static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, 388static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
280 bool *link_up) 389 ixgbe_link_speed *speed, bool *link_up,
390 bool link_up_wait_to_complete)
281{ 391{
282 u32 links_reg; 392 u32 links_reg;
393 u32 i;
283 394
284 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 395 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
285 396 if (link_up_wait_to_complete) {
286 if (links_reg & IXGBE_LINKS_UP) 397 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
287 *link_up = true; 398 if (links_reg & IXGBE_LINKS_UP) {
288 else 399 *link_up = true;
289 *link_up = false; 400 break;
401 } else {
402 *link_up = false;
403 }
404 msleep(100);
405 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
406 }
407 } else {
408 if (links_reg & IXGBE_LINKS_UP)
409 *link_up = true;
410 else
411 *link_up = false;
412 }
290 413
291 if (links_reg & IXGBE_LINKS_SPEED) 414 if (links_reg & IXGBE_LINKS_SPEED)
292 *speed = IXGBE_LINK_SPEED_10GB_FULL; 415 *speed = IXGBE_LINK_SPEED_10GB_FULL;
@@ -296,6 +419,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
296 return 0; 419 return 0;
297} 420}
298 421
422
299/** 423/**
300 * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed 424 * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed
301 * @hw: pointer to hardware structure 425 * @hw: pointer to hardware structure
@@ -306,18 +430,18 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed,
306 * Set the link speed in the AUTOC register and restarts link. 430 * Set the link speed in the AUTOC register and restarts link.
307 **/ 431 **/
308static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, 432static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
309 u32 speed, bool autoneg, 433 ixgbe_link_speed speed, bool autoneg,
310 bool autoneg_wait_to_complete) 434 bool autoneg_wait_to_complete)
311{ 435{
312 s32 status = 0; 436 s32 status = 0;
313 437
314 /* If speed is 10G, then check for CX4 or XAUI. */ 438 /* If speed is 10G, then check for CX4 or XAUI. */
315 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 439 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
316 (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) 440 (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) {
317 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 441 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
318 else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) 442 } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) {
319 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 443 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
320 else if (autoneg) { 444 } else if (autoneg) {
321 /* BX mode - Autonegotiate 1G */ 445 /* BX mode - Autonegotiate 1G */
322 if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) 446 if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD))
323 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; 447 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN;
@@ -336,7 +460,7 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
336 * ixgbe_hw This will write the AUTOC register based on the new 460 * ixgbe_hw This will write the AUTOC register based on the new
337 * stored values 461 * stored values
338 */ 462 */
339 hw->mac.ops.setup_link(hw); 463 ixgbe_setup_mac_link_82598(hw);
340 } 464 }
341 465
342 return status; 466 return status;
@@ -354,18 +478,17 @@ static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw,
354 **/ 478 **/
355static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) 479static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
356{ 480{
357 s32 status = 0; 481 s32 status;
358 482
359 /* Restart autonegotiation on PHY */ 483 /* Restart autonegotiation on PHY */
360 if (hw->phy.ops.setup_link) 484 status = hw->phy.ops.setup_link(hw);
361 status = hw->phy.ops.setup_link(hw);
362 485
363 /* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */ 486 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
364 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); 487 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
365 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; 488 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
366 489
367 /* Set up MAC */ 490 /* Set up MAC */
368 hw->mac.ops.setup_link(hw); 491 ixgbe_setup_mac_link_82598(hw);
369 492
370 return status; 493 return status;
371} 494}
@@ -379,23 +502,23 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw)
379 * 502 *
380 * Sets the link speed in the AUTOC register in the MAC and restarts link. 503 * Sets the link speed in the AUTOC register in the MAC and restarts link.
381 **/ 504 **/
382static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, 505static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
383 bool autoneg, 506 ixgbe_link_speed speed,
384 bool autoneg_wait_to_complete) 507 bool autoneg,
508 bool autoneg_wait_to_complete)
385{ 509{
386 s32 status = 0; 510 s32 status;
387 511
388 /* Setup the PHY according to input speed */ 512 /* Setup the PHY according to input speed */
389 if (hw->phy.ops.setup_link_speed) 513 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
390 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 514 autoneg_wait_to_complete);
391 autoneg_wait_to_complete);
392 515
393 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ 516 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */
394 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); 517 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX);
395 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; 518 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN;
396 519
397 /* Set up MAC */ 520 /* Set up MAC */
398 hw->mac.ops.setup_link(hw); 521 ixgbe_setup_mac_link_82598(hw);
399 522
400 return status; 523 return status;
401} 524}
@@ -404,7 +527,7 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed,
404 * ixgbe_reset_hw_82598 - Performs hardware reset 527 * ixgbe_reset_hw_82598 - Performs hardware reset
405 * @hw: pointer to hardware structure 528 * @hw: pointer to hardware structure
406 * 529 *
407 * Resets the hardware by reseting the transmit and receive units, masks and 530 * Resets the hardware by resetting the transmit and receive units, masks and
408 * clears all interrupts, performing a PHY reset, and performing a link (MAC) 531 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
409 * reset. 532 * reset.
410 **/ 533 **/
@@ -418,35 +541,44 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
418 u8 analog_val; 541 u8 analog_val;
419 542
420 /* Call adapter stop to disable tx/rx and clear interrupts */ 543 /* Call adapter stop to disable tx/rx and clear interrupts */
421 ixgbe_stop_adapter(hw); 544 hw->mac.ops.stop_adapter(hw);
422 545
423 /* 546 /*
424 * Power up the Atlas TX lanes if they are currently powered down. 547 * Power up the Atlas Tx lanes if they are currently powered down.
425 * Atlas TX lanes are powered down for MAC loopback tests, but 548 * Atlas Tx lanes are powered down for MAC loopback tests, but
426 * they are not automatically restored on reset. 549 * they are not automatically restored on reset.
427 */ 550 */
428 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 551 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
429 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { 552 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
430 /* Enable TX Atlas so packets can be transmitted again */ 553 /* Enable Tx Atlas so packets can be transmitted again */
431 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 554 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
555 &analog_val);
432 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; 556 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
433 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); 557 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
558 analog_val);
434 559
435 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); 560 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
561 &analog_val);
436 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 562 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
437 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); 563 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
564 analog_val);
438 565
439 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); 566 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
567 &analog_val);
440 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 568 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
441 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); 569 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
570 analog_val);
442 571
443 ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); 572 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
573 &analog_val);
444 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 574 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
445 ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); 575 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
576 analog_val);
446 } 577 }
447 578
448 /* Reset PHY */ 579 /* Reset PHY */
449 ixgbe_reset_phy(hw); 580 if (hw->phy.reset_disable == false)
581 hw->phy.ops.reset(hw);
450 582
451 /* 583 /*
452 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 584 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
@@ -499,29 +631,311 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
499 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 631 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
500 } else { 632 } else {
501 hw->mac.link_attach_type = 633 hw->mac.link_attach_type =
502 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); 634 (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE);
503 hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); 635 hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK);
504 hw->mac.link_settings_loaded = true; 636 hw->mac.link_settings_loaded = true;
505 } 637 }
506 638
507 /* Store the permanent mac address */ 639 /* Store the permanent mac address */
508 ixgbe_get_mac_addr(hw, hw->mac.perm_addr); 640 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
509 641
510 return status; 642 return status;
511} 643}
512 644
645/**
646 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
647 * @hw: pointer to hardware struct
648 * @rar: receive address register index to associate with a VMDq index
649 * @vmdq: VMDq set index
650 **/
651s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
652{
653 u32 rar_high;
654
655 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
656 rar_high &= ~IXGBE_RAH_VIND_MASK;
657 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
658 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
659 return 0;
660}
661
662/**
663 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
664 * @hw: pointer to hardware struct
665 * @rar: receive address register index to associate with a VMDq index
666 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
667 **/
668static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
669{
670 u32 rar_high;
671 u32 rar_entries = hw->mac.num_rar_entries;
672
673 if (rar < rar_entries) {
674 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
675 if (rar_high & IXGBE_RAH_VIND_MASK) {
676 rar_high &= ~IXGBE_RAH_VIND_MASK;
677 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
678 }
679 } else {
680 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
681 }
682
683 return 0;
684}
685
686/**
687 * ixgbe_set_vfta_82598 - Set VLAN filter table
688 * @hw: pointer to hardware structure
689 * @vlan: VLAN id to write to VLAN filter
690 * @vind: VMDq output index that maps queue to VLAN id in VFTA
691 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
692 *
693 * Turn on/off specified VLAN in the VLAN filter table.
694 **/
695s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
696 bool vlan_on)
697{
698 u32 regindex;
699 u32 bitindex;
700 u32 bits;
701 u32 vftabyte;
702
703 if (vlan > 4095)
704 return IXGBE_ERR_PARAM;
705
706 /* Determine 32-bit word position in array */
707 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
708
709 /* Determine the location of the (VMD) queue index */
710 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
711 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
712
713 /* Set the nibble for VMD queue index */
714 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
715 bits &= (~(0x0F << bitindex));
716 bits |= (vind << bitindex);
717 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
718
719 /* Determine the location of the bit for this VLAN id */
720 bitindex = vlan & 0x1F; /* lower five bits */
721
722 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
723 if (vlan_on)
724 /* Turn on this VLAN id */
725 bits |= (1 << bitindex);
726 else
727 /* Turn off this VLAN id */
728 bits &= ~(1 << bitindex);
729 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
730
731 return 0;
732}
733
734/**
735 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
736 * @hw: pointer to hardware structure
737 *
738 * Clears the VLAN filer table, and the VMDq index associated with the filter
739 **/
740static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
741{
742 u32 offset;
743 u32 vlanbyte;
744
745 for (offset = 0; offset < hw->mac.vft_size; offset++)
746 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
747
748 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
749 for (offset = 0; offset < hw->mac.vft_size; offset++)
750 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
751 0);
752
753 return 0;
754}
755
756/**
757 * ixgbe_blink_led_start_82598 - Blink LED based on index.
758 * @hw: pointer to hardware structure
759 * @index: led number to blink
760 **/
761static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index)
762{
763 ixgbe_link_speed speed = 0;
764 bool link_up = 0;
765 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
766 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
767
768 /*
769 * Link must be up to auto-blink the LEDs on the 82598EB MAC;
770 * force it if link is down.
771 */
772 hw->mac.ops.check_link(hw, &speed, &link_up, false);
773
774 if (!link_up) {
775 autoc_reg |= IXGBE_AUTOC_FLU;
776 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
777 msleep(10);
778 }
779
780 led_reg &= ~IXGBE_LED_MODE_MASK(index);
781 led_reg |= IXGBE_LED_BLINK(index);
782 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
783 IXGBE_WRITE_FLUSH(hw);
784
785 return 0;
786}
787
788/**
789 * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index.
790 * @hw: pointer to hardware structure
791 * @index: led number to stop blinking
792 **/
793static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
794{
795 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
796 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
797
798 autoc_reg &= ~IXGBE_AUTOC_FLU;
799 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
800 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
801
802 led_reg &= ~IXGBE_LED_MODE_MASK(index);
803 led_reg &= ~IXGBE_LED_BLINK(index);
804 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
805 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
806 IXGBE_WRITE_FLUSH(hw);
807
808 return 0;
809}
810
811/**
812 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
813 * @hw: pointer to hardware structure
814 * @reg: analog register to read
815 * @val: read value
816 *
817 * Performs read operation to Atlas analog register specified.
818 **/
819s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
820{
821 u32 atlas_ctl;
822
823 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
824 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
825 IXGBE_WRITE_FLUSH(hw);
826 udelay(10);
827 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
828 *val = (u8)atlas_ctl;
829
830 return 0;
831}
832
833/**
834 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
835 * @hw: pointer to hardware structure
836 * @reg: atlas register to write
837 * @val: value to write
838 *
839 * Performs write operation to Atlas analog register specified.
840 **/
841s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
842{
843 u32 atlas_ctl;
844
845 atlas_ctl = (reg << 8) | val;
846 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
847 IXGBE_WRITE_FLUSH(hw);
848 udelay(10);
849
850 return 0;
851}
852
853/**
854 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
855 * @hw: pointer to hardware structure
856 *
857 * Determines physical layer capabilities of the current configuration.
858 **/
859s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
860{
861 s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
862
863 switch (hw->device_id) {
864 case IXGBE_DEV_ID_82598EB_CX4:
865 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
866 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
867 break;
868 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
869 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
870 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
871 break;
872 case IXGBE_DEV_ID_82598EB_XF_LR:
873 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
874 break;
875
876 default:
877 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
878 break;
879 }
880
881 return physical_layer;
882}
883
513static struct ixgbe_mac_operations mac_ops_82598 = { 884static struct ixgbe_mac_operations mac_ops_82598 = {
514 .reset = &ixgbe_reset_hw_82598, 885 .init_hw = &ixgbe_init_hw_generic,
886 .reset_hw = &ixgbe_reset_hw_82598,
887 .start_hw = &ixgbe_start_hw_generic,
888 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
515 .get_media_type = &ixgbe_get_media_type_82598, 889 .get_media_type = &ixgbe_get_media_type_82598,
890 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
891 .get_mac_addr = &ixgbe_get_mac_addr_generic,
892 .stop_adapter = &ixgbe_stop_adapter_generic,
893 .read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
894 .write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
516 .setup_link = &ixgbe_setup_mac_link_82598, 895 .setup_link = &ixgbe_setup_mac_link_82598,
517 .check_link = &ixgbe_check_mac_link_82598,
518 .setup_link_speed = &ixgbe_setup_mac_link_speed_82598, 896 .setup_link_speed = &ixgbe_setup_mac_link_speed_82598,
519 .get_link_settings = &ixgbe_get_link_settings_82598, 897 .check_link = &ixgbe_check_mac_link_82598,
898 .get_link_capabilities = &ixgbe_get_link_capabilities_82598,
899 .led_on = &ixgbe_led_on_generic,
900 .led_off = &ixgbe_led_off_generic,
901 .blink_led_start = &ixgbe_blink_led_start_82598,
902 .blink_led_stop = &ixgbe_blink_led_stop_82598,
903 .set_rar = &ixgbe_set_rar_generic,
904 .clear_rar = &ixgbe_clear_rar_generic,
905 .set_vmdq = &ixgbe_set_vmdq_82598,
906 .clear_vmdq = &ixgbe_clear_vmdq_82598,
907 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
908 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
909 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
910 .enable_mc = &ixgbe_enable_mc_generic,
911 .disable_mc = &ixgbe_disable_mc_generic,
912 .clear_vfta = &ixgbe_clear_vfta_82598,
913 .set_vfta = &ixgbe_set_vfta_82598,
914 .setup_fc = &ixgbe_setup_fc_82598,
915};
916
917static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
918 .init_params = &ixgbe_init_eeprom_params_generic,
919 .read = &ixgbe_read_eeprom_generic,
920 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
921 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
922};
923
924static struct ixgbe_phy_operations phy_ops_82598 = {
925 .identify = &ixgbe_identify_phy_generic,
926 /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */
927 .reset = &ixgbe_reset_phy_generic,
928 .read_reg = &ixgbe_read_phy_reg_generic,
929 .write_reg = &ixgbe_write_phy_reg_generic,
930 .setup_link = &ixgbe_setup_phy_link_generic,
931 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
520}; 932};
521 933
522struct ixgbe_info ixgbe_82598_info = { 934struct ixgbe_info ixgbe_82598_info = {
523 .mac = ixgbe_mac_82598EB, 935 .mac = ixgbe_mac_82598EB,
524 .get_invariants = &ixgbe_get_invariants_82598, 936 .get_invariants = &ixgbe_get_invariants_82598,
525 .mac_ops = &mac_ops_82598, 937 .mac_ops = &mac_ops_82598,
938 .eeprom_ops = &eeprom_ops_82598,
939 .phy_ops = &phy_ops_82598,
526}; 940};
527 941
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 7fd6aeb1b021..f67c68404bb3 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -33,20 +32,28 @@
33#include "ixgbe_common.h" 32#include "ixgbe_common.h"
34#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
35 34
36static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
37
38static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); 35static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
36static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
39static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 37static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
40static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 38static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
39static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
40static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
41static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
42 u16 count);
43static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
44static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
45static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
41static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); 47static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
42 48
43static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); 49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
44static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); 50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
45static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
46static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); 52static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
47 54
48/** 55/**
49 * ixgbe_start_hw - Prepare hardware for TX/RX 56 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
50 * @hw: pointer to hardware structure 57 * @hw: pointer to hardware structure
51 * 58 *
52 * Starts the hardware by filling the bus info structure and media type, clears 59 * Starts the hardware by filling the bus info structure and media type, clears
@@ -54,7 +61,7 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr);
54 * table, VLAN filter table, calls routine to set up link and flow control 61 * table, VLAN filter table, calls routine to set up link and flow control
55 * settings, and leaves transmit and receive units disabled and uninitialized 62 * settings, and leaves transmit and receive units disabled and uninitialized
56 **/ 63 **/
57s32 ixgbe_start_hw(struct ixgbe_hw *hw) 64s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
58{ 65{
59 u32 ctrl_ext; 66 u32 ctrl_ext;
60 67
@@ -62,22 +69,22 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
62 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 69 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
63 70
64 /* Identify the PHY */ 71 /* Identify the PHY */
65 ixgbe_identify_phy(hw); 72 hw->phy.ops.identify(hw);
66 73
67 /* 74 /*
68 * Store MAC address from RAR0, clear receive address registers, and 75 * Store MAC address from RAR0, clear receive address registers, and
69 * clear the multicast table 76 * clear the multicast table
70 */ 77 */
71 ixgbe_init_rx_addrs(hw); 78 hw->mac.ops.init_rx_addrs(hw);
72 79
73 /* Clear the VLAN filter table */ 80 /* Clear the VLAN filter table */
74 ixgbe_clear_vfta(hw); 81 hw->mac.ops.clear_vfta(hw);
75 82
76 /* Set up link */ 83 /* Set up link */
77 hw->mac.ops.setup_link(hw); 84 hw->mac.ops.setup_link(hw);
78 85
79 /* Clear statistics registers */ 86 /* Clear statistics registers */
80 ixgbe_clear_hw_cntrs(hw); 87 hw->mac.ops.clear_hw_cntrs(hw);
81 88
82 /* Set No Snoop Disable */ 89 /* Set No Snoop Disable */
83 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 90 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -92,34 +99,34 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
92} 99}
93 100
94/** 101/**
95 * ixgbe_init_hw - Generic hardware initialization 102 * ixgbe_init_hw_generic - Generic hardware initialization
96 * @hw: pointer to hardware structure 103 * @hw: pointer to hardware structure
97 * 104 *
98 * Initialize the hardware by reseting the hardware, filling the bus info 105 * Initialize the hardware by resetting the hardware, filling the bus info
99 * structure and media type, clears all on chip counters, initializes receive 106 * structure and media type, clears all on chip counters, initializes receive
100 * address registers, multicast table, VLAN filter table, calls routine to set 107 * address registers, multicast table, VLAN filter table, calls routine to set
101 * up link and flow control settings, and leaves transmit and receive units 108 * up link and flow control settings, and leaves transmit and receive units
102 * disabled and uninitialized 109 * disabled and uninitialized
103 **/ 110 **/
104s32 ixgbe_init_hw(struct ixgbe_hw *hw) 111s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
105{ 112{
106 /* Reset the hardware */ 113 /* Reset the hardware */
107 hw->mac.ops.reset(hw); 114 hw->mac.ops.reset_hw(hw);
108 115
109 /* Start the HW */ 116 /* Start the HW */
110 ixgbe_start_hw(hw); 117 hw->mac.ops.start_hw(hw);
111 118
112 return 0; 119 return 0;
113} 120}
114 121
115/** 122/**
116 * ixgbe_clear_hw_cntrs - Generic clear hardware counters 123 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
117 * @hw: pointer to hardware structure 124 * @hw: pointer to hardware structure
118 * 125 *
119 * Clears all hardware statistics counters by reading them from the hardware 126 * Clears all hardware statistics counters by reading them from the hardware
120 * Statistics counters are clear on read. 127 * Statistics counters are clear on read.
121 **/ 128 **/
122static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) 129s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
123{ 130{
124 u16 i = 0; 131 u16 i = 0;
125 132
@@ -191,7 +198,36 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
191} 198}
192 199
193/** 200/**
194 * ixgbe_get_mac_addr - Generic get MAC address 201 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
202 * @hw: pointer to hardware structure
203 * @pba_num: stores the part number from the EEPROM
204 *
205 * Reads the part number from the EEPROM.
206 **/
207s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
208{
209 s32 ret_val;
210 u16 data;
211
212 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
213 if (ret_val) {
214 hw_dbg(hw, "NVM Read Error\n");
215 return ret_val;
216 }
217 *pba_num = (u32)(data << 16);
218
219 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
220 if (ret_val) {
221 hw_dbg(hw, "NVM Read Error\n");
222 return ret_val;
223 }
224 *pba_num |= data;
225
226 return 0;
227}
228
229/**
230 * ixgbe_get_mac_addr_generic - Generic get MAC address
195 * @hw: pointer to hardware structure 231 * @hw: pointer to hardware structure
196 * @mac_addr: Adapter MAC address 232 * @mac_addr: Adapter MAC address
197 * 233 *
@@ -199,7 +235,7 @@ static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
199 * A reset of the adapter must be performed prior to calling this function 235 * A reset of the adapter must be performed prior to calling this function
200 * in order for the MAC address to have been loaded from the EEPROM into RAR0 236 * in order for the MAC address to have been loaded from the EEPROM into RAR0
201 **/ 237 **/
202s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) 238s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
203{ 239{
204 u32 rar_high; 240 u32 rar_high;
205 u32 rar_low; 241 u32 rar_low;
@@ -217,30 +253,8 @@ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
217 return 0; 253 return 0;
218} 254}
219 255
220s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
221{
222 s32 ret_val;
223 u16 data;
224
225 ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data);
226 if (ret_val) {
227 hw_dbg(hw, "NVM Read Error\n");
228 return ret_val;
229 }
230 *part_num = (u32)(data << 16);
231
232 ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data);
233 if (ret_val) {
234 hw_dbg(hw, "NVM Read Error\n");
235 return ret_val;
236 }
237 *part_num |= data;
238
239 return 0;
240}
241
242/** 256/**
243 * ixgbe_stop_adapter - Generic stop TX/RX units 257 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
244 * @hw: pointer to hardware structure 258 * @hw: pointer to hardware structure
245 * 259 *
246 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, 260 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
@@ -248,7 +262,7 @@ s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num)
248 * the shared code and drivers to determine if the adapter is in a stopped 262 * the shared code and drivers to determine if the adapter is in a stopped
249 * state and should not touch the hardware. 263 * state and should not touch the hardware.
250 **/ 264 **/
251s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) 265s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
252{ 266{
253 u32 number_of_queues; 267 u32 number_of_queues;
254 u32 reg_val; 268 u32 reg_val;
@@ -264,6 +278,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
264 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 278 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
265 reg_val &= ~(IXGBE_RXCTRL_RXEN); 279 reg_val &= ~(IXGBE_RXCTRL_RXEN);
266 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 280 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
281 IXGBE_WRITE_FLUSH(hw);
267 msleep(2); 282 msleep(2);
268 283
269 /* Clear interrupt mask to stop from interrupts being generated */ 284 /* Clear interrupt mask to stop from interrupts being generated */
@@ -273,7 +288,7 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
273 IXGBE_READ_REG(hw, IXGBE_EICR); 288 IXGBE_READ_REG(hw, IXGBE_EICR);
274 289
275 /* Disable the transmit unit. Each queue must be disabled. */ 290 /* Disable the transmit unit. Each queue must be disabled. */
276 number_of_queues = hw->mac.num_tx_queues; 291 number_of_queues = hw->mac.max_tx_queues;
277 for (i = 0; i < number_of_queues; i++) { 292 for (i = 0; i < number_of_queues; i++) {
278 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 293 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
279 if (reg_val & IXGBE_TXDCTL_ENABLE) { 294 if (reg_val & IXGBE_TXDCTL_ENABLE) {
@@ -282,15 +297,22 @@ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
282 } 297 }
283 } 298 }
284 299
300 /*
301 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
302 * access and verify no pending requests
303 */
304 if (ixgbe_disable_pcie_master(hw) != 0)
305 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
306
285 return 0; 307 return 0;
286} 308}
287 309
288/** 310/**
289 * ixgbe_led_on - Turns on the software controllable LEDs. 311 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
290 * @hw: pointer to hardware structure 312 * @hw: pointer to hardware structure
291 * @index: led number to turn on 313 * @index: led number to turn on
292 **/ 314 **/
293s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) 315s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
294{ 316{
295 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 317 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
296 318
@@ -304,11 +326,11 @@ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
304} 326}
305 327
306/** 328/**
307 * ixgbe_led_off - Turns off the software controllable LEDs. 329 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
308 * @hw: pointer to hardware structure 330 * @hw: pointer to hardware structure
309 * @index: led number to turn off 331 * @index: led number to turn off
310 **/ 332 **/
311s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) 333s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
312{ 334{
313 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 335 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
314 336
@@ -321,15 +343,14 @@ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
321 return 0; 343 return 0;
322} 344}
323 345
324
325/** 346/**
326 * ixgbe_init_eeprom - Initialize EEPROM params 347 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
327 * @hw: pointer to hardware structure 348 * @hw: pointer to hardware structure
328 * 349 *
329 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 350 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
330 * ixgbe_hw struct in order to set up EEPROM access. 351 * ixgbe_hw struct in order to set up EEPROM access.
331 **/ 352 **/
332s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) 353s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
333{ 354{
334 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 355 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
335 u32 eec; 356 u32 eec;
@@ -337,6 +358,9 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
337 358
338 if (eeprom->type == ixgbe_eeprom_uninitialized) { 359 if (eeprom->type == ixgbe_eeprom_uninitialized) {
339 eeprom->type = ixgbe_eeprom_none; 360 eeprom->type = ixgbe_eeprom_none;
361 /* Set default semaphore delay to 10ms which is a well
362 * tested value */
363 eeprom->semaphore_delay = 10;
340 364
341 /* 365 /*
342 * Check for EEPROM present first. 366 * Check for EEPROM present first.
@@ -369,18 +393,85 @@ s32 ixgbe_init_eeprom(struct ixgbe_hw *hw)
369} 393}
370 394
371/** 395/**
372 * ixgbe_read_eeprom - Read EEPROM word using EERD 396 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
397 * @hw: pointer to hardware structure
398 * @offset: offset within the EEPROM to be read
399 * @data: read 16 bit value from EEPROM
400 *
401 * Reads 16 bit value from EEPROM through bit-bang method
402 **/
403s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
404 u16 *data)
405{
406 s32 status;
407 u16 word_in;
408 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
409
410 hw->eeprom.ops.init_params(hw);
411
412 if (offset >= hw->eeprom.word_size) {
413 status = IXGBE_ERR_EEPROM;
414 goto out;
415 }
416
417 /* Prepare the EEPROM for reading */
418 status = ixgbe_acquire_eeprom(hw);
419
420 if (status == 0) {
421 if (ixgbe_ready_eeprom(hw) != 0) {
422 ixgbe_release_eeprom(hw);
423 status = IXGBE_ERR_EEPROM;
424 }
425 }
426
427 if (status == 0) {
428 ixgbe_standby_eeprom(hw);
429
430 /*
431 * Some SPI eeproms use the 8th address bit embedded in the
432 * opcode
433 */
434 if ((hw->eeprom.address_bits == 8) && (offset >= 128))
435 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
436
437 /* Send the READ command (opcode + addr) */
438 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
439 IXGBE_EEPROM_OPCODE_BITS);
440 ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
441 hw->eeprom.address_bits);
442
443 /* Read the data. */
444 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
445 *data = (word_in >> 8) | (word_in << 8);
446
447 /* End this read operation */
448 ixgbe_release_eeprom(hw);
449 }
450
451out:
452 return status;
453}
454
455/**
456 * ixgbe_read_eeprom_generic - Read EEPROM word using EERD
373 * @hw: pointer to hardware structure 457 * @hw: pointer to hardware structure
374 * @offset: offset of word in the EEPROM to read 458 * @offset: offset of word in the EEPROM to read
375 * @data: word read from the EEPROM 459 * @data: word read from the EEPROM
376 * 460 *
377 * Reads a 16 bit word from the EEPROM using the EERD register. 461 * Reads a 16 bit word from the EEPROM using the EERD register.
378 **/ 462 **/
379s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) 463s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
380{ 464{
381 u32 eerd; 465 u32 eerd;
382 s32 status; 466 s32 status;
383 467
468 hw->eeprom.ops.init_params(hw);
469
470 if (offset >= hw->eeprom.word_size) {
471 status = IXGBE_ERR_EEPROM;
472 goto out;
473 }
474
384 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + 475 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) +
385 IXGBE_EEPROM_READ_REG_START; 476 IXGBE_EEPROM_READ_REG_START;
386 477
@@ -389,10 +480,11 @@ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
389 480
390 if (status == 0) 481 if (status == 0)
391 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 482 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
392 IXGBE_EEPROM_READ_REG_DATA); 483 IXGBE_EEPROM_READ_REG_DATA);
393 else 484 else
394 hw_dbg(hw, "Eeprom read timed out\n"); 485 hw_dbg(hw, "Eeprom read timed out\n");
395 486
487out:
396 return status; 488 return status;
397} 489}
398 490
@@ -420,6 +512,58 @@ static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw)
420} 512}
421 513
422/** 514/**
515 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
516 * @hw: pointer to hardware structure
517 *
518 * Prepares EEPROM for access using bit-bang method. This function should
519 * be called before issuing a command to the EEPROM.
520 **/
521static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
522{
523 s32 status = 0;
524 u32 eec;
525 u32 i;
526
527 if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
528 status = IXGBE_ERR_SWFW_SYNC;
529
530 if (status == 0) {
531 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
532
533 /* Request EEPROM Access */
534 eec |= IXGBE_EEC_REQ;
535 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
536
537 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
538 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
539 if (eec & IXGBE_EEC_GNT)
540 break;
541 udelay(5);
542 }
543
544 /* Release if grant not acquired */
545 if (!(eec & IXGBE_EEC_GNT)) {
546 eec &= ~IXGBE_EEC_REQ;
547 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
548 hw_dbg(hw, "Could not acquire EEPROM grant\n");
549
550 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
551 status = IXGBE_ERR_EEPROM;
552 }
553 }
554
555 /* Setup EEPROM for Read/Write */
556 if (status == 0) {
557 /* Clear CS and SK */
558 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
559 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
560 IXGBE_WRITE_FLUSH(hw);
561 udelay(1);
562 }
563 return status;
564}
565
566/**
423 * ixgbe_get_eeprom_semaphore - Get hardware semaphore 567 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
424 * @hw: pointer to hardware structure 568 * @hw: pointer to hardware structure
425 * 569 *
@@ -475,7 +619,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
475 */ 619 */
476 if (i >= timeout) { 620 if (i >= timeout) {
477 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 621 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore "
478 "not granted.\n"); 622 "not granted.\n");
479 ixgbe_release_eeprom_semaphore(hw); 623 ixgbe_release_eeprom_semaphore(hw);
480 status = IXGBE_ERR_EEPROM; 624 status = IXGBE_ERR_EEPROM;
481 } 625 }
@@ -503,6 +647,217 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
503} 647}
504 648
505/** 649/**
650 * ixgbe_ready_eeprom - Polls for EEPROM ready
651 * @hw: pointer to hardware structure
652 **/
653static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
654{
655 s32 status = 0;
656 u16 i;
657 u8 spi_stat_reg;
658
659 /*
660 * Read "Status Register" repeatedly until the LSB is cleared. The
661 * EEPROM will signal that the command has been completed by clearing
662 * bit 0 of the internal status register. If it's not cleared within
663 * 5 milliseconds, then error out.
664 */
665 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
666 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
667 IXGBE_EEPROM_OPCODE_BITS);
668 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
669 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
670 break;
671
672 udelay(5);
673 ixgbe_standby_eeprom(hw);
674 };
675
676 /*
677 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
678 * devices (and only 0-5mSec on 5V devices)
679 */
680 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
681 hw_dbg(hw, "SPI EEPROM Status error\n");
682 status = IXGBE_ERR_EEPROM;
683 }
684
685 return status;
686}
687
688/**
689 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
690 * @hw: pointer to hardware structure
691 **/
692static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
693{
694 u32 eec;
695
696 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
697
698 /* Toggle CS to flush commands */
699 eec |= IXGBE_EEC_CS;
700 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
701 IXGBE_WRITE_FLUSH(hw);
702 udelay(1);
703 eec &= ~IXGBE_EEC_CS;
704 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
705 IXGBE_WRITE_FLUSH(hw);
706 udelay(1);
707}
708
709/**
710 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
711 * @hw: pointer to hardware structure
712 * @data: data to send to the EEPROM
713 * @count: number of bits to shift out
714 **/
715static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
716 u16 count)
717{
718 u32 eec;
719 u32 mask;
720 u32 i;
721
722 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
723
724 /*
725 * Mask is used to shift "count" bits of "data" out to the EEPROM
726 * one bit at a time. Determine the starting bit based on count
727 */
728 mask = 0x01 << (count - 1);
729
730 for (i = 0; i < count; i++) {
731 /*
732 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
733 * "1", and then raising and then lowering the clock (the SK
734 * bit controls the clock input to the EEPROM). A "0" is
735 * shifted out to the EEPROM by setting "DI" to "0" and then
736 * raising and then lowering the clock.
737 */
738 if (data & mask)
739 eec |= IXGBE_EEC_DI;
740 else
741 eec &= ~IXGBE_EEC_DI;
742
743 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
744 IXGBE_WRITE_FLUSH(hw);
745
746 udelay(1);
747
748 ixgbe_raise_eeprom_clk(hw, &eec);
749 ixgbe_lower_eeprom_clk(hw, &eec);
750
751 /*
752 * Shift mask to signify next bit of data to shift in to the
753 * EEPROM
754 */
755 mask = mask >> 1;
756 };
757
758 /* We leave the "DI" bit set to "0" when we leave this routine. */
759 eec &= ~IXGBE_EEC_DI;
760 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
761 IXGBE_WRITE_FLUSH(hw);
762}
763
764/**
765 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
766 * @hw: pointer to hardware structure
767 **/
768static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
769{
770 u32 eec;
771 u32 i;
772 u16 data = 0;
773
774 /*
775 * In order to read a register from the EEPROM, we need to shift
776 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
777 * the clock input to the EEPROM (setting the SK bit), and then reading
778 * the value of the "DO" bit. During this "shifting in" process the
779 * "DI" bit should always be clear.
780 */
781 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
782
783 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
784
785 for (i = 0; i < count; i++) {
786 data = data << 1;
787 ixgbe_raise_eeprom_clk(hw, &eec);
788
789 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
790
791 eec &= ~(IXGBE_EEC_DI);
792 if (eec & IXGBE_EEC_DO)
793 data |= 1;
794
795 ixgbe_lower_eeprom_clk(hw, &eec);
796 }
797
798 return data;
799}
800
801/**
802 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
803 * @hw: pointer to hardware structure
804 * @eec: EEC register's current value
805 **/
806static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
807{
808 /*
809 * Raise the clock input to the EEPROM
810 * (setting the SK bit), then delay
811 */
812 *eec = *eec | IXGBE_EEC_SK;
813 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
814 IXGBE_WRITE_FLUSH(hw);
815 udelay(1);
816}
817
818/**
819 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
820 * @hw: pointer to hardware structure
821 * @eecd: EECD's current value
822 **/
823static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
824{
825 /*
826 * Lower the clock input to the EEPROM (clearing the SK bit), then
827 * delay
828 */
829 *eec = *eec & ~IXGBE_EEC_SK;
830 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
831 IXGBE_WRITE_FLUSH(hw);
832 udelay(1);
833}
834
835/**
836 * ixgbe_release_eeprom - Release EEPROM, release semaphores
837 * @hw: pointer to hardware structure
838 **/
839static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
840{
841 u32 eec;
842
843 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
844
845 eec |= IXGBE_EEC_CS; /* Pull CS high */
846 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
847
848 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
849 IXGBE_WRITE_FLUSH(hw);
850
851 udelay(1);
852
853 /* Stop requesting EEPROM access */
854 eec &= ~IXGBE_EEC_REQ;
855 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
856
857 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
858}
859
860/**
506 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 861 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
507 * @hw: pointer to hardware structure 862 * @hw: pointer to hardware structure
508 **/ 863 **/
@@ -517,7 +872,7 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
517 872
518 /* Include 0x0-0x3F in the checksum */ 873 /* Include 0x0-0x3F in the checksum */
519 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 874 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
520 if (ixgbe_read_eeprom(hw, i, &word) != 0) { 875 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
521 hw_dbg(hw, "EEPROM read failed\n"); 876 hw_dbg(hw, "EEPROM read failed\n");
522 break; 877 break;
523 } 878 }
@@ -526,15 +881,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
526 881
527 /* Include all data from pointers except for the fw pointer */ 882 /* Include all data from pointers except for the fw pointer */
528 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { 883 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
529 ixgbe_read_eeprom(hw, i, &pointer); 884 hw->eeprom.ops.read(hw, i, &pointer);
530 885
531 /* Make sure the pointer seems valid */ 886 /* Make sure the pointer seems valid */
532 if (pointer != 0xFFFF && pointer != 0) { 887 if (pointer != 0xFFFF && pointer != 0) {
533 ixgbe_read_eeprom(hw, pointer, &length); 888 hw->eeprom.ops.read(hw, pointer, &length);
534 889
535 if (length != 0xFFFF && length != 0) { 890 if (length != 0xFFFF && length != 0) {
536 for (j = pointer+1; j <= pointer+length; j++) { 891 for (j = pointer+1; j <= pointer+length; j++) {
537 ixgbe_read_eeprom(hw, j, &word); 892 hw->eeprom.ops.read(hw, j, &word);
538 checksum += word; 893 checksum += word;
539 } 894 }
540 } 895 }
@@ -547,14 +902,15 @@ static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
547} 902}
548 903
549/** 904/**
550 * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum 905 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
551 * @hw: pointer to hardware structure 906 * @hw: pointer to hardware structure
552 * @checksum_val: calculated checksum 907 * @checksum_val: calculated checksum
553 * 908 *
554 * Performs checksum calculation and validates the EEPROM checksum. If the 909 * Performs checksum calculation and validates the EEPROM checksum. If the
555 * caller does not need checksum_val, the value can be NULL. 910 * caller does not need checksum_val, the value can be NULL.
556 **/ 911 **/
557s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) 912s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
913 u16 *checksum_val)
558{ 914{
559 s32 status; 915 s32 status;
560 u16 checksum; 916 u16 checksum;
@@ -565,12 +921,12 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
565 * not continue or we could be in for a very long wait while every 921 * not continue or we could be in for a very long wait while every
566 * EEPROM read fails 922 * EEPROM read fails
567 */ 923 */
568 status = ixgbe_read_eeprom(hw, 0, &checksum); 924 status = hw->eeprom.ops.read(hw, 0, &checksum);
569 925
570 if (status == 0) { 926 if (status == 0) {
571 checksum = ixgbe_calc_eeprom_checksum(hw); 927 checksum = ixgbe_calc_eeprom_checksum(hw);
572 928
573 ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 929 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
574 930
575 /* 931 /*
576 * Verify read checksum from EEPROM is the same as 932 * Verify read checksum from EEPROM is the same as
@@ -590,6 +946,33 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
590} 946}
591 947
592/** 948/**
949 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
950 * @hw: pointer to hardware structure
951 **/
952s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
953{
954 s32 status;
955 u16 checksum;
956
957 /*
958 * Read the first word from the EEPROM. If this times out or fails, do
959 * not continue or we could be in for a very long wait while every
960 * EEPROM read fails
961 */
962 status = hw->eeprom.ops.read(hw, 0, &checksum);
963
964 if (status == 0) {
965 checksum = ixgbe_calc_eeprom_checksum(hw);
966 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
967 checksum);
968 } else {
969 hw_dbg(hw, "EEPROM read failed\n");
970 }
971
972 return status;
973}
974
975/**
593 * ixgbe_validate_mac_addr - Validate MAC address 976 * ixgbe_validate_mac_addr - Validate MAC address
594 * @mac_addr: pointer to MAC address. 977 * @mac_addr: pointer to MAC address.
595 * 978 *
@@ -607,61 +990,140 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
607 status = IXGBE_ERR_INVALID_MAC_ADDR; 990 status = IXGBE_ERR_INVALID_MAC_ADDR;
608 /* Reject the zero address */ 991 /* Reject the zero address */
609 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 992 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
610 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) 993 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
611 status = IXGBE_ERR_INVALID_MAC_ADDR; 994 status = IXGBE_ERR_INVALID_MAC_ADDR;
612 995
613 return status; 996 return status;
614} 997}
615 998
616/** 999/**
617 * ixgbe_set_rar - Set RX address register 1000 * ixgbe_set_rar_generic - Set Rx address register
618 * @hw: pointer to hardware structure 1001 * @hw: pointer to hardware structure
619 * @addr: Address to put into receive address register
620 * @index: Receive address register to write 1002 * @index: Receive address register to write
621 * @vind: Vind to set RAR to 1003 * @addr: Address to put into receive address register
1004 * @vmdq: VMDq "set" or "pool" index
622 * @enable_addr: set flag that address is active 1005 * @enable_addr: set flag that address is active
623 * 1006 *
624 * Puts an ethernet address into a receive address register. 1007 * Puts an ethernet address into a receive address register.
625 **/ 1008 **/
626s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, 1009s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
627 u32 enable_addr) 1010 u32 enable_addr)
628{ 1011{
629 u32 rar_low, rar_high; 1012 u32 rar_low, rar_high;
1013 u32 rar_entries = hw->mac.num_rar_entries;
630 1014
631 /* 1015 /* setup VMDq pool selection before this RAR gets enabled */
632 * HW expects these in little endian so we reverse the byte order from 1016 hw->mac.ops.set_vmdq(hw, index, vmdq);
633 * network order (big endian) to little endian
634 */
635 rar_low = ((u32)addr[0] |
636 ((u32)addr[1] << 8) |
637 ((u32)addr[2] << 16) |
638 ((u32)addr[3] << 24));
639 1017
640 rar_high = ((u32)addr[4] | 1018 /* Make sure we are using a valid rar index range */
641 ((u32)addr[5] << 8) | 1019 if (index < rar_entries) {
642 ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); 1020 /*
1021 * HW expects these in little endian so we reverse the byte
1022 * order from network order (big endian) to little endian
1023 */
1024 rar_low = ((u32)addr[0] |
1025 ((u32)addr[1] << 8) |
1026 ((u32)addr[2] << 16) |
1027 ((u32)addr[3] << 24));
1028 /*
1029 * Some parts put the VMDq setting in the extra RAH bits,
1030 * so save everything except the lower 16 bits that hold part
1031 * of the address and the address valid bit.
1032 */
1033 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1034 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1035 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
643 1036
644 if (enable_addr != 0) 1037 if (enable_addr != 0)
645 rar_high |= IXGBE_RAH_AV; 1038 rar_high |= IXGBE_RAH_AV;
646 1039
647 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1040 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
648 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1041 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1042 } else {
1043 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1044 }
649 1045
650 return 0; 1046 return 0;
651} 1047}
652 1048
653/** 1049/**
654 * ixgbe_init_rx_addrs - Initializes receive address filters. 1050 * ixgbe_clear_rar_generic - Remove Rx address register
1051 * @hw: pointer to hardware structure
1052 * @index: Receive address register to write
1053 *
1054 * Clears an ethernet address from a receive address register.
1055 **/
1056s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1057{
1058 u32 rar_high;
1059 u32 rar_entries = hw->mac.num_rar_entries;
1060
1061 /* Make sure we are using a valid rar index range */
1062 if (index < rar_entries) {
1063 /*
1064 * Some parts put the VMDq setting in the extra RAH bits,
1065 * so save everything except the lower 16 bits that hold part
1066 * of the address and the address valid bit.
1067 */
1068 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1069 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1070
1071 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1072 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1073 } else {
1074 hw_dbg(hw, "RAR index %d is out of range.\n", index);
1075 }
1076
1077 /* clear VMDq pool/queue selection for this RAR */
1078 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
1079
1080 return 0;
1081}
1082
1083/**
1084 * ixgbe_enable_rar - Enable Rx address register
1085 * @hw: pointer to hardware structure
1086 * @index: index into the RAR table
1087 *
1088 * Enables the select receive address register.
1089 **/
1090static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index)
1091{
1092 u32 rar_high;
1093
1094 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1095 rar_high |= IXGBE_RAH_AV;
1096 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1097}
1098
1099/**
1100 * ixgbe_disable_rar - Disable Rx address register
1101 * @hw: pointer to hardware structure
1102 * @index: index into the RAR table
1103 *
1104 * Disables the select receive address register.
1105 **/
1106static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index)
1107{
1108 u32 rar_high;
1109
1110 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1111 rar_high &= (~IXGBE_RAH_AV);
1112 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1113}
1114
1115/**
1116 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
655 * @hw: pointer to hardware structure 1117 * @hw: pointer to hardware structure
656 * 1118 *
657 * Places the MAC address in receive address register 0 and clears the rest 1119 * Places the MAC address in receive address register 0 and clears the rest
658 * of the receive addresss registers. Clears the multicast table. Assumes 1120 * of the receive address registers. Clears the multicast table. Assumes
659 * the receiver is in reset when the routine is called. 1121 * the receiver is in reset when the routine is called.
660 **/ 1122 **/
661static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) 1123s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
662{ 1124{
663 u32 i; 1125 u32 i;
664 u32 rar_entries = hw->mac.num_rx_addrs; 1126 u32 rar_entries = hw->mac.num_rar_entries;
665 1127
666 /* 1128 /*
667 * If the current mac address is valid, assume it is a software override 1129 * If the current mac address is valid, assume it is a software override
@@ -671,29 +1133,30 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
671 if (ixgbe_validate_mac_addr(hw->mac.addr) == 1133 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
672 IXGBE_ERR_INVALID_MAC_ADDR) { 1134 IXGBE_ERR_INVALID_MAC_ADDR) {
673 /* Get the MAC address from the RAR0 for later reference */ 1135 /* Get the MAC address from the RAR0 for later reference */
674 ixgbe_get_mac_addr(hw, hw->mac.addr); 1136 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
675 1137
676 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1138 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
677 hw->mac.addr[0], hw->mac.addr[1], 1139 hw->mac.addr[0], hw->mac.addr[1],
678 hw->mac.addr[2]); 1140 hw->mac.addr[2]);
679 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1141 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
680 hw->mac.addr[4], hw->mac.addr[5]); 1142 hw->mac.addr[4], hw->mac.addr[5]);
681 } else { 1143 } else {
682 /* Setup the receive address. */ 1144 /* Setup the receive address. */
683 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1145 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
684 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", 1146 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
685 hw->mac.addr[0], hw->mac.addr[1], 1147 hw->mac.addr[0], hw->mac.addr[1],
686 hw->mac.addr[2]); 1148 hw->mac.addr[2]);
687 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1149 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
688 hw->mac.addr[4], hw->mac.addr[5]); 1150 hw->mac.addr[4], hw->mac.addr[5]);
689 1151
690 ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1152 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
691 } 1153 }
1154 hw->addr_ctrl.overflow_promisc = 0;
692 1155
693 hw->addr_ctrl.rar_used_count = 1; 1156 hw->addr_ctrl.rar_used_count = 1;
694 1157
695 /* Zero out the other receive addresses. */ 1158 /* Zero out the other receive addresses. */
696 hw_dbg(hw, "Clearing RAR[1-15]\n"); 1159 hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
697 for (i = 1; i < rar_entries; i++) { 1160 for (i = 1; i < rar_entries; i++) {
698 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1161 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
699 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1162 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -705,9 +1168,113 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
705 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1168 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
706 1169
707 hw_dbg(hw, " Clearing MTA\n"); 1170 hw_dbg(hw, " Clearing MTA\n");
708 for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) 1171 for (i = 0; i < hw->mac.mcft_size; i++)
709 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1172 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
710 1173
1174 if (hw->mac.ops.init_uta_tables)
1175 hw->mac.ops.init_uta_tables(hw);
1176
1177 return 0;
1178}
1179
1180/**
1181 * ixgbe_add_uc_addr - Adds a secondary unicast address.
1182 * @hw: pointer to hardware structure
1183 * @addr: new address
1184 *
1185 * Adds it to unused receive address register or goes into promiscuous mode.
1186 **/
1187static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
1188{
1189 u32 rar_entries = hw->mac.num_rar_entries;
1190 u32 rar;
1191
1192 hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
1193 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
1194
1195 /*
1196 * Place this address in the RAR if there is room,
1197 * else put the controller into promiscuous mode
1198 */
1199 if (hw->addr_ctrl.rar_used_count < rar_entries) {
1200 rar = hw->addr_ctrl.rar_used_count -
1201 hw->addr_ctrl.mc_addr_in_rar_count;
1202 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
1203 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar);
1204 hw->addr_ctrl.rar_used_count++;
1205 } else {
1206 hw->addr_ctrl.overflow_promisc++;
1207 }
1208
1209 hw_dbg(hw, "ixgbe_add_uc_addr Complete\n");
1210}
1211
1212/**
1213 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
1214 * @hw: pointer to hardware structure
1215 * @addr_list: the list of new addresses
1216 * @addr_count: number of addresses
1217 * @next: iterator function to walk the address list
1218 *
1219 * The given list replaces any existing list. Clears the secondary addrs from
1220 * receive address registers. Uses unused receive address registers for the
1221 * first secondary addresses, and falls back to promiscuous mode as needed.
1222 *
1223 * Drivers using secondary unicast addresses must set user_set_promisc when
1224 * manually putting the device into promiscuous mode.
1225 **/
1226s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
1227 u32 addr_count, ixgbe_mc_addr_itr next)
1228{
1229 u8 *addr;
1230 u32 i;
1231 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
1232 u32 uc_addr_in_use;
1233 u32 fctrl;
1234 u32 vmdq;
1235
1236 /*
1237 * Clear accounting of old secondary address list,
1238 * don't count RAR[0]
1239 */
1240 uc_addr_in_use = hw->addr_ctrl.rar_used_count -
1241 hw->addr_ctrl.mc_addr_in_rar_count - 1;
1242 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
1243 hw->addr_ctrl.overflow_promisc = 0;
1244
1245 /* Zero out the other receive addresses */
1246 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use);
1247 for (i = 1; i <= uc_addr_in_use; i++) {
1248 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
1249 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
1250 }
1251
1252 /* Add the new addresses */
1253 for (i = 0; i < addr_count; i++) {
1254 hw_dbg(hw, " Adding the secondary addresses:\n");
1255 addr = next(hw, &addr_list, &vmdq);
1256 ixgbe_add_uc_addr(hw, addr, vmdq);
1257 }
1258
1259 if (hw->addr_ctrl.overflow_promisc) {
1260 /* enable promisc if not already in overflow or set by user */
1261 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1262 hw_dbg(hw, " Entering address overflow promisc mode\n");
1263 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1264 fctrl |= IXGBE_FCTRL_UPE;
1265 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1266 }
1267 } else {
1268 /* only disable if set by overflow, not by user */
1269 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
1270 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1271 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1272 fctrl &= ~IXGBE_FCTRL_UPE;
1273 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1274 }
1275 }
1276
1277 hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n");
711 return 0; 1278 return 0;
712} 1279}
713 1280
@@ -720,7 +1287,7 @@ static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
720 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 1287 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
721 * incoming rx multicast addresses, to determine the bit-vector to check in 1288 * incoming rx multicast addresses, to determine the bit-vector to check in
722 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 1289 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
723 * by the MO field of the MCSTCTRL. The MO field is set during initalization 1290 * by the MO field of the MCSTCTRL. The MO field is set during initialization
724 * to mc_filter_type. 1291 * to mc_filter_type.
725 **/ 1292 **/
726static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) 1293static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
@@ -728,19 +1295,19 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
728 u32 vector = 0; 1295 u32 vector = 0;
729 1296
730 switch (hw->mac.mc_filter_type) { 1297 switch (hw->mac.mc_filter_type) {
731 case 0: /* use bits [47:36] of the address */ 1298 case 0: /* use bits [47:36] of the address */
732 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 1299 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
733 break; 1300 break;
734 case 1: /* use bits [46:35] of the address */ 1301 case 1: /* use bits [46:35] of the address */
735 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 1302 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
736 break; 1303 break;
737 case 2: /* use bits [45:34] of the address */ 1304 case 2: /* use bits [45:34] of the address */
738 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 1305 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
739 break; 1306 break;
740 case 3: /* use bits [43:32] of the address */ 1307 case 3: /* use bits [43:32] of the address */
741 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 1308 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
742 break; 1309 break;
743 default: /* Invalid mc_filter_type */ 1310 default: /* Invalid mc_filter_type */
744 hw_dbg(hw, "MC filter type param set incorrectly\n"); 1311 hw_dbg(hw, "MC filter type param set incorrectly\n");
745 break; 1312 break;
746 } 1313 }
@@ -794,21 +1361,22 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
794 **/ 1361 **/
795static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) 1362static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
796{ 1363{
797 u32 rar_entries = hw->mac.num_rx_addrs; 1364 u32 rar_entries = hw->mac.num_rar_entries;
1365 u32 rar;
798 1366
799 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", 1367 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n",
800 mc_addr[0], mc_addr[1], mc_addr[2], 1368 mc_addr[0], mc_addr[1], mc_addr[2],
801 mc_addr[3], mc_addr[4], mc_addr[5]); 1369 mc_addr[3], mc_addr[4], mc_addr[5]);
802 1370
803 /* 1371 /*
804 * Place this multicast address in the RAR if there is room, 1372 * Place this multicast address in the RAR if there is room,
805 * else put it in the MTA 1373 * else put it in the MTA
806 */ 1374 */
807 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1375 if (hw->addr_ctrl.rar_used_count < rar_entries) {
808 ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, 1376 /* use RAR from the end up for multicast */
809 mc_addr, 0, IXGBE_RAH_AV); 1377 rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1;
810 hw_dbg(hw, "Added a multicast address to RAR[%d]\n", 1378 hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV);
811 hw->addr_ctrl.rar_used_count); 1379 hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar);
812 hw->addr_ctrl.rar_used_count++; 1380 hw->addr_ctrl.rar_used_count++;
813 hw->addr_ctrl.mc_addr_in_rar_count++; 1381 hw->addr_ctrl.mc_addr_in_rar_count++;
814 } else { 1382 } else {
@@ -819,22 +1387,23 @@ static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr)
819} 1387}
820 1388
821/** 1389/**
822 * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses 1390 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
823 * @hw: pointer to hardware structure 1391 * @hw: pointer to hardware structure
824 * @mc_addr_list: the list of new multicast addresses 1392 * @mc_addr_list: the list of new multicast addresses
825 * @mc_addr_count: number of addresses 1393 * @mc_addr_count: number of addresses
826 * @pad: number of bytes between addresses in the list 1394 * @next: iterator function to walk the multicast address list
827 * 1395 *
828 * The given list replaces any existing list. Clears the MC addrs from receive 1396 * The given list replaces any existing list. Clears the MC addrs from receive
829 * address registers and the multicast table. Uses unsed receive address 1397 * address registers and the multicast table. Uses unused receive address
830 * registers for the first multicast addresses, and hashes the rest into the 1398 * registers for the first multicast addresses, and hashes the rest into the
831 * multicast table. 1399 * multicast table.
832 **/ 1400 **/
833s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, 1401s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
834 u32 mc_addr_count, u32 pad) 1402 u32 mc_addr_count, ixgbe_mc_addr_itr next)
835{ 1403{
836 u32 i; 1404 u32 i;
837 u32 rar_entries = hw->mac.num_rx_addrs; 1405 u32 rar_entries = hw->mac.num_rar_entries;
1406 u32 vmdq;
838 1407
839 /* 1408 /*
840 * Set the new number of MC addresses that we are being requested to 1409 * Set the new number of MC addresses that we are being requested to
@@ -846,7 +1415,8 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
846 hw->addr_ctrl.mta_in_use = 0; 1415 hw->addr_ctrl.mta_in_use = 0;
847 1416
848 /* Zero out the other receive addresses. */ 1417 /* Zero out the other receive addresses. */
849 hw_dbg(hw, "Clearing RAR[1-15]\n"); 1418 hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count,
1419 rar_entries - 1);
850 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { 1420 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) {
851 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1421 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
852 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1422 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
@@ -854,186 +1424,67 @@ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
854 1424
855 /* Clear the MTA */ 1425 /* Clear the MTA */
856 hw_dbg(hw, " Clearing MTA\n"); 1426 hw_dbg(hw, " Clearing MTA\n");
857 for (i = 0; i < IXGBE_MC_TBL_SIZE; i++) 1427 for (i = 0; i < hw->mac.mcft_size; i++)
858 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1428 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
859 1429
860 /* Add the new addresses */ 1430 /* Add the new addresses */
861 for (i = 0; i < mc_addr_count; i++) { 1431 for (i = 0; i < mc_addr_count; i++) {
862 hw_dbg(hw, " Adding the multicast addresses:\n"); 1432 hw_dbg(hw, " Adding the multicast addresses:\n");
863 ixgbe_add_mc_addr(hw, mc_addr_list + 1433 ixgbe_add_mc_addr(hw, next(hw, &mc_addr_list, &vmdq));
864 (i * (IXGBE_ETH_LENGTH_OF_ADDRESS + pad)));
865 } 1434 }
866 1435
867 /* Enable mta */ 1436 /* Enable mta */
868 if (hw->addr_ctrl.mta_in_use > 0) 1437 if (hw->addr_ctrl.mta_in_use > 0)
869 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1438 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
870 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1439 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
871 1440
872 hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); 1441 hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
873 return 0; 1442 return 0;
874} 1443}
875 1444
876/** 1445/**
877 * ixgbe_clear_vfta - Clear VLAN filter table 1446 * ixgbe_enable_mc_generic - Enable multicast address in RAR
878 * @hw: pointer to hardware structure 1447 * @hw: pointer to hardware structure
879 * 1448 *
880 * Clears the VLAN filer table, and the VMDq index associated with the filter 1449 * Enables multicast address in RAR and the use of the multicast hash table.
881 **/ 1450 **/
882static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) 1451s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
883{ 1452{
884 u32 offset; 1453 u32 i;
885 u32 vlanbyte; 1454 u32 rar_entries = hw->mac.num_rar_entries;
886 1455 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
887 for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
888 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
889
890 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
891 for (offset = 0; offset < IXGBE_VLAN_FILTER_TBL_SIZE; offset++)
892 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
893 0);
894 1456
895 return 0; 1457 if (a->mc_addr_in_rar_count > 0)
896} 1458 for (i = (rar_entries - a->mc_addr_in_rar_count);
1459 i < rar_entries; i++)
1460 ixgbe_enable_rar(hw, i);
897 1461
898/** 1462 if (a->mta_in_use > 0)
899 * ixgbe_set_vfta - Set VLAN filter table 1463 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
900 * @hw: pointer to hardware structure 1464 hw->mac.mc_filter_type);
901 * @vlan: VLAN id to write to VLAN filter
902 * @vind: VMDq output index that maps queue to VLAN id in VFTA
903 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
904 *
905 * Turn on/off specified VLAN in the VLAN filter table.
906 **/
907s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind,
908 bool vlan_on)
909{
910 u32 VftaIndex;
911 u32 BitOffset;
912 u32 VftaReg;
913 u32 VftaByte;
914
915 /* Determine 32-bit word position in array */
916 VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */
917
918 /* Determine the location of the (VMD) queue index */
919 VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
920 BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
921
922 /* Set the nibble for VMD queue index */
923 VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex));
924 VftaReg &= (~(0x0F << BitOffset));
925 VftaReg |= (vind << BitOffset);
926 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg);
927
928 /* Determine the location of the bit for this VLAN id */
929 BitOffset = vlan & 0x1F; /* lower five bits */
930
931 VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex));
932 if (vlan_on)
933 /* Turn on this VLAN id */
934 VftaReg |= (1 << BitOffset);
935 else
936 /* Turn off this VLAN id */
937 VftaReg &= ~(1 << BitOffset);
938 IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg);
939 1465
940 return 0; 1466 return 0;
941} 1467}
942 1468
943/** 1469/**
944 * ixgbe_setup_fc - Configure flow control settings 1470 * ixgbe_disable_mc_generic - Disable multicast address in RAR
945 * @hw: pointer to hardware structure 1471 * @hw: pointer to hardware structure
946 * @packetbuf_num: packet buffer number (0-7)
947 * 1472 *
948 * Configures the flow control settings based on SW configuration. 1473 * Disables multicast address in RAR and the use of the multicast hash table.
949 * This function is used for 802.3x flow control configuration only.
950 **/ 1474 **/
951s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 1475s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
952{ 1476{
953 u32 frctl_reg; 1477 u32 i;
954 u32 rmcs_reg; 1478 u32 rar_entries = hw->mac.num_rar_entries;
955 1479 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
956 if (packetbuf_num < 0 || packetbuf_num > 7)
957 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
958 "is 0-7\n", packetbuf_num);
959
960 frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
961 frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
962
963 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
964 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
965
966 /*
967 * We want to save off the original Flow Control configuration just in
968 * case we get disconnected and then reconnected into a different hub
969 * or switch with different Flow Control capabilities.
970 */
971 hw->fc.type = hw->fc.original_type;
972
973 /*
974 * The possible values of the "flow_control" parameter are:
975 * 0: Flow control is completely disabled
976 * 1: Rx flow control is enabled (we can receive pause frames but not
977 * send pause frames).
978 * 2: Tx flow control is enabled (we can send pause frames but we do not
979 * support receiving pause frames)
980 * 3: Both Rx and TX flow control (symmetric) are enabled.
981 * other: Invalid.
982 */
983 switch (hw->fc.type) {
984 case ixgbe_fc_none:
985 break;
986 case ixgbe_fc_rx_pause:
987 /*
988 * RX Flow control is enabled,
989 * and TX Flow control is disabled.
990 */
991 frctl_reg |= IXGBE_FCTRL_RFCE;
992 break;
993 case ixgbe_fc_tx_pause:
994 /*
995 * TX Flow control is enabled, and RX Flow control is disabled,
996 * by a software over-ride.
997 */
998 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
999 break;
1000 case ixgbe_fc_full:
1001 /*
1002 * Flow control (both RX and TX) is enabled by a software
1003 * over-ride.
1004 */
1005 frctl_reg |= IXGBE_FCTRL_RFCE;
1006 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
1007 break;
1008 default:
1009 /* We should never get here. The value should be 0-3. */
1010 hw_dbg(hw, "Flow control param set incorrectly\n");
1011 break;
1012 }
1013
1014 /* Enable 802.3x based flow control settings. */
1015 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg);
1016 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
1017 1480
1018 /* 1481 if (a->mc_addr_in_rar_count > 0)
1019 * We need to set up the Receive Threshold high and low water 1482 for (i = (rar_entries - a->mc_addr_in_rar_count);
1020 * marks as well as (optionally) enabling the transmission of 1483 i < rar_entries; i++)
1021 * XON frames. 1484 ixgbe_disable_rar(hw, i);
1022 */
1023 if (hw->fc.type & ixgbe_fc_tx_pause) {
1024 if (hw->fc.send_xon) {
1025 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
1026 (hw->fc.low_water | IXGBE_FCRTL_XONE));
1027 } else {
1028 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
1029 hw->fc.low_water);
1030 }
1031 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
1032 (hw->fc.high_water)|IXGBE_FCRTH_FCEN);
1033 }
1034 1485
1035 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); 1486 if (a->mta_in_use > 0)
1036 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 1487 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
1037 1488
1038 return 0; 1489 return 0;
1039} 1490}
@@ -1049,13 +1500,24 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
1049 **/ 1500 **/
1050s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 1501s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
1051{ 1502{
1052 u32 ctrl; 1503 u32 i;
1053 s32 i; 1504 u32 reg_val;
1505 u32 number_of_queues;
1054 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 1506 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
1055 1507
1056 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 1508 /* Disable the receive unit by stopping each queue */
1057 ctrl |= IXGBE_CTRL_GIO_DIS; 1509 number_of_queues = hw->mac.max_rx_queues;
1058 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 1510 for (i = 0; i < number_of_queues; i++) {
1511 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1512 if (reg_val & IXGBE_RXDCTL_ENABLE) {
1513 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1514 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1515 }
1516 }
1517
1518 reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
1519 reg_val |= IXGBE_CTRL_GIO_DIS;
1520 IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
1059 1521
1060 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 1522 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
1061 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { 1523 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
@@ -1070,11 +1532,11 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
1070 1532
1071 1533
1072/** 1534/**
1073 * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore 1535 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
1074 * @hw: pointer to hardware structure 1536 * @hw: pointer to hardware structure
1075 * @mask: Mask to specify wich semaphore to acquire 1537 * @mask: Mask to specify which semaphore to acquire
1076 * 1538 *
1077 * Aquires the SWFW semaphore throught the GSSR register for the specified 1539 * Acquires the SWFW semaphore thought the GSSR register for the specified
1078 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1540 * function (CSR, PHY0, PHY1, EEPROM, Flash)
1079 **/ 1541 **/
1080s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) 1542s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1116,9 +1578,9 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
1116/** 1578/**
1117 * ixgbe_release_swfw_sync - Release SWFW semaphore 1579 * ixgbe_release_swfw_sync - Release SWFW semaphore
1118 * @hw: pointer to hardware structure 1580 * @hw: pointer to hardware structure
1119 * @mask: Mask to specify wich semaphore to release 1581 * @mask: Mask to specify which semaphore to release
1120 * 1582 *
1121 * Releases the SWFW semaphore throught the GSSR register for the specified 1583 * Releases the SWFW semaphore thought the GSSR register for the specified
1122 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1584 * function (CSR, PHY0, PHY1, EEPROM, Flash)
1123 **/ 1585 **/
1124void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) 1586void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
@@ -1135,45 +1597,3 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
1135 ixgbe_release_eeprom_semaphore(hw); 1597 ixgbe_release_eeprom_semaphore(hw);
1136} 1598}
1137 1599
1138/**
1139 * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register
1140 * @hw: pointer to hardware structure
1141 * @reg: analog register to read
1142 * @val: read value
1143 *
1144 * Performs read operation of the analog register specified.
1145 **/
1146s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
1147{
1148 u32 atlas_ctl;
1149
 /* NOTE(review): issuing WRITE_CMD with the register number appears to
  * latch that register's value for readback below — confirm vs. datasheet */
1150 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1151 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1152 IXGBE_WRITE_FLUSH(hw);
1153 udelay(10);
 /* low byte of ATLASCTL now holds the analog register contents */
1154 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1155 *val = (u8)atlas_ctl;
1156
1157 return 0;
1158}
1159
1160/**
1161 * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register
1162 * @hw: pointer to hardware structure
1163 * @reg: atlas register to write
1164 * @val: value to write
1165 *
1166 * Performs write operation to Atlas analog register specified.
1167 **/
1168s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
1169{
1170 u32 atlas_ctl;
1171
1172 atlas_ctl = (reg << 8) | val;
1173 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1174 IXGBE_WRITE_FLUSH(hw);
1175 udelay(10);
1176
1177 return 0;
1178}
1179
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index de6ddd5d04ad..192f8d012911 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -31,34 +30,45 @@
31 30
32#include "ixgbe_type.h" 31#include "ixgbe_type.h"
33 32
34s32 ixgbe_init_hw(struct ixgbe_hw *hw); 33s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
35s32 ixgbe_start_hw(struct ixgbe_hw *hw); 34s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
36s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); 35s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); 36s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
38s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); 37s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
38s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
39s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
40s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
41
42s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
43s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
44
45s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
46s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
47s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
48 u16 *data);
49s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
50 u16 *checksum_val);
51s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
52
53s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
54 u32 enable_addr);
55s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
56s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
57s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
58 u32 mc_addr_count,
59 ixgbe_mc_addr_itr func);
60s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
61 u32 addr_count, ixgbe_mc_addr_itr func);
62s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
63s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
39 64
40s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
41s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
42
43s32 ixgbe_init_eeprom(struct ixgbe_hw *hw);
44s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
45s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
46
47s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind,
48 u32 enable_addr);
49s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
50 u32 mc_addr_count, u32 pad);
51s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
52s32 ixgbe_validate_mac_addr(u8 *mac_addr); 65s32 ixgbe_validate_mac_addr(u8 *mac_addr);
53
54s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num);
55
56s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 66s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
57void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); 67void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
58s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 68s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
59 69
60s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); 70s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val);
61s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); 71s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val);
62 72
63#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
64 74
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3efe5dda10af..81a9c4b86726 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -48,7 +47,7 @@ struct ixgbe_stats {
48}; 47};
49 48
50#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ 49#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
51 offsetof(struct ixgbe_adapter, m) 50 offsetof(struct ixgbe_adapter, m)
52static struct ixgbe_stats ixgbe_gstrings_stats[] = { 51static struct ixgbe_stats ixgbe_gstrings_stats[] = {
53 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, 52 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
54 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
@@ -95,14 +94,15 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
95}; 94};
96 95
97#define IXGBE_QUEUE_STATS_LEN \ 96#define IXGBE_QUEUE_STATS_LEN \
98 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ 97 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
99 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ 98 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
100 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 99 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
101#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 100#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
101#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
102#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 102#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
103 103
104static int ixgbe_get_settings(struct net_device *netdev, 104static int ixgbe_get_settings(struct net_device *netdev,
105 struct ethtool_cmd *ecmd) 105 struct ethtool_cmd *ecmd)
106{ 106{
107 struct ixgbe_adapter *adapter = netdev_priv(netdev); 107 struct ixgbe_adapter *adapter = netdev_priv(netdev);
108 struct ixgbe_hw *hw = &adapter->hw; 108 struct ixgbe_hw *hw = &adapter->hw;
@@ -114,7 +114,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
114 ecmd->transceiver = XCVR_EXTERNAL; 114 ecmd->transceiver = XCVR_EXTERNAL;
115 if (hw->phy.media_type == ixgbe_media_type_copper) { 115 if (hw->phy.media_type == ixgbe_media_type_copper) {
116 ecmd->supported |= (SUPPORTED_1000baseT_Full | 116 ecmd->supported |= (SUPPORTED_1000baseT_Full |
117 SUPPORTED_TP | SUPPORTED_Autoneg); 117 SUPPORTED_TP | SUPPORTED_Autoneg);
118 118
119 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg); 119 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) 120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
@@ -126,14 +126,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
126 } else { 126 } else {
127 ecmd->supported |= SUPPORTED_FIBRE; 127 ecmd->supported |= SUPPORTED_FIBRE;
128 ecmd->advertising = (ADVERTISED_10000baseT_Full | 128 ecmd->advertising = (ADVERTISED_10000baseT_Full |
129 ADVERTISED_FIBRE); 129 ADVERTISED_FIBRE);
130 ecmd->port = PORT_FIBRE; 130 ecmd->port = PORT_FIBRE;
131 ecmd->autoneg = AUTONEG_DISABLE;
131 } 132 }
132 133
133 adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up); 134 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
134 if (link_up) { 135 if (link_up) {
135 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 136 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
136 SPEED_10000 : SPEED_1000; 137 SPEED_10000 : SPEED_1000;
137 ecmd->duplex = DUPLEX_FULL; 138 ecmd->duplex = DUPLEX_FULL;
138 } else { 139 } else {
139 ecmd->speed = -1; 140 ecmd->speed = -1;
@@ -144,7 +145,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
144} 145}
145 146
146static int ixgbe_set_settings(struct net_device *netdev, 147static int ixgbe_set_settings(struct net_device *netdev,
147 struct ethtool_cmd *ecmd) 148 struct ethtool_cmd *ecmd)
148{ 149{
149 struct ixgbe_adapter *adapter = netdev_priv(netdev); 150 struct ixgbe_adapter *adapter = netdev_priv(netdev);
150 struct ixgbe_hw *hw = &adapter->hw; 151 struct ixgbe_hw *hw = &adapter->hw;
@@ -164,7 +165,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
164} 165}
165 166
166static void ixgbe_get_pauseparam(struct net_device *netdev, 167static void ixgbe_get_pauseparam(struct net_device *netdev,
167 struct ethtool_pauseparam *pause) 168 struct ethtool_pauseparam *pause)
168{ 169{
169 struct ixgbe_adapter *adapter = netdev_priv(netdev); 170 struct ixgbe_adapter *adapter = netdev_priv(netdev);
170 struct ixgbe_hw *hw = &adapter->hw; 171 struct ixgbe_hw *hw = &adapter->hw;
@@ -182,7 +183,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
182} 183}
183 184
184static int ixgbe_set_pauseparam(struct net_device *netdev, 185static int ixgbe_set_pauseparam(struct net_device *netdev,
185 struct ethtool_pauseparam *pause) 186 struct ethtool_pauseparam *pause)
186{ 187{
187 struct ixgbe_adapter *adapter = netdev_priv(netdev); 188 struct ixgbe_adapter *adapter = netdev_priv(netdev);
188 struct ixgbe_hw *hw = &adapter->hw; 189 struct ixgbe_hw *hw = &adapter->hw;
@@ -233,15 +234,15 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
233 234
234static u32 ixgbe_get_tx_csum(struct net_device *netdev) 235static u32 ixgbe_get_tx_csum(struct net_device *netdev)
235{ 236{
236 return (netdev->features & NETIF_F_HW_CSUM) != 0; 237 return (netdev->features & NETIF_F_IP_CSUM) != 0;
237} 238}
238 239
239static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) 240static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
240{ 241{
241 if (data) 242 if (data)
242 netdev->features |= NETIF_F_HW_CSUM; 243 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
243 else 244 else
244 netdev->features &= ~NETIF_F_HW_CSUM; 245 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
245 246
246 return 0; 247 return 0;
247} 248}
@@ -281,7 +282,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
281#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 282#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
282 283
283static void ixgbe_get_regs(struct net_device *netdev, 284static void ixgbe_get_regs(struct net_device *netdev,
284 struct ethtool_regs *regs, void *p) 285 struct ethtool_regs *regs, void *p)
285{ 286{
286 struct ixgbe_adapter *adapter = netdev_priv(netdev); 287 struct ixgbe_adapter *adapter = netdev_priv(netdev);
287 struct ixgbe_hw *hw = &adapter->hw; 288 struct ixgbe_hw *hw = &adapter->hw;
@@ -315,7 +316,9 @@ static void ixgbe_get_regs(struct net_device *netdev,
315 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 316 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
316 317
317 /* Interrupt */ 318 /* Interrupt */
318 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR); 319 /* don't read EICR because it can clear interrupt causes, instead
320 * read EICS which is a shadow but doesn't clear EICR */
321 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
319 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 322 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
320 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 323 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
321 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); 324 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
@@ -325,7 +328,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
325 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 328 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
326 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 329 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
327 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 330 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
328 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); 331 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
329 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 332 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
330 333
331 /* Flow Control */ 334 /* Flow Control */
@@ -371,7 +374,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
371 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 374 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
372 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
373 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 376 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
374 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); 377 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
375 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 378 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
376 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 379 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
377 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); 380 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
@@ -419,7 +422,6 @@ static void ixgbe_get_regs(struct net_device *netdev,
419 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 422 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
420 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT); 423 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
421 424
422 /* DCE */
423 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 425 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
424 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 426 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
425 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 427 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -539,21 +541,17 @@ static void ixgbe_get_regs(struct net_device *netdev,
539 /* Diagnostic */ 541 /* Diagnostic */
540 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 542 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
541 for (i = 0; i < 8; i++) 543 for (i = 0; i < 8; i++)
542 regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 544 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
543 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 545 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
544 regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0); 546 for (i = 0; i < 4; i++)
545 regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1); 547 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
546 regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
547 regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
548 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 548 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
549 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 549 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
550 for (i = 0; i < 8; i++) 550 for (i = 0; i < 8; i++)
551 regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 551 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
552 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 552 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
553 regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0); 553 for (i = 0; i < 4; i++)
554 regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1); 554 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
555 regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
556 regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
557 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 555 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
558 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 556 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
559 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); 557 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
@@ -566,7 +564,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
566 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 564 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
567 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 565 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
568 for (i = 0; i < 8; i++) 566 for (i = 0; i < 8; i++)
569 regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 567 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
570 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 568 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
571 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 569 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
572 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); 570 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
@@ -585,7 +583,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
585} 583}
586 584
587static int ixgbe_get_eeprom(struct net_device *netdev, 585static int ixgbe_get_eeprom(struct net_device *netdev,
588 struct ethtool_eeprom *eeprom, u8 *bytes) 586 struct ethtool_eeprom *eeprom, u8 *bytes)
589{ 587{
590 struct ixgbe_adapter *adapter = netdev_priv(netdev); 588 struct ixgbe_adapter *adapter = netdev_priv(netdev);
591 struct ixgbe_hw *hw = &adapter->hw; 589 struct ixgbe_hw *hw = &adapter->hw;
@@ -608,8 +606,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
608 return -ENOMEM; 606 return -ENOMEM;
609 607
610 for (i = 0; i < eeprom_len; i++) { 608 for (i = 0; i < eeprom_len; i++) {
611 if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, 609 if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
612 &eeprom_buff[i]))) 610 &eeprom_buff[i])))
613 break; 611 break;
614 } 612 }
615 613
@@ -624,7 +622,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
624} 622}
625 623
626static void ixgbe_get_drvinfo(struct net_device *netdev, 624static void ixgbe_get_drvinfo(struct net_device *netdev,
627 struct ethtool_drvinfo *drvinfo) 625 struct ethtool_drvinfo *drvinfo)
628{ 626{
629 struct ixgbe_adapter *adapter = netdev_priv(netdev); 627 struct ixgbe_adapter *adapter = netdev_priv(netdev);
630 628
@@ -637,7 +635,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
637} 635}
638 636
639static void ixgbe_get_ringparam(struct net_device *netdev, 637static void ixgbe_get_ringparam(struct net_device *netdev,
640 struct ethtool_ringparam *ring) 638 struct ethtool_ringparam *ring)
641{ 639{
642 struct ixgbe_adapter *adapter = netdev_priv(netdev); 640 struct ixgbe_adapter *adapter = netdev_priv(netdev);
643 struct ixgbe_ring *tx_ring = adapter->tx_ring; 641 struct ixgbe_ring *tx_ring = adapter->tx_ring;
@@ -654,15 +652,12 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
654} 652}
655 653
656static int ixgbe_set_ringparam(struct net_device *netdev, 654static int ixgbe_set_ringparam(struct net_device *netdev,
657 struct ethtool_ringparam *ring) 655 struct ethtool_ringparam *ring)
658{ 656{
659 struct ixgbe_adapter *adapter = netdev_priv(netdev); 657 struct ixgbe_adapter *adapter = netdev_priv(netdev);
660 struct ixgbe_tx_buffer *old_buf; 658 struct ixgbe_ring *temp_ring;
661 struct ixgbe_rx_buffer *old_rx_buf;
662 void *old_desc;
663 int i, err; 659 int i, err;
664 u32 new_rx_count, new_tx_count, old_size; 660 u32 new_rx_count, new_tx_count;
665 dma_addr_t old_dma;
666 661
667 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 662 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
668 return -EINVAL; 663 return -EINVAL;
@@ -681,6 +676,15 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
681 return 0; 676 return 0;
682 } 677 }
683 678
679 if (adapter->num_tx_queues > adapter->num_rx_queues)
680 temp_ring = vmalloc(adapter->num_tx_queues *
681 sizeof(struct ixgbe_ring));
682 else
683 temp_ring = vmalloc(adapter->num_rx_queues *
684 sizeof(struct ixgbe_ring));
685 if (!temp_ring)
686 return -ENOMEM;
687
684 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 688 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
685 msleep(1); 689 msleep(1);
686 690
@@ -693,66 +697,61 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
693 * to the tx and rx ring structs. 697 * to the tx and rx ring structs.
694 */ 698 */
695 if (new_tx_count != adapter->tx_ring->count) { 699 if (new_tx_count != adapter->tx_ring->count) {
700 memcpy(temp_ring, adapter->tx_ring,
701 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
702
696 for (i = 0; i < adapter->num_tx_queues; i++) { 703 for (i = 0; i < adapter->num_tx_queues; i++) {
697 /* Save existing descriptor ring */ 704 temp_ring[i].count = new_tx_count;
698 old_buf = adapter->tx_ring[i].tx_buffer_info; 705 err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
699 old_desc = adapter->tx_ring[i].desc;
700 old_size = adapter->tx_ring[i].size;
701 old_dma = adapter->tx_ring[i].dma;
702 /* Try to allocate a new one */
703 adapter->tx_ring[i].tx_buffer_info = NULL;
704 adapter->tx_ring[i].desc = NULL;
705 adapter->tx_ring[i].count = new_tx_count;
706 err = ixgbe_setup_tx_resources(adapter,
707 &adapter->tx_ring[i]);
708 if (err) { 706 if (err) {
709 /* Restore the old one so at least 707 while (i) {
710 the adapter still works, even if 708 i--;
711 we failed the request */ 709 ixgbe_free_tx_resources(adapter,
712 adapter->tx_ring[i].tx_buffer_info = old_buf; 710 &temp_ring[i]);
713 adapter->tx_ring[i].desc = old_desc; 711 }
714 adapter->tx_ring[i].size = old_size;
715 adapter->tx_ring[i].dma = old_dma;
716 goto err_setup; 712 goto err_setup;
717 } 713 }
718 /* Free the old buffer manually */
719 vfree(old_buf);
720 pci_free_consistent(adapter->pdev, old_size,
721 old_desc, old_dma);
722 } 714 }
715
716 for (i = 0; i < adapter->num_tx_queues; i++)
717 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
718
719 memcpy(adapter->tx_ring, temp_ring,
720 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
721
722 adapter->tx_ring_count = new_tx_count;
723 } 723 }
724 724
725 if (new_rx_count != adapter->rx_ring->count) { 725 if (new_rx_count != adapter->rx_ring->count) {
726 for (i = 0; i < adapter->num_rx_queues; i++) { 726 memcpy(temp_ring, adapter->rx_ring,
727 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
727 728
728 old_rx_buf = adapter->rx_ring[i].rx_buffer_info; 729 for (i = 0; i < adapter->num_rx_queues; i++) {
729 old_desc = adapter->rx_ring[i].desc; 730 temp_ring[i].count = new_rx_count;
730 old_size = adapter->rx_ring[i].size; 731 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
731 old_dma = adapter->rx_ring[i].dma;
732
733 adapter->rx_ring[i].rx_buffer_info = NULL;
734 adapter->rx_ring[i].desc = NULL;
735 adapter->rx_ring[i].dma = 0;
736 adapter->rx_ring[i].count = new_rx_count;
737 err = ixgbe_setup_rx_resources(adapter,
738 &adapter->rx_ring[i]);
739 if (err) { 732 if (err) {
740 adapter->rx_ring[i].rx_buffer_info = old_rx_buf; 733 while (i) {
741 adapter->rx_ring[i].desc = old_desc; 734 i--;
742 adapter->rx_ring[i].size = old_size; 735 ixgbe_free_rx_resources(adapter,
743 adapter->rx_ring[i].dma = old_dma; 736 &temp_ring[i]);
737 }
744 goto err_setup; 738 goto err_setup;
745 } 739 }
746
747 vfree(old_rx_buf);
748 pci_free_consistent(adapter->pdev, old_size, old_desc,
749 old_dma);
750 } 740 }
741
742 for (i = 0; i < adapter->num_rx_queues; i++)
743 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
744
745 memcpy(adapter->rx_ring, temp_ring,
746 adapter->num_rx_queues * sizeof(struct ixgbe_ring));
747
748 adapter->rx_ring_count = new_rx_count;
751 } 749 }
752 750
751 /* success! */
753 err = 0; 752 err = 0;
754err_setup: 753err_setup:
755 if (netif_running(adapter->netdev)) 754 if (netif_running(netdev))
756 ixgbe_up(adapter); 755 ixgbe_up(adapter);
757 756
758 clear_bit(__IXGBE_RESETTING, &adapter->state); 757 clear_bit(__IXGBE_RESETTING, &adapter->state);
@@ -770,7 +769,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
770} 769}
771 770
772static void ixgbe_get_ethtool_stats(struct net_device *netdev, 771static void ixgbe_get_ethtool_stats(struct net_device *netdev,
773 struct ethtool_stats *stats, u64 *data) 772 struct ethtool_stats *stats, u64 *data)
774{ 773{
775 struct ixgbe_adapter *adapter = netdev_priv(netdev); 774 struct ixgbe_adapter *adapter = netdev_priv(netdev);
776 u64 *queue_stat; 775 u64 *queue_stat;
@@ -778,12 +777,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
778 int j, k; 777 int j, k;
779 int i; 778 int i;
780 u64 aggregated = 0, flushed = 0, no_desc = 0; 779 u64 aggregated = 0, flushed = 0, no_desc = 0;
780 for (i = 0; i < adapter->num_rx_queues; i++) {
781 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
782 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
783 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
784 }
785 adapter->lro_aggregated = aggregated;
786 adapter->lro_flushed = flushed;
787 adapter->lro_no_desc = no_desc;
781 788
782 ixgbe_update_stats(adapter); 789 ixgbe_update_stats(adapter);
783 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 790 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
784 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; 791 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
785 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 792 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
786 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 793 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
787 } 794 }
788 for (j = 0; j < adapter->num_tx_queues; j++) { 795 for (j = 0; j < adapter->num_tx_queues; j++) {
789 queue_stat = (u64 *)&adapter->tx_ring[j].stats; 796 queue_stat = (u64 *)&adapter->tx_ring[j].stats;
@@ -792,24 +799,18 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
792 i += k; 799 i += k;
793 } 800 }
794 for (j = 0; j < adapter->num_rx_queues; j++) { 801 for (j = 0; j < adapter->num_rx_queues; j++) {
795 aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated;
796 flushed += adapter->rx_ring[j].lro_mgr.stats.flushed;
797 no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc;
798 queue_stat = (u64 *)&adapter->rx_ring[j].stats; 802 queue_stat = (u64 *)&adapter->rx_ring[j].stats;
799 for (k = 0; k < stat_count; k++) 803 for (k = 0; k < stat_count; k++)
800 data[i + k] = queue_stat[k]; 804 data[i + k] = queue_stat[k];
801 i += k; 805 i += k;
802 } 806 }
803 adapter->lro_aggregated = aggregated;
804 adapter->lro_flushed = flushed;
805 adapter->lro_no_desc = no_desc;
806} 807}
807 808
808static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 809static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
809 u8 *data) 810 u8 *data)
810{ 811{
811 struct ixgbe_adapter *adapter = netdev_priv(netdev); 812 struct ixgbe_adapter *adapter = netdev_priv(netdev);
812 u8 *p = data; 813 char *p = (char *)data;
813 int i; 814 int i;
814 815
815 switch (stringset) { 816 switch (stringset) {
@@ -831,14 +832,14 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
831 sprintf(p, "rx_queue_%u_bytes", i); 832 sprintf(p, "rx_queue_%u_bytes", i);
832 p += ETH_GSTRING_LEN; 833 p += ETH_GSTRING_LEN;
833 } 834 }
834/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 835 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
835 break; 836 break;
836 } 837 }
837} 838}
838 839
839 840
840static void ixgbe_get_wol(struct net_device *netdev, 841static void ixgbe_get_wol(struct net_device *netdev,
841 struct ethtool_wolinfo *wol) 842 struct ethtool_wolinfo *wol)
842{ 843{
843 wol->supported = 0; 844 wol->supported = 0;
844 wol->wolopts = 0; 845 wol->wolopts = 0;
@@ -859,16 +860,17 @@ static int ixgbe_nway_reset(struct net_device *netdev)
859static int ixgbe_phys_id(struct net_device *netdev, u32 data) 860static int ixgbe_phys_id(struct net_device *netdev, u32 data)
860{ 861{
861 struct ixgbe_adapter *adapter = netdev_priv(netdev); 862 struct ixgbe_adapter *adapter = netdev_priv(netdev);
862 u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); 863 struct ixgbe_hw *hw = &adapter->hw;
864 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
863 u32 i; 865 u32 i;
864 866
865 if (!data || data > 300) 867 if (!data || data > 300)
866 data = 300; 868 data = 300;
867 869
868 for (i = 0; i < (data * 1000); i += 400) { 870 for (i = 0; i < (data * 1000); i += 400) {
869 ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); 871 hw->mac.ops.led_on(hw, IXGBE_LED_ON);
870 msleep_interruptible(200); 872 msleep_interruptible(200);
871 ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); 873 hw->mac.ops.led_off(hw, IXGBE_LED_ON);
872 msleep_interruptible(200); 874 msleep_interruptible(200);
873 } 875 }
874 876
@@ -879,67 +881,75 @@ static int ixgbe_phys_id(struct net_device *netdev, u32 data)
879} 881}
880 882
881static int ixgbe_get_coalesce(struct net_device *netdev, 883static int ixgbe_get_coalesce(struct net_device *netdev,
882 struct ethtool_coalesce *ec) 884 struct ethtool_coalesce *ec)
883{ 885{
884 struct ixgbe_adapter *adapter = netdev_priv(netdev); 886 struct ixgbe_adapter *adapter = netdev_priv(netdev);
885 887
886 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
887 ec->rx_coalesce_usecs = adapter->rx_eitr;
888 else
889 ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
890
891 if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
892 ec->tx_coalesce_usecs = adapter->tx_eitr;
893 else
894 ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
895
896 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; 888 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
889
890 /* only valid if in constant ITR mode */
891 switch (adapter->itr_setting) {
892 case 0:
893 /* throttling disabled */
894 ec->rx_coalesce_usecs = 0;
895 break;
896 case 1:
897 /* dynamic ITR mode */
898 ec->rx_coalesce_usecs = 1;
899 break;
900 default:
901 /* fixed interrupt rate mode */
902 ec->rx_coalesce_usecs = 1000000/adapter->eitr_param;
903 break;
904 }
897 return 0; 905 return 0;
898} 906}
899 907
900static int ixgbe_set_coalesce(struct net_device *netdev, 908static int ixgbe_set_coalesce(struct net_device *netdev,
901 struct ethtool_coalesce *ec) 909 struct ethtool_coalesce *ec)
902{ 910{
903 struct ixgbe_adapter *adapter = netdev_priv(netdev); 911 struct ixgbe_adapter *adapter = netdev_priv(netdev);
904 912 struct ixgbe_hw *hw = &adapter->hw;
905 if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || 913 int i;
906 ((ec->rx_coalesce_usecs != 0) &&
907 (ec->rx_coalesce_usecs != 1) &&
908 (ec->rx_coalesce_usecs != 3) &&
909 (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
910 return -EINVAL;
911 if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
912 ((ec->tx_coalesce_usecs != 0) &&
913 (ec->tx_coalesce_usecs != 1) &&
914 (ec->tx_coalesce_usecs != 3) &&
915 (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
916 return -EINVAL;
917
918 /* convert to rate of irq's per second */
919 if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
920 adapter->rx_eitr = ec->rx_coalesce_usecs;
921 else
922 adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
923
924 if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
925 adapter->tx_eitr = ec->rx_coalesce_usecs;
926 else
927 adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
928 914
929 if (ec->tx_max_coalesced_frames_irq) 915 if (ec->tx_max_coalesced_frames_irq)
930 adapter->tx_ring[0].work_limit = 916 adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
931 ec->tx_max_coalesced_frames_irq; 917
918 if (ec->rx_coalesce_usecs > 1) {
919 /* store the value in ints/second */
920 adapter->eitr_param = 1000000/ec->rx_coalesce_usecs;
921
922 /* static value of interrupt rate */
923 adapter->itr_setting = adapter->eitr_param;
924 /* clear the lower bit */
925 adapter->itr_setting &= ~1;
926 } else if (ec->rx_coalesce_usecs == 1) {
927 /* 1 means dynamic mode */
928 adapter->eitr_param = 20000;
929 adapter->itr_setting = 1;
930 } else {
931 /* any other value means disable eitr, which is best
932 * served by setting the interrupt rate very high */
933 adapter->eitr_param = 3000000;
934 adapter->itr_setting = 0;
935 }
932 936
933 if (netif_running(netdev)) { 937 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
934 ixgbe_down(adapter); 938 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
935 ixgbe_up(adapter); 939 if (q_vector->txr_count && !q_vector->rxr_count)
940 q_vector->eitr = (adapter->eitr_param >> 1);
941 else
942 /* rx only or mixed */
943 q_vector->eitr = adapter->eitr_param;
944 IXGBE_WRITE_REG(hw, IXGBE_EITR(i),
945 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
936 } 946 }
937 947
938 return 0; 948 return 0;
939} 949}
940 950
941 951
942static struct ethtool_ops ixgbe_ethtool_ops = { 952static const struct ethtool_ops ixgbe_ethtool_ops = {
943 .get_settings = ixgbe_get_settings, 953 .get_settings = ixgbe_get_settings,
944 .set_settings = ixgbe_set_settings, 954 .set_settings = ixgbe_set_settings,
945 .get_drvinfo = ixgbe_get_drvinfo, 955 .get_drvinfo = ixgbe_get_drvinfo,
@@ -966,7 +976,7 @@ static struct ethtool_ops ixgbe_ethtool_ops = {
966 .set_tso = ixgbe_set_tso, 976 .set_tso = ixgbe_set_tso,
967 .get_strings = ixgbe_get_strings, 977 .get_strings = ixgbe_get_strings,
968 .phys_id = ixgbe_phys_id, 978 .phys_id = ixgbe_phys_id,
969 .get_sset_count = ixgbe_get_sset_count, 979 .get_sset_count = ixgbe_get_sset_count,
970 .get_ethtool_stats = ixgbe_get_ethtool_stats, 980 .get_ethtool_stats = ixgbe_get_ethtool_stats,
971 .get_coalesce = ixgbe_get_coalesce, 981 .get_coalesce = ixgbe_get_coalesce,
972 .set_coalesce = ixgbe_set_coalesce, 982 .set_coalesce = ixgbe_set_coalesce,
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a417be7f8be5..ca17af4349d0 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -46,15 +45,14 @@
46 45
47char ixgbe_driver_name[] = "ixgbe"; 46char ixgbe_driver_name[] = "ixgbe";
48static const char ixgbe_driver_string[] = 47static const char ixgbe_driver_string[] =
49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 48 "Intel(R) 10 Gigabit PCI Express Network Driver";
50 49
51#define DRV_VERSION "1.3.18-k4" 50#define DRV_VERSION "1.3.30-k2"
52const char ixgbe_driver_version[] = DRV_VERSION; 51const char ixgbe_driver_version[] = DRV_VERSION;
53static const char ixgbe_copyright[] = 52static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";
54 "Copyright (c) 1999-2007 Intel Corporation.";
55 53
56static const struct ixgbe_info *ixgbe_info_tbl[] = { 54static const struct ixgbe_info *ixgbe_info_tbl[] = {
57 [board_82598] = &ixgbe_82598_info, 55 [board_82598] = &ixgbe_82598_info,
58}; 56};
59 57
60/* ixgbe_pci_tbl - PCI Device ID Table 58/* ixgbe_pci_tbl - PCI Device ID Table
@@ -74,15 +72,17 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
74 board_82598 }, 72 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
76 board_82598 }, 74 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
76 board_82598 },
77 77
78 /* required last entry */ 78 /* required last entry */
79 {0, } 79 {0, }
80}; 80};
81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); 81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
82 82
83#ifdef CONFIG_DCA 83#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, 84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
85 void *p); 85 void *p);
86static struct notifier_block dca_notifier = { 86static struct notifier_block dca_notifier = {
87 .notifier_call = ixgbe_notify_dca, 87 .notifier_call = ixgbe_notify_dca,
88 .next = NULL, 88 .next = NULL,
@@ -104,7 +104,7 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
104 /* Let firmware take over control of h/w */ 104 /* Let firmware take over control of h/w */
105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
107 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 107 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
108} 108}
109 109
110static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) 110static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
@@ -114,24 +114,11 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
114 /* Let firmware know the driver has taken over */ 114 /* Let firmware know the driver has taken over */
115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
117 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 117 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
118}
119
120#ifdef DEBUG
121/**
122 * ixgbe_get_hw_dev_name - return device name string
123 * used by hardware layer to print debugging information
124 **/
125char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
126{
127 struct ixgbe_adapter *adapter = hw->back;
128 struct net_device *netdev = adapter->netdev;
129 return netdev->name;
130} 118}
131#endif
132 119
133static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, 120static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
134 u8 msix_vector) 121 u8 msix_vector)
135{ 122{
136 u32 ivar, index; 123 u32 ivar, index;
137 124
@@ -144,13 +131,12 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
144} 131}
145 132
146static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 133static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
147 struct ixgbe_tx_buffer 134 struct ixgbe_tx_buffer
148 *tx_buffer_info) 135 *tx_buffer_info)
149{ 136{
150 if (tx_buffer_info->dma) { 137 if (tx_buffer_info->dma) {
151 pci_unmap_page(adapter->pdev, 138 pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
152 tx_buffer_info->dma, 139 tx_buffer_info->length, PCI_DMA_TODEVICE);
153 tx_buffer_info->length, PCI_DMA_TODEVICE);
154 tx_buffer_info->dma = 0; 140 tx_buffer_info->dma = 0;
155 } 141 }
156 if (tx_buffer_info->skb) { 142 if (tx_buffer_info->skb) {
@@ -161,107 +147,120 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
161} 147}
162 148
163static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 149static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
164 struct ixgbe_ring *tx_ring, 150 struct ixgbe_ring *tx_ring,
165 unsigned int eop, 151 unsigned int eop)
166 union ixgbe_adv_tx_desc *eop_desc)
167{ 152{
153 struct ixgbe_hw *hw = &adapter->hw;
154 u32 head, tail;
155
168 /* Detect a transmit hang in hardware, this serializes the 156 /* Detect a transmit hang in hardware, this serializes the
169 * check with the clearing of time_stamp and movement of i */ 157 * check with the clearing of time_stamp and movement of eop */
158 head = IXGBE_READ_REG(hw, tx_ring->head);
159 tail = IXGBE_READ_REG(hw, tx_ring->tail);
170 adapter->detect_tx_hung = false; 160 adapter->detect_tx_hung = false;
171 if (tx_ring->tx_buffer_info[eop].dma && 161 if ((head != tail) &&
162 tx_ring->tx_buffer_info[eop].time_stamp &&
172 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 163 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
173 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) { 164 !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
174 /* detected Tx unit hang */ 165 /* detected Tx unit hang */
166 union ixgbe_adv_tx_desc *tx_desc;
167 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
175 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 168 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
176 " TDH <%x>\n" 169 " Tx Queue <%d>\n"
177 " TDT <%x>\n" 170 " TDH, TDT <%x>, <%x>\n"
178 " next_to_use <%x>\n" 171 " next_to_use <%x>\n"
179 " next_to_clean <%x>\n" 172 " next_to_clean <%x>\n"
180 "tx_buffer_info[next_to_clean]\n" 173 "tx_buffer_info[next_to_clean]\n"
181 " time_stamp <%lx>\n" 174 " time_stamp <%lx>\n"
182 " next_to_watch <%x>\n" 175 " jiffies <%lx>\n",
183 " jiffies <%lx>\n" 176 tx_ring->queue_index,
184 " next_to_watch.status <%x>\n", 177 head, tail,
185 readl(adapter->hw.hw_addr + tx_ring->head), 178 tx_ring->next_to_use, eop,
186 readl(adapter->hw.hw_addr + tx_ring->tail), 179 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
187 tx_ring->next_to_use,
188 tx_ring->next_to_clean,
189 tx_ring->tx_buffer_info[eop].time_stamp,
190 eop, jiffies, eop_desc->wb.status);
191 return true; 180 return true;
192 } 181 }
193 182
194 return false; 183 return false;
195} 184}
196 185
197#define IXGBE_MAX_TXD_PWR 14 186#define IXGBE_MAX_TXD_PWR 14
198#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 187#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
199 188
200/* Tx Descriptors needed, worst case */ 189/* Tx Descriptors needed, worst case */
201#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ 190#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
202 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 191 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
203#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 192#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
204 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 193 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
194
195#define GET_TX_HEAD_FROM_RING(ring) (\
196 *(volatile u32 *) \
197 ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
198static void ixgbe_tx_timeout(struct net_device *netdev);
205 199
206/** 200/**
207 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes 201 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
208 * @adapter: board private structure 202 * @adapter: board private structure
203 * @tx_ring: tx ring to clean
209 **/ 204 **/
210static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter, 205static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
211 struct ixgbe_ring *tx_ring) 206 struct ixgbe_ring *tx_ring)
212{ 207{
213 struct net_device *netdev = adapter->netdev; 208 union ixgbe_adv_tx_desc *tx_desc;
214 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
215 struct ixgbe_tx_buffer *tx_buffer_info; 209 struct ixgbe_tx_buffer *tx_buffer_info;
216 unsigned int i, eop; 210 struct net_device *netdev = adapter->netdev;
217 bool cleaned = false; 211 struct sk_buff *skb;
218 unsigned int total_tx_bytes = 0, total_tx_packets = 0; 212 unsigned int i;
213 u32 head, oldhead;
214 unsigned int count = 0;
215 unsigned int total_bytes = 0, total_packets = 0;
219 216
217 rmb();
218 head = GET_TX_HEAD_FROM_RING(tx_ring);
219 head = le32_to_cpu(head);
220 i = tx_ring->next_to_clean; 220 i = tx_ring->next_to_clean;
221 eop = tx_ring->tx_buffer_info[i].next_to_watch; 221 while (1) {
222 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 222 while (i != head) {
223 while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
224 cleaned = false;
225 while (!cleaned) {
226 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 223 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
227 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 224 tx_buffer_info = &tx_ring->tx_buffer_info[i];
228 cleaned = (i == eop); 225 skb = tx_buffer_info->skb;
229 226
230 tx_ring->stats.bytes += tx_buffer_info->length; 227 if (skb) {
231 if (cleaned) {
232 struct sk_buff *skb = tx_buffer_info->skb;
233 unsigned int segs, bytecount; 228 unsigned int segs, bytecount;
229
230 /* gso_segs is currently only valid for tcp */
234 segs = skb_shinfo(skb)->gso_segs ?: 1; 231 segs = skb_shinfo(skb)->gso_segs ?: 1;
235 /* multiply data chunks by size of headers */ 232 /* multiply data chunks by size of headers */
236 bytecount = ((segs - 1) * skb_headlen(skb)) + 233 bytecount = ((segs - 1) * skb_headlen(skb)) +
237 skb->len; 234 skb->len;
238 total_tx_packets += segs; 235 total_packets += segs;
239 total_tx_bytes += bytecount; 236 total_bytes += bytecount;
240 } 237 }
238
241 ixgbe_unmap_and_free_tx_resource(adapter, 239 ixgbe_unmap_and_free_tx_resource(adapter,
242 tx_buffer_info); 240 tx_buffer_info);
243 tx_desc->wb.status = 0;
244 241
245 i++; 242 i++;
246 if (i == tx_ring->count) 243 if (i == tx_ring->count)
247 i = 0; 244 i = 0;
248 }
249
250 tx_ring->stats.packets++;
251
252 eop = tx_ring->tx_buffer_info[i].next_to_watch;
253 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
254
255 /* weight of a sort for tx, avoid endless transmit cleanup */
256 if (total_tx_packets >= tx_ring->work_limit)
257 break;
258 }
259 245
246 count++;
247 if (count == tx_ring->count)
248 goto done_cleaning;
249 }
250 oldhead = head;
251 rmb();
252 head = GET_TX_HEAD_FROM_RING(tx_ring);
253 head = le32_to_cpu(head);
254 if (head == oldhead)
255 goto done_cleaning;
256 } /* while (1) */
257
258done_cleaning:
260 tx_ring->next_to_clean = i; 259 tx_ring->next_to_clean = i;
261 260
262#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 261#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
263 if (total_tx_packets && netif_carrier_ok(netdev) && 262 if (unlikely(count && netif_carrier_ok(netdev) &&
264 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { 263 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
265 /* Make sure that anybody stopping the queue after this 264 /* Make sure that anybody stopping the queue after this
266 * sees the new next_to_clean. 265 * sees the new next_to_clean.
267 */ 266 */
@@ -269,59 +268,68 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
269 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 268 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
270 !test_bit(__IXGBE_DOWN, &adapter->state)) { 269 !test_bit(__IXGBE_DOWN, &adapter->state)) {
271 netif_wake_subqueue(netdev, tx_ring->queue_index); 270 netif_wake_subqueue(netdev, tx_ring->queue_index);
272 adapter->restart_queue++; 271 ++adapter->restart_queue;
273 } 272 }
274 } 273 }
275 274
276 if (adapter->detect_tx_hung) 275 if (adapter->detect_tx_hung) {
277 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) 276 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
278 netif_stop_subqueue(netdev, tx_ring->queue_index); 277 /* schedule immediate reset if we believe we hung */
279 278 DPRINTK(PROBE, INFO,
280 if (total_tx_packets >= tx_ring->work_limit) 279 "tx hang %d detected, resetting adapter\n",
281 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); 280 adapter->tx_timeout_count + 1);
281 ixgbe_tx_timeout(adapter->netdev);
282 }
283 }
282 284
283 tx_ring->total_bytes += total_tx_bytes; 285 /* re-arm the interrupt */
284 tx_ring->total_packets += total_tx_packets; 286 if ((total_packets >= tx_ring->work_limit) ||
285 adapter->net_stats.tx_bytes += total_tx_bytes; 287 (count == tx_ring->count))
286 adapter->net_stats.tx_packets += total_tx_packets; 288 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);
287 cleaned = total_tx_packets ? true : false; 289
288 return cleaned; 290 tx_ring->total_bytes += total_bytes;
291 tx_ring->total_packets += total_packets;
292 tx_ring->stats.bytes += total_bytes;
293 tx_ring->stats.packets += total_packets;
294 adapter->net_stats.tx_bytes += total_bytes;
295 adapter->net_stats.tx_packets += total_packets;
296 return (total_packets ? true : false);
289} 297}
290 298
291#ifdef CONFIG_DCA 299#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
292static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 300static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
293 struct ixgbe_ring *rxr) 301 struct ixgbe_ring *rx_ring)
294{ 302{
295 u32 rxctrl; 303 u32 rxctrl;
296 int cpu = get_cpu(); 304 int cpu = get_cpu();
297 int q = rxr - adapter->rx_ring; 305 int q = rx_ring - adapter->rx_ring;
298 306
299 if (rxr->cpu != cpu) { 307 if (rx_ring->cpu != cpu) {
300 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q)); 308 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
301 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK; 309 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
302 rxctrl |= dca_get_tag(cpu); 310 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
303 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN; 311 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
304 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN; 312 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
305 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl); 313 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
306 rxr->cpu = cpu; 314 rx_ring->cpu = cpu;
307 } 315 }
308 put_cpu(); 316 put_cpu();
309} 317}
310 318
311static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 319static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
312 struct ixgbe_ring *txr) 320 struct ixgbe_ring *tx_ring)
313{ 321{
314 u32 txctrl; 322 u32 txctrl;
315 int cpu = get_cpu(); 323 int cpu = get_cpu();
316 int q = txr - adapter->tx_ring; 324 int q = tx_ring - adapter->tx_ring;
317 325
318 if (txr->cpu != cpu) { 326 if (tx_ring->cpu != cpu) {
319 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q)); 327 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
320 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 328 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
321 txctrl |= dca_get_tag(cpu); 329 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
322 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 330 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
323 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl); 331 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
324 txr->cpu = cpu; 332 tx_ring->cpu = cpu;
325 } 333 }
326 put_cpu(); 334 put_cpu();
327} 335}
@@ -351,11 +359,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
351 359
352 switch (event) { 360 switch (event) {
353 case DCA_PROVIDER_ADD: 361 case DCA_PROVIDER_ADD:
354 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 362 /* if we're already enabled, don't do it again */
363 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
364 break;
355 /* Always use CB2 mode, difference is masked 365 /* Always use CB2 mode, difference is masked
356 * in the CB driver. */ 366 * in the CB driver. */
357 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 367 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
358 if (dca_add_requester(dev) == 0) { 368 if (dca_add_requester(dev) == 0) {
369 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
359 ixgbe_setup_dca(adapter); 370 ixgbe_setup_dca(adapter);
360 break; 371 break;
361 } 372 }
@@ -372,7 +383,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
372 return 0; 383 return 0;
373} 384}
374 385
375#endif /* CONFIG_DCA */ 386#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
376/** 387/**
377 * ixgbe_receive_skb - Send a completed packet up the stack 388 * ixgbe_receive_skb - Send a completed packet up the stack
378 * @adapter: board private structure 389 * @adapter: board private structure
@@ -382,8 +393,8 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
382 * @rx_desc: rx descriptor 393 * @rx_desc: rx descriptor
383 **/ 394 **/
384static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, 395static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
385 struct sk_buff *skb, u8 status, 396 struct sk_buff *skb, u8 status,
386 struct ixgbe_ring *ring, 397 struct ixgbe_ring *ring,
387 union ixgbe_adv_rx_desc *rx_desc) 398 union ixgbe_adv_rx_desc *rx_desc)
388{ 399{
389 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 400 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
@@ -420,14 +431,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
420 * @skb: skb currently being received and modified 431 * @skb: skb currently being received and modified
421 **/ 432 **/
422static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, 433static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
423 u32 status_err, 434 u32 status_err, struct sk_buff *skb)
424 struct sk_buff *skb)
425{ 435{
426 skb->ip_summed = CHECKSUM_NONE; 436 skb->ip_summed = CHECKSUM_NONE;
427 437
428 /* Ignore Checksum bit is set, or rx csum disabled */ 438 /* Rx csum disabled */
429 if ((status_err & IXGBE_RXD_STAT_IXSM) || 439 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
430 !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
431 return; 440 return;
432 441
433 /* if IP and error */ 442 /* if IP and error */
@@ -455,37 +464,44 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
455 * @adapter: address of board private structure 464 * @adapter: address of board private structure
456 **/ 465 **/
457static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 466static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
458 struct ixgbe_ring *rx_ring, 467 struct ixgbe_ring *rx_ring,
459 int cleaned_count) 468 int cleaned_count)
460{ 469{
461 struct net_device *netdev = adapter->netdev;
462 struct pci_dev *pdev = adapter->pdev; 470 struct pci_dev *pdev = adapter->pdev;
463 union ixgbe_adv_rx_desc *rx_desc; 471 union ixgbe_adv_rx_desc *rx_desc;
464 struct ixgbe_rx_buffer *rx_buffer_info; 472 struct ixgbe_rx_buffer *bi;
465 struct sk_buff *skb;
466 unsigned int i; 473 unsigned int i;
467 unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN; 474 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
468 475
469 i = rx_ring->next_to_use; 476 i = rx_ring->next_to_use;
470 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 477 bi = &rx_ring->rx_buffer_info[i];
471 478
472 while (cleaned_count--) { 479 while (cleaned_count--) {
473 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 480 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
474 481
475 if (!rx_buffer_info->page && 482 if (!bi->page_dma &&
476 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { 483 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
477 rx_buffer_info->page = alloc_page(GFP_ATOMIC); 484 if (!bi->page) {
478 if (!rx_buffer_info->page) { 485 bi->page = alloc_page(GFP_ATOMIC);
479 adapter->alloc_rx_page_failed++; 486 if (!bi->page) {
480 goto no_buffers; 487 adapter->alloc_rx_page_failed++;
488 goto no_buffers;
489 }
490 bi->page_offset = 0;
491 } else {
492 /* use a half page if we're re-using */
493 bi->page_offset ^= (PAGE_SIZE / 2);
481 } 494 }
482 rx_buffer_info->page_dma = 495
483 pci_map_page(pdev, rx_buffer_info->page, 496 bi->page_dma = pci_map_page(pdev, bi->page,
484 0, PAGE_SIZE, PCI_DMA_FROMDEVICE); 497 bi->page_offset,
498 (PAGE_SIZE / 2),
499 PCI_DMA_FROMDEVICE);
485 } 500 }
486 501
487 if (!rx_buffer_info->skb) { 502 if (!bi->skb) {
488 skb = netdev_alloc_skb(netdev, bufsz); 503 struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
504 bufsz);
489 505
490 if (!skb) { 506 if (!skb) {
491 adapter->alloc_rx_buff_failed++; 507 adapter->alloc_rx_buff_failed++;
@@ -499,28 +515,25 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
499 */ 515 */
500 skb_reserve(skb, NET_IP_ALIGN); 516 skb_reserve(skb, NET_IP_ALIGN);
501 517
502 rx_buffer_info->skb = skb; 518 bi->skb = skb;
503 rx_buffer_info->dma = pci_map_single(pdev, skb->data, 519 bi->dma = pci_map_single(pdev, skb->data, bufsz,
504 bufsz, 520 PCI_DMA_FROMDEVICE);
505 PCI_DMA_FROMDEVICE);
506 } 521 }
507 /* Refresh the desc even if buffer_addrs didn't change because 522 /* Refresh the desc even if buffer_addrs didn't change because
508 * each write-back erases this info. */ 523 * each write-back erases this info. */
509 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 524 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
510 rx_desc->read.pkt_addr = 525 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
511 cpu_to_le64(rx_buffer_info->page_dma); 526 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
512 rx_desc->read.hdr_addr =
513 cpu_to_le64(rx_buffer_info->dma);
514 } else { 527 } else {
515 rx_desc->read.pkt_addr = 528 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
516 cpu_to_le64(rx_buffer_info->dma);
517 } 529 }
518 530
519 i++; 531 i++;
520 if (i == rx_ring->count) 532 if (i == rx_ring->count)
521 i = 0; 533 i = 0;
522 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 534 bi = &rx_ring->rx_buffer_info[i];
523 } 535 }
536
524no_buffers: 537no_buffers:
525 if (rx_ring->next_to_use != i) { 538 if (rx_ring->next_to_use != i) {
526 rx_ring->next_to_use = i; 539 rx_ring->next_to_use = i;
@@ -538,46 +551,54 @@ no_buffers:
538 } 551 }
539} 552}
540 553
554static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
555{
556 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
557}
558
559static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
560{
561 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
562}
563
541static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, 564static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
542 struct ixgbe_ring *rx_ring, 565 struct ixgbe_ring *rx_ring,
543 int *work_done, int work_to_do) 566 int *work_done, int work_to_do)
544{ 567{
545 struct net_device *netdev = adapter->netdev;
546 struct pci_dev *pdev = adapter->pdev; 568 struct pci_dev *pdev = adapter->pdev;
547 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 569 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
548 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 570 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
549 struct sk_buff *skb; 571 struct sk_buff *skb;
550 unsigned int i; 572 unsigned int i;
551 u32 upper_len, len, staterr; 573 u32 len, staterr;
552 u16 hdr_info; 574 u16 hdr_info;
553 bool cleaned = false; 575 bool cleaned = false;
554 int cleaned_count = 0; 576 int cleaned_count = 0;
555 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 577 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
556 578
557 i = rx_ring->next_to_clean; 579 i = rx_ring->next_to_clean;
558 upper_len = 0;
559 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 580 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
560 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 581 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
561 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 582 rx_buffer_info = &rx_ring->rx_buffer_info[i];
562 583
563 while (staterr & IXGBE_RXD_STAT_DD) { 584 while (staterr & IXGBE_RXD_STAT_DD) {
585 u32 upper_len = 0;
564 if (*work_done >= work_to_do) 586 if (*work_done >= work_to_do)
565 break; 587 break;
566 (*work_done)++; 588 (*work_done)++;
567 589
568 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 590 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
569 hdr_info = 591 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
570 le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info); 592 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
571 len = 593 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
572 ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
573 IXGBE_RXDADV_HDRBUFLEN_SHIFT);
574 if (hdr_info & IXGBE_RXDADV_SPH) 594 if (hdr_info & IXGBE_RXDADV_SPH)
575 adapter->rx_hdr_split++; 595 adapter->rx_hdr_split++;
576 if (len > IXGBE_RX_HDR_SIZE) 596 if (len > IXGBE_RX_HDR_SIZE)
577 len = IXGBE_RX_HDR_SIZE; 597 len = IXGBE_RX_HDR_SIZE;
578 upper_len = le16_to_cpu(rx_desc->wb.upper.length); 598 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
579 } else 599 } else {
580 len = le16_to_cpu(rx_desc->wb.upper.length); 600 len = le16_to_cpu(rx_desc->wb.upper.length);
601 }
581 602
582 cleaned = true; 603 cleaned = true;
583 skb = rx_buffer_info->skb; 604 skb = rx_buffer_info->skb;
@@ -586,18 +607,25 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
586 607
587 if (len && !skb_shinfo(skb)->nr_frags) { 608 if (len && !skb_shinfo(skb)->nr_frags) {
588 pci_unmap_single(pdev, rx_buffer_info->dma, 609 pci_unmap_single(pdev, rx_buffer_info->dma,
589 adapter->rx_buf_len + NET_IP_ALIGN, 610 rx_ring->rx_buf_len + NET_IP_ALIGN,
590 PCI_DMA_FROMDEVICE); 611 PCI_DMA_FROMDEVICE);
591 skb_put(skb, len); 612 skb_put(skb, len);
592 } 613 }
593 614
594 if (upper_len) { 615 if (upper_len) {
595 pci_unmap_page(pdev, rx_buffer_info->page_dma, 616 pci_unmap_page(pdev, rx_buffer_info->page_dma,
596 PAGE_SIZE, PCI_DMA_FROMDEVICE); 617 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
597 rx_buffer_info->page_dma = 0; 618 rx_buffer_info->page_dma = 0;
598 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 619 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
599 rx_buffer_info->page, 0, upper_len); 620 rx_buffer_info->page,
600 rx_buffer_info->page = NULL; 621 rx_buffer_info->page_offset,
622 upper_len);
623
624 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
625 (page_count(rx_buffer_info->page) != 1))
626 rx_buffer_info->page = NULL;
627 else
628 get_page(rx_buffer_info->page);
601 629
602 skb->len += upper_len; 630 skb->len += upper_len;
603 skb->data_len += upper_len; 631 skb->data_len += upper_len;
@@ -620,6 +648,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
620 rx_buffer_info->skb = next_buffer->skb; 648 rx_buffer_info->skb = next_buffer->skb;
621 rx_buffer_info->dma = next_buffer->dma; 649 rx_buffer_info->dma = next_buffer->dma;
622 next_buffer->skb = skb; 650 next_buffer->skb = skb;
651 next_buffer->dma = 0;
623 adapter->non_eop_descs++; 652 adapter->non_eop_descs++;
624 goto next_desc; 653 goto next_desc;
625 } 654 }
@@ -635,9 +664,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
635 total_rx_bytes += skb->len; 664 total_rx_bytes += skb->len;
636 total_rx_packets++; 665 total_rx_packets++;
637 666
638 skb->protocol = eth_type_trans(skb, netdev); 667 skb->protocol = eth_type_trans(skb, adapter->netdev);
639 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); 668 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
640 netdev->last_rx = jiffies; 669 adapter->netdev->last_rx = jiffies;
641 670
642next_desc: 671next_desc:
643 rx_desc->wb.upper.status_error = 0; 672 rx_desc->wb.upper.status_error = 0;
@@ -666,9 +695,6 @@ next_desc:
666 if (cleaned_count) 695 if (cleaned_count)
667 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 696 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
668 697
669 adapter->net_stats.rx_bytes += total_rx_bytes;
670 adapter->net_stats.rx_packets += total_rx_packets;
671
672 rx_ring->total_packets += total_rx_packets; 698 rx_ring->total_packets += total_rx_packets;
673 rx_ring->total_bytes += total_rx_bytes; 699 rx_ring->total_bytes += total_rx_bytes;
674 adapter->net_stats.rx_bytes += total_rx_bytes; 700 adapter->net_stats.rx_bytes += total_rx_bytes;
@@ -700,43 +726,43 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
700 q_vector = &adapter->q_vector[v_idx]; 726 q_vector = &adapter->q_vector[v_idx];
701 /* XXX for_each_bit(...) */ 727 /* XXX for_each_bit(...) */
702 r_idx = find_first_bit(q_vector->rxr_idx, 728 r_idx = find_first_bit(q_vector->rxr_idx,
703 adapter->num_rx_queues); 729 adapter->num_rx_queues);
704 730
705 for (i = 0; i < q_vector->rxr_count; i++) { 731 for (i = 0; i < q_vector->rxr_count; i++) {
706 j = adapter->rx_ring[r_idx].reg_idx; 732 j = adapter->rx_ring[r_idx].reg_idx;
707 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); 733 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
708 r_idx = find_next_bit(q_vector->rxr_idx, 734 r_idx = find_next_bit(q_vector->rxr_idx,
709 adapter->num_rx_queues, 735 adapter->num_rx_queues,
710 r_idx + 1); 736 r_idx + 1);
711 } 737 }
712 r_idx = find_first_bit(q_vector->txr_idx, 738 r_idx = find_first_bit(q_vector->txr_idx,
713 adapter->num_tx_queues); 739 adapter->num_tx_queues);
714 740
715 for (i = 0; i < q_vector->txr_count; i++) { 741 for (i = 0; i < q_vector->txr_count; i++) {
716 j = adapter->tx_ring[r_idx].reg_idx; 742 j = adapter->tx_ring[r_idx].reg_idx;
717 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); 743 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
718 r_idx = find_next_bit(q_vector->txr_idx, 744 r_idx = find_next_bit(q_vector->txr_idx,
719 adapter->num_tx_queues, 745 adapter->num_tx_queues,
720 r_idx + 1); 746 r_idx + 1);
721 } 747 }
722 748
723 /* if this is a tx only vector use half the irq (tx) rate */ 749 /* if this is a tx only vector halve the interrupt rate */
724 if (q_vector->txr_count && !q_vector->rxr_count) 750 if (q_vector->txr_count && !q_vector->rxr_count)
725 q_vector->eitr = adapter->tx_eitr; 751 q_vector->eitr = (adapter->eitr_param >> 1);
726 else 752 else
727 /* rx only or mixed */ 753 /* rx only */
728 q_vector->eitr = adapter->rx_eitr; 754 q_vector->eitr = adapter->eitr_param;
729 755
730 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 756 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
731 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 757 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
732 } 758 }
733 759
734 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); 760 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
735 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 761 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
736 762
737 /* set up to autoclear timer, lsc, and the vectors */ 763 /* set up to autoclear timer, and the vectors */
738 mask = IXGBE_EIMS_ENABLE_MASK; 764 mask = IXGBE_EIMS_ENABLE_MASK;
739 mask &= ~IXGBE_EIMS_OTHER; 765 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
740 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 766 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
741} 767}
742 768
@@ -766,8 +792,8 @@ enum latency_range {
766 * parameter (see ixgbe_param.c) 792 * parameter (see ixgbe_param.c)
767 **/ 793 **/
768static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, 794static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
769 u32 eitr, u8 itr_setting, 795 u32 eitr, u8 itr_setting,
770 int packets, int bytes) 796 int packets, int bytes)
771{ 797{
772 unsigned int retval = itr_setting; 798 unsigned int retval = itr_setting;
773 u32 timepassed_us; 799 u32 timepassed_us;
@@ -814,40 +840,40 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
814 u32 new_itr; 840 u32 new_itr;
815 u8 current_itr, ret_itr; 841 u8 current_itr, ret_itr;
816 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / 842 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
817 sizeof(struct ixgbe_q_vector); 843 sizeof(struct ixgbe_q_vector);
818 struct ixgbe_ring *rx_ring, *tx_ring; 844 struct ixgbe_ring *rx_ring, *tx_ring;
819 845
820 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 846 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
821 for (i = 0; i < q_vector->txr_count; i++) { 847 for (i = 0; i < q_vector->txr_count; i++) {
822 tx_ring = &(adapter->tx_ring[r_idx]); 848 tx_ring = &(adapter->tx_ring[r_idx]);
823 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 849 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
824 q_vector->tx_eitr, 850 q_vector->tx_itr,
825 tx_ring->total_packets, 851 tx_ring->total_packets,
826 tx_ring->total_bytes); 852 tx_ring->total_bytes);
827 /* if the result for this queue would decrease interrupt 853 /* if the result for this queue would decrease interrupt
828 * rate for this vector then use that result */ 854 * rate for this vector then use that result */
829 q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ? 855 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
830 q_vector->tx_eitr - 1 : ret_itr); 856 q_vector->tx_itr - 1 : ret_itr);
831 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 857 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
832 r_idx + 1); 858 r_idx + 1);
833 } 859 }
834 860
835 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 861 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
836 for (i = 0; i < q_vector->rxr_count; i++) { 862 for (i = 0; i < q_vector->rxr_count; i++) {
837 rx_ring = &(adapter->rx_ring[r_idx]); 863 rx_ring = &(adapter->rx_ring[r_idx]);
838 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 864 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
839 q_vector->rx_eitr, 865 q_vector->rx_itr,
840 rx_ring->total_packets, 866 rx_ring->total_packets,
841 rx_ring->total_bytes); 867 rx_ring->total_bytes);
842 /* if the result for this queue would decrease interrupt 868 /* if the result for this queue would decrease interrupt
843 * rate for this vector then use that result */ 869 * rate for this vector then use that result */
844 q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ? 870 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
845 q_vector->rx_eitr - 1 : ret_itr); 871 q_vector->rx_itr - 1 : ret_itr);
846 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 872 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
847 r_idx + 1); 873 r_idx + 1);
848 } 874 }
849 875
850 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 876 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
851 877
852 switch (current_itr) { 878 switch (current_itr) {
853 /* counts and packets in update_itr are dependent on these numbers */ 879 /* counts and packets in update_itr are dependent on these numbers */
@@ -871,13 +897,27 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
871 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 897 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
872 /* must write high and low 16 bits to reset counter */ 898 /* must write high and low 16 bits to reset counter */
873 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, 899 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
874 itr_reg); 900 itr_reg);
875 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16); 901 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
876 } 902 }
877 903
878 return; 904 return;
879} 905}
880 906
907
908static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
909{
910 struct ixgbe_hw *hw = &adapter->hw;
911
912 adapter->lsc_int++;
913 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
914 adapter->link_check_timeout = jiffies;
915 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
916 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
917 schedule_work(&adapter->watchdog_task);
918 }
919}
920
881static irqreturn_t ixgbe_msix_lsc(int irq, void *data) 921static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
882{ 922{
883 struct net_device *netdev = data; 923 struct net_device *netdev = data;
@@ -885,11 +925,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
885 struct ixgbe_hw *hw = &adapter->hw; 925 struct ixgbe_hw *hw = &adapter->hw;
886 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 926 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
887 927
888 if (eicr & IXGBE_EICR_LSC) { 928 if (eicr & IXGBE_EICR_LSC)
889 adapter->lsc_int++; 929 ixgbe_check_lsc(adapter);
890 if (!test_bit(__IXGBE_DOWN, &adapter->state))
891 mod_timer(&adapter->watchdog_timer, jiffies);
892 }
893 930
894 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 931 if (!test_bit(__IXGBE_DOWN, &adapter->state))
895 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 932 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -901,7 +938,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
901{ 938{
902 struct ixgbe_q_vector *q_vector = data; 939 struct ixgbe_q_vector *q_vector = data;
903 struct ixgbe_adapter *adapter = q_vector->adapter; 940 struct ixgbe_adapter *adapter = q_vector->adapter;
904 struct ixgbe_ring *txr; 941 struct ixgbe_ring *tx_ring;
905 int i, r_idx; 942 int i, r_idx;
906 943
907 if (!q_vector->txr_count) 944 if (!q_vector->txr_count)
@@ -909,16 +946,16 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
909 946
910 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 947 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
911 for (i = 0; i < q_vector->txr_count; i++) { 948 for (i = 0; i < q_vector->txr_count; i++) {
912 txr = &(adapter->tx_ring[r_idx]); 949 tx_ring = &(adapter->tx_ring[r_idx]);
913#ifdef CONFIG_DCA 950#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
914 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 951 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
915 ixgbe_update_tx_dca(adapter, txr); 952 ixgbe_update_tx_dca(adapter, tx_ring);
916#endif 953#endif
917 txr->total_bytes = 0; 954 tx_ring->total_bytes = 0;
918 txr->total_packets = 0; 955 tx_ring->total_packets = 0;
919 ixgbe_clean_tx_irq(adapter, txr); 956 ixgbe_clean_tx_irq(adapter, tx_ring);
920 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 957 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
921 r_idx + 1); 958 r_idx + 1);
922 } 959 }
923 960
924 return IRQ_HANDLED; 961 return IRQ_HANDLED;
@@ -933,18 +970,26 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
933{ 970{
934 struct ixgbe_q_vector *q_vector = data; 971 struct ixgbe_q_vector *q_vector = data;
935 struct ixgbe_adapter *adapter = q_vector->adapter; 972 struct ixgbe_adapter *adapter = q_vector->adapter;
936 struct ixgbe_ring *rxr; 973 struct ixgbe_ring *rx_ring;
937 int r_idx; 974 int r_idx;
975 int i;
938 976
939 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 977 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
978 for (i = 0; i < q_vector->rxr_count; i++) {
979 rx_ring = &(adapter->rx_ring[r_idx]);
980 rx_ring->total_bytes = 0;
981 rx_ring->total_packets = 0;
982 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
983 r_idx + 1);
984 }
985
940 if (!q_vector->rxr_count) 986 if (!q_vector->rxr_count)
941 return IRQ_HANDLED; 987 return IRQ_HANDLED;
942 988
943 rxr = &(adapter->rx_ring[r_idx]); 989 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
990 rx_ring = &(adapter->rx_ring[r_idx]);
944 /* disable interrupts on this vector only */ 991 /* disable interrupts on this vector only */
945 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx); 992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
946 rxr->total_bytes = 0;
947 rxr->total_packets = 0;
948 netif_rx_schedule(adapter->netdev, &q_vector->napi); 993 netif_rx_schedule(adapter->netdev, &q_vector->napi);
949 994
950 return IRQ_HANDLED; 995 return IRQ_HANDLED;
@@ -963,39 +1008,90 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
963 * @napi: napi struct with our devices info in it 1008 * @napi: napi struct with our devices info in it
964 * @budget: amount of work driver is allowed to do this pass, in packets 1009 * @budget: amount of work driver is allowed to do this pass, in packets
965 * 1010 *
1011 * This function is optimized for cleaning one queue only on a single
1012 * q_vector!!!
966 **/ 1013 **/
967static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) 1014static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
968{ 1015{
969 struct ixgbe_q_vector *q_vector = 1016 struct ixgbe_q_vector *q_vector =
970 container_of(napi, struct ixgbe_q_vector, napi); 1017 container_of(napi, struct ixgbe_q_vector, napi);
971 struct ixgbe_adapter *adapter = q_vector->adapter; 1018 struct ixgbe_adapter *adapter = q_vector->adapter;
972 struct ixgbe_ring *rxr; 1019 struct ixgbe_ring *rx_ring = NULL;
973 int work_done = 0; 1020 int work_done = 0;
974 long r_idx; 1021 long r_idx;
975 1022
976 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1023 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
977 rxr = &(adapter->rx_ring[r_idx]); 1024 rx_ring = &(adapter->rx_ring[r_idx]);
978#ifdef CONFIG_DCA 1025#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
979 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1026 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
980 ixgbe_update_rx_dca(adapter, rxr); 1027 ixgbe_update_rx_dca(adapter, rx_ring);
981#endif 1028#endif
982 1029
983 ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); 1030 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
984 1031
985 /* If all Rx work done, exit the polling mode */ 1032 /* If all Rx work done, exit the polling mode */
986 if (work_done < budget) { 1033 if (work_done < budget) {
987 netif_rx_complete(adapter->netdev, napi); 1034 netif_rx_complete(adapter->netdev, napi);
988 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 1035 if (adapter->itr_setting & 3)
989 ixgbe_set_itr_msix(q_vector); 1036 ixgbe_set_itr_msix(q_vector);
990 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1037 if (!test_bit(__IXGBE_DOWN, &adapter->state))
991 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx); 1038 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
992 } 1039 }
993 1040
994 return work_done; 1041 return work_done;
995} 1042}
996 1043
1044/**
1045 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
1046 * @napi: napi struct with our devices info in it
1047 * @budget: amount of work driver is allowed to do this pass, in packets
1048 *
1049 * This function will clean more than one rx queue associated with a
1050 * q_vector.
1051 **/
1052static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1053{
1054 struct ixgbe_q_vector *q_vector =
1055 container_of(napi, struct ixgbe_q_vector, napi);
1056 struct ixgbe_adapter *adapter = q_vector->adapter;
1057 struct ixgbe_ring *rx_ring = NULL;
1058 int work_done = 0, i;
1059 long r_idx;
1060 u16 enable_mask = 0;
1061
1062 /* attempt to distribute budget to each queue fairly, but don't allow
1063 * the budget to go below 1 because we'll exit polling */
1064 budget /= (q_vector->rxr_count ?: 1);
1065 budget = max(budget, 1);
1066 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1067 for (i = 0; i < q_vector->rxr_count; i++) {
1068 rx_ring = &(adapter->rx_ring[r_idx]);
1069#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
1070 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1071 ixgbe_update_rx_dca(adapter, rx_ring);
1072#endif
1073 ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
1074 enable_mask |= rx_ring->v_idx;
1075 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1076 r_idx + 1);
1077 }
1078
1079 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1080 rx_ring = &(adapter->rx_ring[r_idx]);
1081 /* If all Rx work done, exit the polling mode */
1082 if (work_done < budget) {
1083 netif_rx_complete(adapter->netdev, napi);
1084 if (adapter->itr_setting & 3)
1085 ixgbe_set_itr_msix(q_vector);
1086 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1087 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
1088 return 0;
1089 }
1090
1091 return work_done;
1092}
997static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 1093static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
998 int r_idx) 1094 int r_idx)
999{ 1095{
1000 a->q_vector[v_idx].adapter = a; 1096 a->q_vector[v_idx].adapter = a;
1001 set_bit(r_idx, a->q_vector[v_idx].rxr_idx); 1097 set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
@@ -1004,7 +1100,7 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1004} 1100}
1005 1101
1006static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 1102static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1007 int r_idx) 1103 int r_idx)
1008{ 1104{
1009 a->q_vector[v_idx].adapter = a; 1105 a->q_vector[v_idx].adapter = a;
1010 set_bit(r_idx, a->q_vector[v_idx].txr_idx); 1106 set_bit(r_idx, a->q_vector[v_idx].txr_idx);
@@ -1024,7 +1120,7 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1024 * mapping configurations in here. 1120 * mapping configurations in here.
1025 **/ 1121 **/
1026static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 1122static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
1027 int vectors) 1123 int vectors)
1028{ 1124{
1029 int v_start = 0; 1125 int v_start = 0;
1030 int rxr_idx = 0, txr_idx = 0; 1126 int rxr_idx = 0, txr_idx = 0;
@@ -1101,28 +1197,28 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1101 goto out; 1197 goto out;
1102 1198
1103#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 1199#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
1104 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 1200 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1105 &ixgbe_msix_clean_many) 1201 &ixgbe_msix_clean_many)
1106 for (vector = 0; vector < q_vectors; vector++) { 1202 for (vector = 0; vector < q_vectors; vector++) {
1107 handler = SET_HANDLER(&adapter->q_vector[vector]); 1203 handler = SET_HANDLER(&adapter->q_vector[vector]);
1108 sprintf(adapter->name[vector], "%s:v%d-%s", 1204 sprintf(adapter->name[vector], "%s:v%d-%s",
1109 netdev->name, vector, 1205 netdev->name, vector,
1110 (handler == &ixgbe_msix_clean_rx) ? "Rx" : 1206 (handler == &ixgbe_msix_clean_rx) ? "Rx" :
1111 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); 1207 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
1112 err = request_irq(adapter->msix_entries[vector].vector, 1208 err = request_irq(adapter->msix_entries[vector].vector,
1113 handler, 0, adapter->name[vector], 1209 handler, 0, adapter->name[vector],
1114 &(adapter->q_vector[vector])); 1210 &(adapter->q_vector[vector]));
1115 if (err) { 1211 if (err) {
1116 DPRINTK(PROBE, ERR, 1212 DPRINTK(PROBE, ERR,
1117 "request_irq failed for MSIX interrupt " 1213 "request_irq failed for MSIX interrupt "
1118 "Error: %d\n", err); 1214 "Error: %d\n", err);
1119 goto free_queue_irqs; 1215 goto free_queue_irqs;
1120 } 1216 }
1121 } 1217 }
1122 1218
1123 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 1219 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1124 err = request_irq(adapter->msix_entries[vector].vector, 1220 err = request_irq(adapter->msix_entries[vector].vector,
1125 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 1221 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
1126 if (err) { 1222 if (err) {
1127 DPRINTK(PROBE, ERR, 1223 DPRINTK(PROBE, ERR,
1128 "request_irq for msix_lsc failed: %d\n", err); 1224 "request_irq for msix_lsc failed: %d\n", err);
@@ -1134,7 +1230,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1134free_queue_irqs: 1230free_queue_irqs:
1135 for (i = vector - 1; i >= 0; i--) 1231 for (i = vector - 1; i >= 0; i--)
1136 free_irq(adapter->msix_entries[--vector].vector, 1232 free_irq(adapter->msix_entries[--vector].vector,
1137 &(adapter->q_vector[i])); 1233 &(adapter->q_vector[i]));
1138 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 1234 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1139 pci_disable_msix(adapter->pdev); 1235 pci_disable_msix(adapter->pdev);
1140 kfree(adapter->msix_entries); 1236 kfree(adapter->msix_entries);
@@ -1152,16 +1248,16 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1152 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; 1248 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1153 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; 1249 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1154 1250
1155 q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr, 1251 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
1156 q_vector->tx_eitr, 1252 q_vector->tx_itr,
1157 tx_ring->total_packets, 1253 tx_ring->total_packets,
1158 tx_ring->total_bytes); 1254 tx_ring->total_bytes);
1159 q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr, 1255 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
1160 q_vector->rx_eitr, 1256 q_vector->rx_itr,
1161 rx_ring->total_packets, 1257 rx_ring->total_packets,
1162 rx_ring->total_bytes); 1258 rx_ring->total_bytes);
1163 1259
1164 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 1260 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
1165 1261
1166 switch (current_itr) { 1262 switch (current_itr) {
1167 /* counts and packets in update_itr are dependent on these numbers */ 1263 /* counts and packets in update_itr are dependent on these numbers */
@@ -1206,19 +1302,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
1206 struct ixgbe_hw *hw = &adapter->hw; 1302 struct ixgbe_hw *hw = &adapter->hw;
1207 u32 eicr; 1303 u32 eicr;
1208 1304
1209
1210 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read 1305 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
1211 * therefore no explict interrupt disable is necessary */ 1306 * therefore no explict interrupt disable is necessary */
1212 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 1307 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1213 if (!eicr) 1308 if (!eicr) {
1309 /* shared interrupt alert!
1310 * make sure interrupts are enabled because the read will
1311 * have disabled interrupts due to EIAM */
1312 ixgbe_irq_enable(adapter);
1214 return IRQ_NONE; /* Not our interrupt */ 1313 return IRQ_NONE; /* Not our interrupt */
1215
1216 if (eicr & IXGBE_EICR_LSC) {
1217 adapter->lsc_int++;
1218 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1219 mod_timer(&adapter->watchdog_timer, jiffies);
1220 } 1314 }
1221 1315
1316 if (eicr & IXGBE_EICR_LSC)
1317 ixgbe_check_lsc(adapter);
1222 1318
1223 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { 1319 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
1224 adapter->tx_ring[0].total_packets = 0; 1320 adapter->tx_ring[0].total_packets = 0;
@@ -1261,10 +1357,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
1261 err = ixgbe_request_msix_irqs(adapter); 1357 err = ixgbe_request_msix_irqs(adapter);
1262 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1358 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1263 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, 1359 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
1264 netdev->name, netdev); 1360 netdev->name, netdev);
1265 } else { 1361 } else {
1266 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, 1362 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
1267 netdev->name, netdev); 1363 netdev->name, netdev);
1268 } 1364 }
1269 1365
1270 if (err) 1366 if (err)
@@ -1288,7 +1384,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1288 i--; 1384 i--;
1289 for (; i >= 0; i--) { 1385 for (; i >= 0; i--) {
1290 free_irq(adapter->msix_entries[i].vector, 1386 free_irq(adapter->msix_entries[i].vector,
1291 &(adapter->q_vector[i])); 1387 &(adapter->q_vector[i]));
1292 } 1388 }
1293 1389
1294 ixgbe_reset_q_vectors(adapter); 1390 ixgbe_reset_q_vectors(adapter);
@@ -1335,7 +1431,7 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1335 struct ixgbe_hw *hw = &adapter->hw; 1431 struct ixgbe_hw *hw = &adapter->hw;
1336 1432
1337 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1433 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
1338 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); 1434 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
1339 1435
1340 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); 1436 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
1341 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); 1437 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
@@ -1347,26 +1443,31 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1347} 1443}
1348 1444
1349/** 1445/**
1350 * ixgbe_configure_tx - Configure 8254x Transmit Unit after Reset 1446 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
1351 * @adapter: board private structure 1447 * @adapter: board private structure
1352 * 1448 *
1353 * Configure the Tx unit of the MAC after a reset. 1449 * Configure the Tx unit of the MAC after a reset.
1354 **/ 1450 **/
1355static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) 1451static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1356{ 1452{
1357 u64 tdba; 1453 u64 tdba, tdwba;
1358 struct ixgbe_hw *hw = &adapter->hw; 1454 struct ixgbe_hw *hw = &adapter->hw;
1359 u32 i, j, tdlen, txctrl; 1455 u32 i, j, tdlen, txctrl;
1360 1456
1361 /* Setup the HW Tx Head and Tail descriptor pointers */ 1457 /* Setup the HW Tx Head and Tail descriptor pointers */
1362 for (i = 0; i < adapter->num_tx_queues; i++) { 1458 for (i = 0; i < adapter->num_tx_queues; i++) {
1363 j = adapter->tx_ring[i].reg_idx; 1459 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1364 tdba = adapter->tx_ring[i].dma; 1460 j = ring->reg_idx;
1365 tdlen = adapter->tx_ring[i].count * 1461 tdba = ring->dma;
1366 sizeof(union ixgbe_adv_tx_desc); 1462 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1367 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), 1463 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
1368 (tdba & DMA_32BIT_MASK)); 1464 (tdba & DMA_32BIT_MASK));
1369 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); 1465 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1466 tdwba = ring->dma +
1467 (ring->count * sizeof(union ixgbe_adv_tx_desc));
1468 tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1469 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
1470 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
1370 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen); 1471 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1371 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); 1472 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1372 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); 1473 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
@@ -1375,20 +1476,66 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1375 /* Disable Tx Head Writeback RO bit, since this hoses 1476 /* Disable Tx Head Writeback RO bit, since this hoses
1376 * bookkeeping if things aren't delivered in order. 1477 * bookkeeping if things aren't delivered in order.
1377 */ 1478 */
1378 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); 1479 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1379 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1480 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1380 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl); 1481 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1381 } 1482 }
1382} 1483}
1383 1484
1384#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 1485#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1385 (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 1486
1487static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1488{
1489 struct ixgbe_ring *rx_ring;
1490 u32 srrctl;
1491 int queue0;
1492 unsigned long mask;
1493
1494 /* program one srrctl register per VMDq index */
1495 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
1496 long shift, len;
1497 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1498 len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
1499 shift = find_first_bit(&mask, len);
1500 queue0 = index & mask;
1501 index = (index & mask) >> shift;
1502 /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
1503 } else {
1504 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1505 queue0 = index & mask;
1506 index = index & mask;
1507 }
1508
1509 rx_ring = &adapter->rx_ring[queue0];
1510
1511 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1512
1513 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1514 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1515
1516 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1517 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1518 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1519 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1520 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1521 IXGBE_SRRCTL_BSIZEHDR_MASK);
1522 } else {
1523 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1524
1525 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1526 srrctl |= IXGBE_RXBUFFER_2048 >>
1527 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1528 else
1529 srrctl |= rx_ring->rx_buf_len >>
1530 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1531 }
1532 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1533}
1386 1534
1387#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1388/** 1535/**
1389 * ixgbe_get_skb_hdr - helper function for LRO header processing 1536 * ixgbe_get_skb_hdr - helper function for LRO header processing
1390 * @skb: pointer to sk_buff to be added to LRO packet 1537 * @skb: pointer to sk_buff to be added to LRO packet
1391 * @iphdr: pointer to tcp header structure 1538 * @iphdr: pointer to ip header structure
1392 * @tcph: pointer to tcp header structure 1539 * @tcph: pointer to tcp header structure
1393 * @hdr_flags: pointer to header flags 1540 * @hdr_flags: pointer to header flags
1394 * @priv: private data 1541 * @priv: private data
@@ -1399,8 +1546,8 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1399 union ixgbe_adv_rx_desc *rx_desc = priv; 1546 union ixgbe_adv_rx_desc *rx_desc = priv;
1400 1547
1401 /* Verify that this is a valid IPv4 TCP packet */ 1548 /* Verify that this is a valid IPv4 TCP packet */
1402 if (!(rx_desc->wb.lower.lo_dword.pkt_info & 1549 if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
1403 (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP))) 1550 (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
1404 return -1; 1551 return -1;
1405 1552
1406 /* Set network headers */ 1553 /* Set network headers */
@@ -1412,8 +1559,11 @@ static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1412 return 0; 1559 return 0;
1413} 1560}
1414 1561
1562#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1563 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1564
1415/** 1565/**
1416 * ixgbe_configure_rx - Configure 8254x Receive Unit after Reset 1566 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
1417 * @adapter: board private structure 1567 * @adapter: board private structure
1418 * 1568 *
1419 * Configure the Rx unit of the MAC after a reset. 1569 * Configure the Rx unit of the MAC after a reset.
@@ -1426,25 +1576,26 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1426 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1576 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1427 int i, j; 1577 int i, j;
1428 u32 rdlen, rxctrl, rxcsum; 1578 u32 rdlen, rxctrl, rxcsum;
1429 u32 random[10]; 1579 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
1580 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1581 0x6A3E67EA, 0x14364D17, 0x3BED200D};
1430 u32 fctrl, hlreg0; 1582 u32 fctrl, hlreg0;
1431 u32 pages; 1583 u32 pages;
1432 u32 reta = 0, mrqc, srrctl; 1584 u32 reta = 0, mrqc;
1585 u32 rdrxctl;
1586 int rx_buf_len;
1433 1587
1434 /* Decide whether to use packet split mode or not */ 1588 /* Decide whether to use packet split mode or not */
1435 if (netdev->mtu > ETH_DATA_LEN) 1589 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1436 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1437 else
1438 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1439 1590
1440 /* Set the RX buffer length according to the mode */ 1591 /* Set the RX buffer length according to the mode */
1441 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1592 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1442 adapter->rx_buf_len = IXGBE_RX_HDR_SIZE; 1593 rx_buf_len = IXGBE_RX_HDR_SIZE;
1443 } else { 1594 } else {
1444 if (netdev->mtu <= ETH_DATA_LEN) 1595 if (netdev->mtu <= ETH_DATA_LEN)
1445 adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1596 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1446 else 1597 else
1447 adapter->rx_buf_len = ALIGN(max_frame, 1024); 1598 rx_buf_len = ALIGN(max_frame, 1024);
1448 } 1599 }
1449 1600
1450 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1601 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
@@ -1461,28 +1612,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1461 1612
1462 pages = PAGE_USE_COUNT(adapter->netdev->mtu); 1613 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1463 1614
1464 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
1465 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1466 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1467
1468 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1469 srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1470 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1471 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1472 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1473 IXGBE_SRRCTL_BSIZEHDR_MASK);
1474 } else {
1475 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1476
1477 if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1478 srrctl |=
1479 IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1480 else
1481 srrctl |=
1482 adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1483 }
1484 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);
1485
1486 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1615 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1487 /* disable receives while setting up the descriptors */ 1616 /* disable receives while setting up the descriptors */
1488 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1617 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1492,25 +1621,43 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1492 * the Base and Length of the Rx Descriptor Ring */ 1621 * the Base and Length of the Rx Descriptor Ring */
1493 for (i = 0; i < adapter->num_rx_queues; i++) { 1622 for (i = 0; i < adapter->num_rx_queues; i++) {
1494 rdba = adapter->rx_ring[i].dma; 1623 rdba = adapter->rx_ring[i].dma;
1495 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK)); 1624 j = adapter->rx_ring[i].reg_idx;
1496 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); 1625 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
1497 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen); 1626 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
1498 IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); 1627 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
1499 IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); 1628 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
1500 adapter->rx_ring[i].head = IXGBE_RDH(i); 1629 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
1501 adapter->rx_ring[i].tail = IXGBE_RDT(i); 1630 adapter->rx_ring[i].head = IXGBE_RDH(j);
1502 } 1631 adapter->rx_ring[i].tail = IXGBE_RDT(j);
1503 1632 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1504 /* Intitial LRO Settings */ 1633 /* Intitial LRO Settings */
1505 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE; 1634 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1506 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS; 1635 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1507 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr; 1636 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1508 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID; 1637 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1509 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 1638 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1510 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI; 1639 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1511 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev; 1640 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1512 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; 1641 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1513 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1642 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1643
1644 ixgbe_configure_srrctl(adapter, j);
1645 }
1646
1647 /*
1648 * For VMDq support of different descriptor types or
1649 * buffer sizes through the use of multiple SRRCTL
1650 * registers, RDRXCTL.MVMEN must be set to 1
1651 *
1652 * also, the manual doesn't mention it clearly but DCA hints
1653 * will only use queue 0's tags unless this bit is set. Side
1654 * effects of setting this bit are only that SRRCTL must be
1655 * fully programmed [0..15]
1656 */
1657 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1658 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1659 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1660
1514 1661
1515 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 1662 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1516 /* Fill out redirection table */ 1663 /* Fill out redirection table */
@@ -1525,22 +1672,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1525 } 1672 }
1526 1673
1527 /* Fill out hash function seeds */ 1674 /* Fill out hash function seeds */
1528 /* XXX use a random constant here to glue certain flows */
1529 get_random_bytes(&random[0], 40);
1530 for (i = 0; i < 10; i++) 1675 for (i = 0; i < 10; i++)
1531 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); 1676 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
1532 1677
1533 mrqc = IXGBE_MRQC_RSSEN 1678 mrqc = IXGBE_MRQC_RSSEN
1534 /* Perform hash on these packet types */ 1679 /* Perform hash on these packet types */
1535 | IXGBE_MRQC_RSS_FIELD_IPV4 1680 | IXGBE_MRQC_RSS_FIELD_IPV4
1536 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP 1681 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1537 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP 1682 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1538 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 1683 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1539 | IXGBE_MRQC_RSS_FIELD_IPV6_EX 1684 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1540 | IXGBE_MRQC_RSS_FIELD_IPV6 1685 | IXGBE_MRQC_RSS_FIELD_IPV6
1541 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP 1686 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1542 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP 1687 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1543 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 1688 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1544 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 1689 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1545 } 1690 }
1546 1691
@@ -1562,7 +1707,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1562} 1707}
1563 1708
1564static void ixgbe_vlan_rx_register(struct net_device *netdev, 1709static void ixgbe_vlan_rx_register(struct net_device *netdev,
1565 struct vlan_group *grp) 1710 struct vlan_group *grp)
1566{ 1711{
1567 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1712 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1568 u32 ctrl; 1713 u32 ctrl;
@@ -1586,14 +1731,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1586static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1731static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1587{ 1732{
1588 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1733 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1734 struct ixgbe_hw *hw = &adapter->hw;
1589 1735
1590 /* add VID to filter table */ 1736 /* add VID to filter table */
1591 ixgbe_set_vfta(&adapter->hw, vid, 0, true); 1737 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
1592} 1738}
1593 1739
1594static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1740static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1595{ 1741{
1596 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1742 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1743 struct ixgbe_hw *hw = &adapter->hw;
1597 1744
1598 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1745 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1599 ixgbe_irq_disable(adapter); 1746 ixgbe_irq_disable(adapter);
@@ -1604,7 +1751,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1604 ixgbe_irq_enable(adapter); 1751 ixgbe_irq_enable(adapter);
1605 1752
1606 /* remove VID from filter table */ 1753 /* remove VID from filter table */
1607 ixgbe_set_vfta(&adapter->hw, vid, 0, false); 1754 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
1608} 1755}
1609 1756
1610static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) 1757static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
@@ -1621,23 +1768,37 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1621 } 1768 }
1622} 1769}
1623 1770
1771static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
1772{
1773 struct dev_mc_list *mc_ptr;
1774 u8 *addr = *mc_addr_ptr;
1775 *vmdq = 0;
1776
1777 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1778 if (mc_ptr->next)
1779 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1780 else
1781 *mc_addr_ptr = NULL;
1782
1783 return addr;
1784}
1785
1624/** 1786/**
1625 * ixgbe_set_multi - Multicast and Promiscuous mode set 1787 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
1626 * @netdev: network interface device structure 1788 * @netdev: network interface device structure
1627 * 1789 *
1628 * The set_multi entry point is called whenever the multicast address 1790 * The set_rx_method entry point is called whenever the unicast/multicast
1629 * list or the network interface flags are updated. This routine is 1791 * address list or the network interface flags are updated. This routine is
1630 * responsible for configuring the hardware for proper multicast, 1792 * responsible for configuring the hardware for proper unicast, multicast and
1631 * promiscuous mode, and all-multi behavior. 1793 * promiscuous mode.
1632 **/ 1794 **/
1633static void ixgbe_set_multi(struct net_device *netdev) 1795static void ixgbe_set_rx_mode(struct net_device *netdev)
1634{ 1796{
1635 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1797 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1636 struct ixgbe_hw *hw = &adapter->hw; 1798 struct ixgbe_hw *hw = &adapter->hw;
1637 struct dev_mc_list *mc_ptr;
1638 u8 *mta_list;
1639 u32 fctrl, vlnctrl; 1799 u32 fctrl, vlnctrl;
1640 int i; 1800 u8 *addr_list = NULL;
1801 int addr_count = 0;
1641 1802
1642 /* Check for Promiscuous and All Multicast modes */ 1803 /* Check for Promiscuous and All Multicast modes */
1643 1804
@@ -1645,6 +1806,7 @@ static void ixgbe_set_multi(struct net_device *netdev)
1645 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1806 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1646 1807
1647 if (netdev->flags & IFF_PROMISC) { 1808 if (netdev->flags & IFF_PROMISC) {
1809 hw->addr_ctrl.user_set_promisc = 1;
1648 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1810 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1649 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1811 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
1650 } else { 1812 } else {
@@ -1655,33 +1817,25 @@ static void ixgbe_set_multi(struct net_device *netdev)
1655 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1817 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1656 } 1818 }
1657 vlnctrl |= IXGBE_VLNCTRL_VFE; 1819 vlnctrl |= IXGBE_VLNCTRL_VFE;
1820 hw->addr_ctrl.user_set_promisc = 0;
1658 } 1821 }
1659 1822
1660 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1823 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1661 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1824 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1662 1825
1663 if (netdev->mc_count) { 1826 /* reprogram secondary unicast list */
1664 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC); 1827 addr_count = netdev->uc_count;
1665 if (!mta_list) 1828 if (addr_count)
1666 return; 1829 addr_list = netdev->uc_list->dmi_addr;
1667 1830 hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
1668 /* Shared function expects packed array of only addresses. */ 1831 ixgbe_addr_list_itr);
1669 mc_ptr = netdev->mc_list; 1832
1670 1833 /* reprogram multicast list */
1671 for (i = 0; i < netdev->mc_count; i++) { 1834 addr_count = netdev->mc_count;
1672 if (!mc_ptr) 1835 if (addr_count)
1673 break; 1836 addr_list = netdev->mc_list->dmi_addr;
1674 memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr, 1837 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
1675 ETH_ALEN); 1838 ixgbe_addr_list_itr);
1676 mc_ptr = mc_ptr->next;
1677 }
1678
1679 ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
1680 kfree(mta_list);
1681 } else {
1682 ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
1683 }
1684
1685} 1839}
1686 1840
1687static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) 1841static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -1695,10 +1849,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1695 q_vectors = 1; 1849 q_vectors = 1;
1696 1850
1697 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1851 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1852 struct napi_struct *napi;
1698 q_vector = &adapter->q_vector[q_idx]; 1853 q_vector = &adapter->q_vector[q_idx];
1699 if (!q_vector->rxr_count) 1854 if (!q_vector->rxr_count)
1700 continue; 1855 continue;
1701 napi_enable(&q_vector->napi); 1856 napi = &q_vector->napi;
1857 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
1858 (q_vector->rxr_count > 1))
1859 napi->poll = &ixgbe_clean_rxonly_many;
1860
1861 napi_enable(napi);
1702 } 1862 }
1703} 1863}
1704 1864
@@ -1725,7 +1885,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1725 struct net_device *netdev = adapter->netdev; 1885 struct net_device *netdev = adapter->netdev;
1726 int i; 1886 int i;
1727 1887
1728 ixgbe_set_multi(netdev); 1888 ixgbe_set_rx_mode(netdev);
1729 1889
1730 ixgbe_restore_vlan(adapter); 1890 ixgbe_restore_vlan(adapter);
1731 1891
@@ -1733,7 +1893,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1733 ixgbe_configure_rx(adapter); 1893 ixgbe_configure_rx(adapter);
1734 for (i = 0; i < adapter->num_rx_queues; i++) 1894 for (i = 0; i < adapter->num_rx_queues; i++)
1735 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], 1895 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1736 (adapter->rx_ring[i].count - 1)); 1896 (adapter->rx_ring[i].count - 1));
1737} 1897}
1738 1898
1739static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 1899static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -1751,7 +1911,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1751 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { 1911 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
1752 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1912 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1753 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | 1913 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1754 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 1914 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1755 } else { 1915 } else {
1756 /* MSI only */ 1916 /* MSI only */
1757 gpie = 0; 1917 gpie = 0;
@@ -1778,6 +1938,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1778 for (i = 0; i < adapter->num_tx_queues; i++) { 1938 for (i = 0; i < adapter->num_tx_queues; i++) {
1779 j = adapter->tx_ring[i].reg_idx; 1939 j = adapter->tx_ring[i].reg_idx;
1780 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 1940 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1941 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1942 txdctl |= (8 << 16);
1781 txdctl |= IXGBE_TXDCTL_ENABLE; 1943 txdctl |= IXGBE_TXDCTL_ENABLE;
1782 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 1944 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1783 } 1945 }
@@ -1812,6 +1974,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1812 1974
1813 /* bring the link up in the watchdog, this could race with our first 1975 /* bring the link up in the watchdog, this could race with our first
1814 * link up interrupt but shouldn't be a problem */ 1976 * link up interrupt but shouldn't be a problem */
1977 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1978 adapter->link_check_timeout = jiffies;
1815 mod_timer(&adapter->watchdog_timer, jiffies); 1979 mod_timer(&adapter->watchdog_timer, jiffies);
1816 return 0; 1980 return 0;
1817} 1981}
@@ -1836,58 +2000,22 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
1836 2000
1837void ixgbe_reset(struct ixgbe_adapter *adapter) 2001void ixgbe_reset(struct ixgbe_adapter *adapter)
1838{ 2002{
1839 if (ixgbe_init_hw(&adapter->hw)) 2003 struct ixgbe_hw *hw = &adapter->hw;
1840 DPRINTK(PROBE, ERR, "Hardware Error\n"); 2004 if (hw->mac.ops.init_hw(hw))
2005 dev_err(&adapter->pdev->dev, "Hardware Error\n");
1841 2006
1842 /* reprogram the RAR[0] in case user changed it. */ 2007 /* reprogram the RAR[0] in case user changed it. */
1843 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 2008 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
1844 2009
1845} 2010}
1846 2011
1847#ifdef CONFIG_PM
1848static int ixgbe_resume(struct pci_dev *pdev)
1849{
1850 struct net_device *netdev = pci_get_drvdata(pdev);
1851 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1852 u32 err;
1853
1854 pci_set_power_state(pdev, PCI_D0);
1855 pci_restore_state(pdev);
1856 err = pci_enable_device(pdev);
1857 if (err) {
1858 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
1859 "suspend\n");
1860 return err;
1861 }
1862 pci_set_master(pdev);
1863
1864 pci_enable_wake(pdev, PCI_D3hot, 0);
1865 pci_enable_wake(pdev, PCI_D3cold, 0);
1866
1867 if (netif_running(netdev)) {
1868 err = ixgbe_request_irq(adapter);
1869 if (err)
1870 return err;
1871 }
1872
1873 ixgbe_reset(adapter);
1874
1875 if (netif_running(netdev))
1876 ixgbe_up(adapter);
1877
1878 netif_device_attach(netdev);
1879
1880 return 0;
1881}
1882#endif
1883
1884/** 2012/**
1885 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 2013 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
1886 * @adapter: board private structure 2014 * @adapter: board private structure
1887 * @rx_ring: ring to free buffers from 2015 * @rx_ring: ring to free buffers from
1888 **/ 2016 **/
1889static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 2017static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1890 struct ixgbe_ring *rx_ring) 2018 struct ixgbe_ring *rx_ring)
1891{ 2019{
1892 struct pci_dev *pdev = adapter->pdev; 2020 struct pci_dev *pdev = adapter->pdev;
1893 unsigned long size; 2021 unsigned long size;
@@ -1901,8 +2029,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1901 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 2029 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1902 if (rx_buffer_info->dma) { 2030 if (rx_buffer_info->dma) {
1903 pci_unmap_single(pdev, rx_buffer_info->dma, 2031 pci_unmap_single(pdev, rx_buffer_info->dma,
1904 adapter->rx_buf_len, 2032 rx_ring->rx_buf_len,
1905 PCI_DMA_FROMDEVICE); 2033 PCI_DMA_FROMDEVICE);
1906 rx_buffer_info->dma = 0; 2034 rx_buffer_info->dma = 0;
1907 } 2035 }
1908 if (rx_buffer_info->skb) { 2036 if (rx_buffer_info->skb) {
@@ -1911,12 +2039,12 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1911 } 2039 }
1912 if (!rx_buffer_info->page) 2040 if (!rx_buffer_info->page)
1913 continue; 2041 continue;
1914 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE, 2042 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
1915 PCI_DMA_FROMDEVICE); 2043 PCI_DMA_FROMDEVICE);
1916 rx_buffer_info->page_dma = 0; 2044 rx_buffer_info->page_dma = 0;
1917
1918 put_page(rx_buffer_info->page); 2045 put_page(rx_buffer_info->page);
1919 rx_buffer_info->page = NULL; 2046 rx_buffer_info->page = NULL;
2047 rx_buffer_info->page_offset = 0;
1920 } 2048 }
1921 2049
1922 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 2050 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
@@ -1938,7 +2066,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1938 * @tx_ring: ring to be cleaned 2066 * @tx_ring: ring to be cleaned
1939 **/ 2067 **/
1940static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 2068static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
1941 struct ixgbe_ring *tx_ring) 2069 struct ixgbe_ring *tx_ring)
1942{ 2070{
1943 struct ixgbe_tx_buffer *tx_buffer_info; 2071 struct ixgbe_tx_buffer *tx_buffer_info;
1944 unsigned long size; 2072 unsigned long size;
@@ -1991,75 +2119,64 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
1991void ixgbe_down(struct ixgbe_adapter *adapter) 2119void ixgbe_down(struct ixgbe_adapter *adapter)
1992{ 2120{
1993 struct net_device *netdev = adapter->netdev; 2121 struct net_device *netdev = adapter->netdev;
2122 struct ixgbe_hw *hw = &adapter->hw;
1994 u32 rxctrl; 2123 u32 rxctrl;
2124 u32 txdctl;
2125 int i, j;
1995 2126
1996 /* signal that we are down to the interrupt handler */ 2127 /* signal that we are down to the interrupt handler */
1997 set_bit(__IXGBE_DOWN, &adapter->state); 2128 set_bit(__IXGBE_DOWN, &adapter->state);
1998 2129
1999 /* disable receives */ 2130 /* disable receives */
2000 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 2131 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2001 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, 2132 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2002 rxctrl & ~IXGBE_RXCTRL_RXEN);
2003 2133
2004 netif_tx_disable(netdev); 2134 netif_tx_disable(netdev);
2005 2135
2006 /* disable transmits in the hardware */ 2136 IXGBE_WRITE_FLUSH(hw);
2007
2008 /* flush both disables */
2009 IXGBE_WRITE_FLUSH(&adapter->hw);
2010 msleep(10); 2137 msleep(10);
2011 2138
2139 netif_tx_stop_all_queues(netdev);
2140
2012 ixgbe_irq_disable(adapter); 2141 ixgbe_irq_disable(adapter);
2013 2142
2014 ixgbe_napi_disable_all(adapter); 2143 ixgbe_napi_disable_all(adapter);
2144
2015 del_timer_sync(&adapter->watchdog_timer); 2145 del_timer_sync(&adapter->watchdog_timer);
2146 cancel_work_sync(&adapter->watchdog_task);
2147
2148 /* disable transmits in the hardware now that interrupts are off */
2149 for (i = 0; i < adapter->num_tx_queues; i++) {
2150 j = adapter->tx_ring[i].reg_idx;
2151 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2152 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2153 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2154 }
2016 2155
2017 netif_carrier_off(netdev); 2156 netif_carrier_off(netdev);
2018 netif_tx_stop_all_queues(netdev);
2019 2157
2158#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2159 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2160 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
2161 dca_remove_requester(&adapter->pdev->dev);
2162 }
2163
2164#endif
2020 if (!pci_channel_offline(adapter->pdev)) 2165 if (!pci_channel_offline(adapter->pdev))
2021 ixgbe_reset(adapter); 2166 ixgbe_reset(adapter);
2022 ixgbe_clean_all_tx_rings(adapter); 2167 ixgbe_clean_all_tx_rings(adapter);
2023 ixgbe_clean_all_rx_rings(adapter); 2168 ixgbe_clean_all_rx_rings(adapter);
2024 2169
2025} 2170#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2026 2171 /* since we reset the hardware DCA settings were cleared */
2027static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 2172 if (dca_add_requester(&adapter->pdev->dev) == 0) {
2028{ 2173 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
2029 struct net_device *netdev = pci_get_drvdata(pdev); 2174 /* always use CB2 mode, difference is masked
2030 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2175 * in the CB driver */
2031#ifdef CONFIG_PM 2176 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
2032 int retval = 0; 2177 ixgbe_setup_dca(adapter);
2033#endif
2034
2035 netif_device_detach(netdev);
2036
2037 if (netif_running(netdev)) {
2038 ixgbe_down(adapter);
2039 ixgbe_free_irq(adapter);
2040 } 2178 }
2041
2042#ifdef CONFIG_PM
2043 retval = pci_save_state(pdev);
2044 if (retval)
2045 return retval;
2046#endif 2179#endif
2047
2048 pci_enable_wake(pdev, PCI_D3hot, 0);
2049 pci_enable_wake(pdev, PCI_D3cold, 0);
2050
2051 ixgbe_release_hw_control(adapter);
2052
2053 pci_disable_device(pdev);
2054
2055 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2056
2057 return 0;
2058}
2059
2060static void ixgbe_shutdown(struct pci_dev *pdev)
2061{
2062 ixgbe_suspend(pdev, PMSG_SUSPEND);
2063} 2180}
2064 2181
2065/** 2182/**
@@ -2072,11 +2189,11 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
2072static int ixgbe_poll(struct napi_struct *napi, int budget) 2189static int ixgbe_poll(struct napi_struct *napi, int budget)
2073{ 2190{
2074 struct ixgbe_q_vector *q_vector = container_of(napi, 2191 struct ixgbe_q_vector *q_vector = container_of(napi,
2075 struct ixgbe_q_vector, napi); 2192 struct ixgbe_q_vector, napi);
2076 struct ixgbe_adapter *adapter = q_vector->adapter; 2193 struct ixgbe_adapter *adapter = q_vector->adapter;
2077 int tx_cleaned = 0, work_done = 0; 2194 int tx_cleaned, work_done = 0;
2078 2195
2079#ifdef CONFIG_DCA 2196#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
2080 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 2197 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2081 ixgbe_update_tx_dca(adapter, adapter->tx_ring); 2198 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2082 ixgbe_update_rx_dca(adapter, adapter->rx_ring); 2199 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
@@ -2092,12 +2209,11 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2092 /* If budget not fully consumed, exit the polling mode */ 2209 /* If budget not fully consumed, exit the polling mode */
2093 if (work_done < budget) { 2210 if (work_done < budget) {
2094 netif_rx_complete(adapter->netdev, napi); 2211 netif_rx_complete(adapter->netdev, napi);
2095 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 2212 if (adapter->itr_setting & 3)
2096 ixgbe_set_itr(adapter); 2213 ixgbe_set_itr(adapter);
2097 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2214 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2098 ixgbe_irq_enable(adapter); 2215 ixgbe_irq_enable(adapter);
2099 } 2216 }
2100
2101 return work_done; 2217 return work_done;
2102} 2218}
2103 2219
@@ -2123,8 +2239,48 @@ static void ixgbe_reset_task(struct work_struct *work)
2123 ixgbe_reinit_locked(adapter); 2239 ixgbe_reinit_locked(adapter);
2124} 2240}
2125 2241
2242static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2243{
2244 int nrq = 1, ntq = 1;
2245 int feature_mask = 0, rss_i, rss_m;
2246
2247 /* Number of supported queues */
2248 switch (adapter->hw.mac.type) {
2249 case ixgbe_mac_82598EB:
2250 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2251 rss_m = 0;
2252 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2253
2254 switch (adapter->flags & feature_mask) {
2255 case (IXGBE_FLAG_RSS_ENABLED):
2256 rss_m = 0xF;
2257 nrq = rss_i;
2258 ntq = rss_i;
2259 break;
2260 case 0:
2261 default:
2262 rss_i = 0;
2263 rss_m = 0;
2264 nrq = 1;
2265 ntq = 1;
2266 break;
2267 }
2268
2269 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2270 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2271 break;
2272 default:
2273 nrq = 1;
2274 ntq = 1;
2275 break;
2276 }
2277
2278 adapter->num_rx_queues = nrq;
2279 adapter->num_tx_queues = ntq;
2280}
2281
2126static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 2282static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2127 int vectors) 2283 int vectors)
2128{ 2284{
2129 int err, vector_threshold; 2285 int err, vector_threshold;
2130 2286
@@ -2143,7 +2299,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2143 */ 2299 */
2144 while (vectors >= vector_threshold) { 2300 while (vectors >= vector_threshold) {
2145 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 2301 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2146 vectors); 2302 vectors);
2147 if (!err) /* Success in acquiring all requested vectors. */ 2303 if (!err) /* Success in acquiring all requested vectors. */
2148 break; 2304 break;
2149 else if (err < 0) 2305 else if (err < 0)
@@ -2162,54 +2318,13 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2162 kfree(adapter->msix_entries); 2318 kfree(adapter->msix_entries);
2163 adapter->msix_entries = NULL; 2319 adapter->msix_entries = NULL;
2164 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2320 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2165 adapter->num_tx_queues = 1; 2321 ixgbe_set_num_queues(adapter);
2166 adapter->num_rx_queues = 1;
2167 } else { 2322 } else {
2168 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ 2323 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2169 adapter->num_msix_vectors = vectors; 2324 adapter->num_msix_vectors = vectors;
2170 } 2325 }
2171} 2326}
2172 2327
2173static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2174{
2175 int nrq, ntq;
2176 int feature_mask = 0, rss_i, rss_m;
2177
2178 /* Number of supported queues */
2179 switch (adapter->hw.mac.type) {
2180 case ixgbe_mac_82598EB:
2181 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2182 rss_m = 0;
2183 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2184
2185 switch (adapter->flags & feature_mask) {
2186 case (IXGBE_FLAG_RSS_ENABLED):
2187 rss_m = 0xF;
2188 nrq = rss_i;
2189 ntq = rss_i;
2190 break;
2191 case 0:
2192 default:
2193 rss_i = 0;
2194 rss_m = 0;
2195 nrq = 1;
2196 ntq = 1;
2197 break;
2198 }
2199
2200 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2201 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2202 break;
2203 default:
2204 nrq = 1;
2205 ntq = 1;
2206 break;
2207 }
2208
2209 adapter->num_rx_queues = nrq;
2210 adapter->num_tx_queues = ntq;
2211}
2212
2213/** 2328/**
2214 * ixgbe_cache_ring_register - Descriptor ring to register mapping 2329 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2215 * @adapter: board private structure to initialize 2330 * @adapter: board private structure to initialize
@@ -2219,9 +2334,6 @@ static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2219 **/ 2334 **/
2220static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2335static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2221{ 2336{
2222 /* TODO: Remove all uses of the indices in the cases where multiple
2223 * features are OR'd together, if the feature set makes sense.
2224 */
2225 int feature_mask = 0, rss_i; 2337 int feature_mask = 0, rss_i;
2226 int i, txr_idx, rxr_idx; 2338 int i, txr_idx, rxr_idx;
2227 2339
@@ -2262,21 +2374,22 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2262 int i; 2374 int i;
2263 2375
2264 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 2376 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2265 sizeof(struct ixgbe_ring), GFP_KERNEL); 2377 sizeof(struct ixgbe_ring), GFP_KERNEL);
2266 if (!adapter->tx_ring) 2378 if (!adapter->tx_ring)
2267 goto err_tx_ring_allocation; 2379 goto err_tx_ring_allocation;
2268 2380
2269 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 2381 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2270 sizeof(struct ixgbe_ring), GFP_KERNEL); 2382 sizeof(struct ixgbe_ring), GFP_KERNEL);
2271 if (!adapter->rx_ring) 2383 if (!adapter->rx_ring)
2272 goto err_rx_ring_allocation; 2384 goto err_rx_ring_allocation;
2273 2385
2274 for (i = 0; i < adapter->num_tx_queues; i++) { 2386 for (i = 0; i < adapter->num_tx_queues; i++) {
2275 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; 2387 adapter->tx_ring[i].count = adapter->tx_ring_count;
2276 adapter->tx_ring[i].queue_index = i; 2388 adapter->tx_ring[i].queue_index = i;
2277 } 2389 }
2390
2278 for (i = 0; i < adapter->num_rx_queues; i++) { 2391 for (i = 0; i < adapter->num_rx_queues; i++) {
2279 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; 2392 adapter->rx_ring[i].count = adapter->rx_ring_count;
2280 adapter->rx_ring[i].queue_index = i; 2393 adapter->rx_ring[i].queue_index = i;
2281 } 2394 }
2282 2395
@@ -2298,25 +2411,19 @@ err_tx_ring_allocation:
2298 * capabilities of the hardware and the kernel. 2411 * capabilities of the hardware and the kernel.
2299 **/ 2412 **/
2300static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter 2413static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2301 *adapter) 2414 *adapter)
2302{ 2415{
2303 int err = 0; 2416 int err = 0;
2304 int vector, v_budget; 2417 int vector, v_budget;
2305 2418
2306 /* 2419 /*
2307 * Set the default interrupt throttle rate.
2308 */
2309 adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
2310 adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
2311
2312 /*
2313 * It's easy to be greedy for MSI-X vectors, but it really 2420 * It's easy to be greedy for MSI-X vectors, but it really
2314 * doesn't do us much good if we have a lot more vectors 2421 * doesn't do us much good if we have a lot more vectors
2315 * than CPU's. So let's be conservative and only ask for 2422 * than CPU's. So let's be conservative and only ask for
2316 * (roughly) twice the number of vectors as there are CPU's. 2423 * (roughly) twice the number of vectors as there are CPU's.
2317 */ 2424 */
2318 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 2425 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2319 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 2426 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2320 2427
2321 /* 2428 /*
2322 * At the same time, hardware can only support a maximum of 2429 * At the same time, hardware can only support a maximum of
@@ -2330,7 +2437,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2330 /* A failure in MSI-X entry allocation isn't fatal, but it does 2437 /* A failure in MSI-X entry allocation isn't fatal, but it does
2331 * mean we disable MSI-X capabilities of the adapter. */ 2438 * mean we disable MSI-X capabilities of the adapter. */
2332 adapter->msix_entries = kcalloc(v_budget, 2439 adapter->msix_entries = kcalloc(v_budget,
2333 sizeof(struct msix_entry), GFP_KERNEL); 2440 sizeof(struct msix_entry), GFP_KERNEL);
2334 if (!adapter->msix_entries) { 2441 if (!adapter->msix_entries) {
2335 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2442 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2336 ixgbe_set_num_queues(adapter); 2443 ixgbe_set_num_queues(adapter);
@@ -2339,7 +2446,7 @@ static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2339 err = ixgbe_alloc_queues(adapter); 2446 err = ixgbe_alloc_queues(adapter);
2340 if (err) { 2447 if (err) {
2341 DPRINTK(PROBE, ERR, "Unable to allocate memory " 2448 DPRINTK(PROBE, ERR, "Unable to allocate memory "
2342 "for queues\n"); 2449 "for queues\n");
2343 goto out; 2450 goto out;
2344 } 2451 }
2345 2452
@@ -2360,7 +2467,7 @@ try_msi:
2360 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 2467 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2361 } else { 2468 } else {
2362 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " 2469 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2363 "falling back to legacy. Error: %d\n", err); 2470 "falling back to legacy. Error: %d\n", err);
2364 /* reset err */ 2471 /* reset err */
2365 err = 0; 2472 err = 0;
2366 } 2473 }
@@ -2416,9 +2523,9 @@ static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2416 } 2523 }
2417 2524
2418 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " 2525 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2419 "Tx Queue count = %u\n", 2526 "Tx Queue count = %u\n",
2420 (adapter->num_rx_queues > 1) ? "Enabled" : 2527 (adapter->num_rx_queues > 1) ? "Enabled" :
2421 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2528 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2422 2529
2423 set_bit(__IXGBE_DOWN, &adapter->state); 2530 set_bit(__IXGBE_DOWN, &adapter->state);
2424 2531
@@ -2445,33 +2552,44 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2445 struct pci_dev *pdev = adapter->pdev; 2552 struct pci_dev *pdev = adapter->pdev;
2446 unsigned int rss; 2553 unsigned int rss;
2447 2554
2555 /* PCI config space info */
2556
2557 hw->vendor_id = pdev->vendor;
2558 hw->device_id = pdev->device;
2559 hw->revision_id = pdev->revision;
2560 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2561 hw->subsystem_device_id = pdev->subsystem_device;
2562
2448 /* Set capability flags */ 2563 /* Set capability flags */
2449 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 2564 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2450 adapter->ring_feature[RING_F_RSS].indices = rss; 2565 adapter->ring_feature[RING_F_RSS].indices = rss;
2451 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2566 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2452 2567
2453 /* Enable Dynamic interrupt throttling by default */
2454 adapter->rx_eitr = 1;
2455 adapter->tx_eitr = 1;
2456
2457 /* default flow control settings */ 2568 /* default flow control settings */
2458 hw->fc.original_type = ixgbe_fc_full; 2569 hw->fc.original_type = ixgbe_fc_none;
2459 hw->fc.type = ixgbe_fc_full; 2570 hw->fc.type = ixgbe_fc_none;
2571 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2572 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2573 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2574 hw->fc.send_xon = true;
2460 2575
2461 /* select 10G link by default */ 2576 /* select 10G link by default */
2462 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 2577 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2463 if (hw->mac.ops.reset(hw)) { 2578
2464 dev_err(&pdev->dev, "HW Init failed\n"); 2579 /* enable itr by default in dynamic mode */
2465 return -EIO; 2580 adapter->itr_setting = 1;
2466 } 2581 adapter->eitr_param = 20000;
2467 if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, 2582
2468 false)) { 2583 /* set defaults for eitr in MegaBytes */
2469 dev_err(&pdev->dev, "Link Speed setup failed\n"); 2584 adapter->eitr_low = 10;
2470 return -EIO; 2585 adapter->eitr_high = 20;
2471 } 2586
2587 /* set default ring sizes */
2588 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
2589 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
2472 2590
2473 /* initialize eeprom parameters */ 2591 /* initialize eeprom parameters */
2474 if (ixgbe_init_eeprom(hw)) { 2592 if (ixgbe_init_eeprom_params_generic(hw)) {
2475 dev_err(&pdev->dev, "EEPROM initialization failed\n"); 2593 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2476 return -EIO; 2594 return -EIO;
2477 } 2595 }
@@ -2487,105 +2605,157 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2487/** 2605/**
2488 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 2606 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2489 * @adapter: board private structure 2607 * @adapter: board private structure
2490 * @txdr: tx descriptor ring (for a specific queue) to setup 2608 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2491 * 2609 *
2492 * Return 0 on success, negative on failure 2610 * Return 0 on success, negative on failure
2493 **/ 2611 **/
2494int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 2612int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
2495 struct ixgbe_ring *txdr) 2613 struct ixgbe_ring *tx_ring)
2496{ 2614{
2497 struct pci_dev *pdev = adapter->pdev; 2615 struct pci_dev *pdev = adapter->pdev;
2498 int size; 2616 int size;
2499 2617
2500 size = sizeof(struct ixgbe_tx_buffer) * txdr->count; 2618 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2501 txdr->tx_buffer_info = vmalloc(size); 2619 tx_ring->tx_buffer_info = vmalloc(size);
2502 if (!txdr->tx_buffer_info) { 2620 if (!tx_ring->tx_buffer_info)
2503 DPRINTK(PROBE, ERR, 2621 goto err;
2504 "Unable to allocate memory for the transmit descriptor ring\n"); 2622 memset(tx_ring->tx_buffer_info, 0, size);
2505 return -ENOMEM;
2506 }
2507 memset(txdr->tx_buffer_info, 0, size);
2508 2623
2509 /* round up to nearest 4K */ 2624 /* round up to nearest 4K */
2510 txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc); 2625 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
2511 txdr->size = ALIGN(txdr->size, 4096); 2626 sizeof(u32);
2512 2627 tx_ring->size = ALIGN(tx_ring->size, 4096);
2513 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2514 if (!txdr->desc) {
2515 vfree(txdr->tx_buffer_info);
2516 DPRINTK(PROBE, ERR,
2517 "Memory allocation failed for the tx desc ring\n");
2518 return -ENOMEM;
2519 }
2520 2628
2521 txdr->next_to_use = 0; 2629 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2522 txdr->next_to_clean = 0; 2630 &tx_ring->dma);
2523 txdr->work_limit = txdr->count; 2631 if (!tx_ring->desc)
2632 goto err;
2524 2633
2634 tx_ring->next_to_use = 0;
2635 tx_ring->next_to_clean = 0;
2636 tx_ring->work_limit = tx_ring->count;
2525 return 0; 2637 return 0;
2638
2639err:
2640 vfree(tx_ring->tx_buffer_info);
2641 tx_ring->tx_buffer_info = NULL;
2642 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
2643 "descriptor ring\n");
2644 return -ENOMEM;
2645}
2646
2647/**
2648 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2649 * @adapter: board private structure
2650 *
2651 * If this function returns with an error, then it's possible one or
2652 * more of the rings is populated (while the rest are not). It is the
2653 * callers duty to clean those orphaned rings.
2654 *
2655 * Return 0 on success, negative on failure
2656 **/
2657static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2658{
2659 int i, err = 0;
2660
2661 for (i = 0; i < adapter->num_tx_queues; i++) {
2662 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2663 if (!err)
2664 continue;
2665 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
2666 break;
2667 }
2668
2669 return err;
2526} 2670}
2527 2671
2528/** 2672/**
2529 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 2673 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2530 * @adapter: board private structure 2674 * @adapter: board private structure
2531 * @rxdr: rx descriptor ring (for a specific queue) to setup 2675 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2532 * 2676 *
2533 * Returns 0 on success, negative on failure 2677 * Returns 0 on success, negative on failure
2534 **/ 2678 **/
2535int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 2679int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2536 struct ixgbe_ring *rxdr) 2680 struct ixgbe_ring *rx_ring)
2537{ 2681{
2538 struct pci_dev *pdev = adapter->pdev; 2682 struct pci_dev *pdev = adapter->pdev;
2539 int size; 2683 int size;
2540 2684
2541 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS; 2685 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2542 rxdr->lro_mgr.lro_arr = vmalloc(size); 2686 rx_ring->lro_mgr.lro_arr = vmalloc(size);
2543 if (!rxdr->lro_mgr.lro_arr) 2687 if (!rx_ring->lro_mgr.lro_arr)
2544 return -ENOMEM; 2688 return -ENOMEM;
2545 memset(rxdr->lro_mgr.lro_arr, 0, size); 2689 memset(rx_ring->lro_mgr.lro_arr, 0, size);
2546 2690
2547 size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; 2691 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2548 rxdr->rx_buffer_info = vmalloc(size); 2692 rx_ring->rx_buffer_info = vmalloc(size);
2549 if (!rxdr->rx_buffer_info) { 2693 if (!rx_ring->rx_buffer_info) {
2550 DPRINTK(PROBE, ERR, 2694 DPRINTK(PROBE, ERR,
2551 "vmalloc allocation failed for the rx desc ring\n"); 2695 "vmalloc allocation failed for the rx desc ring\n");
2552 goto alloc_failed; 2696 goto alloc_failed;
2553 } 2697 }
2554 memset(rxdr->rx_buffer_info, 0, size); 2698 memset(rx_ring->rx_buffer_info, 0, size);
2555 2699
2556 /* Round up to nearest 4K */ 2700 /* Round up to nearest 4K */
2557 rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc); 2701 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2558 rxdr->size = ALIGN(rxdr->size, 4096); 2702 rx_ring->size = ALIGN(rx_ring->size, 4096);
2559 2703
2560 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 2704 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
2561 2705
2562 if (!rxdr->desc) { 2706 if (!rx_ring->desc) {
2563 DPRINTK(PROBE, ERR, 2707 DPRINTK(PROBE, ERR,
2564 "Memory allocation failed for the rx desc ring\n"); 2708 "Memory allocation failed for the rx desc ring\n");
2565 vfree(rxdr->rx_buffer_info); 2709 vfree(rx_ring->rx_buffer_info);
2566 goto alloc_failed; 2710 goto alloc_failed;
2567 } 2711 }
2568 2712
2569 rxdr->next_to_clean = 0; 2713 rx_ring->next_to_clean = 0;
2570 rxdr->next_to_use = 0; 2714 rx_ring->next_to_use = 0;
2571 2715
2572 return 0; 2716 return 0;
2573 2717
2574alloc_failed: 2718alloc_failed:
2575 vfree(rxdr->lro_mgr.lro_arr); 2719 vfree(rx_ring->lro_mgr.lro_arr);
2576 rxdr->lro_mgr.lro_arr = NULL; 2720 rx_ring->lro_mgr.lro_arr = NULL;
2577 return -ENOMEM; 2721 return -ENOMEM;
2578} 2722}
2579 2723
2580/** 2724/**
2725 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2726 * @adapter: board private structure
2727 *
2728 * If this function returns with an error, then it's possible one or
2729 * more of the rings is populated (while the rest are not). It is the
2730 * callers duty to clean those orphaned rings.
2731 *
2732 * Return 0 on success, negative on failure
2733 **/
2734
2735static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2736{
2737 int i, err = 0;
2738
2739 for (i = 0; i < adapter->num_rx_queues; i++) {
2740 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2741 if (!err)
2742 continue;
2743 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
2744 break;
2745 }
2746
2747 return err;
2748}
2749
2750/**
2581 * ixgbe_free_tx_resources - Free Tx Resources per Queue 2751 * ixgbe_free_tx_resources - Free Tx Resources per Queue
2582 * @adapter: board private structure 2752 * @adapter: board private structure
2583 * @tx_ring: Tx descriptor ring for a specific queue 2753 * @tx_ring: Tx descriptor ring for a specific queue
2584 * 2754 *
2585 * Free all transmit software resources 2755 * Free all transmit software resources
2586 **/ 2756 **/
2587static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 2757void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
2588 struct ixgbe_ring *tx_ring) 2758 struct ixgbe_ring *tx_ring)
2589{ 2759{
2590 struct pci_dev *pdev = adapter->pdev; 2760 struct pci_dev *pdev = adapter->pdev;
2591 2761
@@ -2620,8 +2790,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
2620 * 2790 *
2621 * Free all receive software resources 2791 * Free all receive software resources
2622 **/ 2792 **/
2623static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 2793void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
2624 struct ixgbe_ring *rx_ring) 2794 struct ixgbe_ring *rx_ring)
2625{ 2795{
2626 struct pci_dev *pdev = adapter->pdev; 2796 struct pci_dev *pdev = adapter->pdev;
2627 2797
@@ -2653,59 +2823,6 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
2653} 2823}
2654 2824
2655/** 2825/**
2656 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2657 * @adapter: board private structure
2658 *
2659 * If this function returns with an error, then it's possible one or
2660 * more of the rings is populated (while the rest are not). It is the
2661 * callers duty to clean those orphaned rings.
2662 *
2663 * Return 0 on success, negative on failure
2664 **/
2665static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2666{
2667 int i, err = 0;
2668
2669 for (i = 0; i < adapter->num_tx_queues; i++) {
2670 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2671 if (err) {
2672 DPRINTK(PROBE, ERR,
2673 "Allocation for Tx Queue %u failed\n", i);
2674 break;
2675 }
2676 }
2677
2678 return err;
2679}
2680
2681/**
2682 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2683 * @adapter: board private structure
2684 *
2685 * If this function returns with an error, then it's possible one or
2686 * more of the rings is populated (while the rest are not). It is the
2687 * callers duty to clean those orphaned rings.
2688 *
2689 * Return 0 on success, negative on failure
2690 **/
2691
2692static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2693{
2694 int i, err = 0;
2695
2696 for (i = 0; i < adapter->num_rx_queues; i++) {
2697 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2698 if (err) {
2699 DPRINTK(PROBE, ERR,
2700 "Allocation for Rx Queue %u failed\n", i);
2701 break;
2702 }
2703 }
2704
2705 return err;
2706}
2707
2708/**
2709 * ixgbe_change_mtu - Change the Maximum Transfer Unit 2826 * ixgbe_change_mtu - Change the Maximum Transfer Unit
2710 * @netdev: network interface device structure 2827 * @netdev: network interface device structure
2711 * @new_mtu: new value for maximum frame size 2828 * @new_mtu: new value for maximum frame size
@@ -2717,12 +2834,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
2717 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2718 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2835 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2719 2836
2720 if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) || 2837 /* MTU < 68 is an error and causes problems on some kernels */
2721 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 2838 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
2722 return -EINVAL; 2839 return -EINVAL;
2723 2840
2724 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", 2841 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2725 netdev->mtu, new_mtu); 2842 netdev->mtu, new_mtu);
2726 /* must set new MTU before calling down or up */ 2843 /* must set new MTU before calling down or up */
2727 netdev->mtu = new_mtu; 2844 netdev->mtu = new_mtu;
2728 2845
@@ -2817,6 +2934,135 @@ static int ixgbe_close(struct net_device *netdev)
2817} 2934}
2818 2935
2819/** 2936/**
2937 * ixgbe_napi_add_all - prep napi structs for use
2938 * @adapter: private struct
2939 * helper function to napi_add each possible q_vector->napi
2940 */
2941static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2942{
2943 int q_idx, q_vectors;
2944 int (*poll)(struct napi_struct *, int);
2945
2946 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2947 poll = &ixgbe_clean_rxonly;
2948 /* Only enable as many vectors as we have rx queues. */
2949 q_vectors = adapter->num_rx_queues;
2950 } else {
2951 poll = &ixgbe_poll;
2952 /* only one q_vector for legacy modes */
2953 q_vectors = 1;
2954 }
2955
2956 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2957 struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
2958 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
2959 }
2960}
2961
2962static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
2963{
2964 int q_idx;
2965 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2966
2967 /* legacy and MSI only use one vector */
2968 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2969 q_vectors = 1;
2970
2971 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2972 struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
2973 if (!q_vector->rxr_count)
2974 continue;
2975 netif_napi_del(&q_vector->napi);
2976 }
2977}
2978
2979#ifdef CONFIG_PM
2980static int ixgbe_resume(struct pci_dev *pdev)
2981{
2982 struct net_device *netdev = pci_get_drvdata(pdev);
2983 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2984 u32 err;
2985
2986 pci_set_power_state(pdev, PCI_D0);
2987 pci_restore_state(pdev);
2988 err = pci_enable_device(pdev);
2989 if (err) {
2990 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
2991 "suspend\n");
2992 return err;
2993 }
2994 pci_set_master(pdev);
2995
2996 pci_enable_wake(pdev, PCI_D3hot, 0);
2997 pci_enable_wake(pdev, PCI_D3cold, 0);
2998
2999 err = ixgbe_init_interrupt_scheme(adapter);
3000 if (err) {
3001 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
3002 "device\n");
3003 return err;
3004 }
3005
3006 ixgbe_napi_add_all(adapter);
3007 ixgbe_reset(adapter);
3008
3009 if (netif_running(netdev)) {
3010 err = ixgbe_open(adapter->netdev);
3011 if (err)
3012 return err;
3013 }
3014
3015 netif_device_attach(netdev);
3016
3017 return 0;
3018}
3019
3020#endif /* CONFIG_PM */
3021static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3022{
3023 struct net_device *netdev = pci_get_drvdata(pdev);
3024 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3025#ifdef CONFIG_PM
3026 int retval = 0;
3027#endif
3028
3029 netif_device_detach(netdev);
3030
3031 if (netif_running(netdev)) {
3032 ixgbe_down(adapter);
3033 ixgbe_free_irq(adapter);
3034 ixgbe_free_all_tx_resources(adapter);
3035 ixgbe_free_all_rx_resources(adapter);
3036 }
3037 ixgbe_reset_interrupt_capability(adapter);
3038 ixgbe_napi_del_all(adapter);
3039 kfree(adapter->tx_ring);
3040 kfree(adapter->rx_ring);
3041
3042#ifdef CONFIG_PM
3043 retval = pci_save_state(pdev);
3044 if (retval)
3045 return retval;
3046#endif
3047
3048 pci_enable_wake(pdev, PCI_D3hot, 0);
3049 pci_enable_wake(pdev, PCI_D3cold, 0);
3050
3051 ixgbe_release_hw_control(adapter);
3052
3053 pci_disable_device(pdev);
3054
3055 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3056
3057 return 0;
3058}
3059
3060static void ixgbe_shutdown(struct pci_dev *pdev)
3061{
3062 ixgbe_suspend(pdev, PMSG_SUSPEND);
3063}
3064
3065/**
2820 * ixgbe_update_stats - Update the board statistics counters. 3066 * ixgbe_update_stats - Update the board statistics counters.
2821 * @adapter: board private structure 3067 * @adapter: board private structure
2822 **/ 3068 **/
@@ -2889,7 +3135,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2889 3135
2890 /* Rx Errors */ 3136 /* Rx Errors */
2891 adapter->net_stats.rx_errors = adapter->stats.crcerrs + 3137 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
2892 adapter->stats.rlec; 3138 adapter->stats.rlec;
2893 adapter->net_stats.rx_dropped = 0; 3139 adapter->net_stats.rx_dropped = 0;
2894 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 3140 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2895 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3141 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -2903,27 +3149,74 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2903static void ixgbe_watchdog(unsigned long data) 3149static void ixgbe_watchdog(unsigned long data)
2904{ 3150{
2905 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 3151 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
2906 struct net_device *netdev = adapter->netdev; 3152 struct ixgbe_hw *hw = &adapter->hw;
2907 bool link_up; 3153
2908 u32 link_speed = 0; 3154 /* Do the watchdog outside of interrupt context due to the lovely
3155 * delays that some of the newer hardware requires */
3156 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
3157 /* Cause software interrupt to ensure rx rings are cleaned */
3158 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3159 u32 eics =
3160 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
3161 IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
3162 } else {
3163 /* For legacy and MSI interrupts don't set any bits that
3164 * are enabled for EIAM, because this operation would
3165 * set *both* EIMS and EICS for any bit in EIAM */
3166 IXGBE_WRITE_REG(hw, IXGBE_EICS,
3167 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
3168 }
3169 /* Reset the timer */
3170 mod_timer(&adapter->watchdog_timer,
3171 round_jiffies(jiffies + 2 * HZ));
3172 }
2909 3173
2910 adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); 3174 schedule_work(&adapter->watchdog_task);
3175}
3176
3177/**
3178 * ixgbe_watchdog_task - worker thread to bring link up
3179 * @work: pointer to work_struct containing our data
3180 **/
3181static void ixgbe_watchdog_task(struct work_struct *work)
3182{
3183 struct ixgbe_adapter *adapter = container_of(work,
3184 struct ixgbe_adapter,
3185 watchdog_task);
3186 struct net_device *netdev = adapter->netdev;
3187 struct ixgbe_hw *hw = &adapter->hw;
3188 u32 link_speed = adapter->link_speed;
3189 bool link_up = adapter->link_up;
3190
3191 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
3192
3193 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3194 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3195 if (link_up ||
3196 time_after(jiffies, (adapter->link_check_timeout +
3197 IXGBE_TRY_LINK_TIMEOUT))) {
3198 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
3199 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3200 }
3201 adapter->link_up = link_up;
3202 adapter->link_speed = link_speed;
3203 }
2911 3204
2912 if (link_up) { 3205 if (link_up) {
2913 if (!netif_carrier_ok(netdev)) { 3206 if (!netif_carrier_ok(netdev)) {
2914 u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3207 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2915 u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS); 3208 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
2916#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) 3209#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
2917#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) 3210#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
2918 DPRINTK(LINK, INFO, "NIC Link is Up %s, " 3211 DPRINTK(LINK, INFO, "NIC Link is Up %s, "
2919 "Flow Control: %s\n", 3212 "Flow Control: %s\n",
2920 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 3213 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
2921 "10 Gbps" : 3214 "10 Gbps" :
2922 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 3215 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
2923 "1 Gbps" : "unknown speed")), 3216 "1 Gbps" : "unknown speed")),
2924 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 3217 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
2925 (FLOW_RX ? "RX" : 3218 (FLOW_RX ? "RX" :
2926 (FLOW_TX ? "TX" : "None")))); 3219 (FLOW_TX ? "TX" : "None"))));
2927 3220
2928 netif_carrier_on(netdev); 3221 netif_carrier_on(netdev);
2929 netif_tx_wake_all_queues(netdev); 3222 netif_tx_wake_all_queues(netdev);
@@ -2932,6 +3225,8 @@ static void ixgbe_watchdog(unsigned long data)
2932 adapter->detect_tx_hung = true; 3225 adapter->detect_tx_hung = true;
2933 } 3226 }
2934 } else { 3227 } else {
3228 adapter->link_up = false;
3229 adapter->link_speed = 0;
2935 if (netif_carrier_ok(netdev)) { 3230 if (netif_carrier_ok(netdev)) {
2936 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 3231 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2937 netif_carrier_off(netdev); 3232 netif_carrier_off(netdev);
@@ -2940,36 +3235,19 @@ static void ixgbe_watchdog(unsigned long data)
2940 } 3235 }
2941 3236
2942 ixgbe_update_stats(adapter); 3237 ixgbe_update_stats(adapter);
2943 3238 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2944 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2945 /* Cause software interrupt to ensure rx rings are cleaned */
2946 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2947 u32 eics =
2948 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
2949 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
2950 } else {
2951 /* for legacy and MSI interrupts don't set any bits that
2952 * are enabled for EIAM, because this operation would
2953 * set *both* EIMS and EICS for any bit in EIAM */
2954 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
2955 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
2956 }
2957 /* Reset the timer */
2958 mod_timer(&adapter->watchdog_timer,
2959 round_jiffies(jiffies + 2 * HZ));
2960 }
2961} 3239}
2962 3240
2963static int ixgbe_tso(struct ixgbe_adapter *adapter, 3241static int ixgbe_tso(struct ixgbe_adapter *adapter,
2964 struct ixgbe_ring *tx_ring, struct sk_buff *skb, 3242 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
2965 u32 tx_flags, u8 *hdr_len) 3243 u32 tx_flags, u8 *hdr_len)
2966{ 3244{
2967 struct ixgbe_adv_tx_context_desc *context_desc; 3245 struct ixgbe_adv_tx_context_desc *context_desc;
2968 unsigned int i; 3246 unsigned int i;
2969 int err; 3247 int err;
2970 struct ixgbe_tx_buffer *tx_buffer_info; 3248 struct ixgbe_tx_buffer *tx_buffer_info;
2971 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 3249 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2972 u32 mss_l4len_idx = 0, l4len; 3250 u32 mss_l4len_idx, l4len;
2973 3251
2974 if (skb_is_gso(skb)) { 3252 if (skb_is_gso(skb)) {
2975 if (skb_header_cloned(skb)) { 3253 if (skb_header_cloned(skb)) {
@@ -2985,16 +3263,16 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
2985 iph->tot_len = 0; 3263 iph->tot_len = 0;
2986 iph->check = 0; 3264 iph->check = 0;
2987 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 3265 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2988 iph->daddr, 0, 3266 iph->daddr, 0,
2989 IPPROTO_TCP, 3267 IPPROTO_TCP,
2990 0); 3268 0);
2991 adapter->hw_tso_ctxt++; 3269 adapter->hw_tso_ctxt++;
2992 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3270 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2993 ipv6_hdr(skb)->payload_len = 0; 3271 ipv6_hdr(skb)->payload_len = 0;
2994 tcp_hdr(skb)->check = 3272 tcp_hdr(skb)->check =
2995 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3273 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2996 &ipv6_hdr(skb)->daddr, 3274 &ipv6_hdr(skb)->daddr,
2997 0, IPPROTO_TCP, 0); 3275 0, IPPROTO_TCP, 0);
2998 adapter->hw_tso6_ctxt++; 3276 adapter->hw_tso6_ctxt++;
2999 } 3277 }
3000 3278
@@ -3008,7 +3286,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3008 vlan_macip_lens |= 3286 vlan_macip_lens |=
3009 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3287 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3010 vlan_macip_lens |= ((skb_network_offset(skb)) << 3288 vlan_macip_lens |= ((skb_network_offset(skb)) <<
3011 IXGBE_ADVTXD_MACLEN_SHIFT); 3289 IXGBE_ADVTXD_MACLEN_SHIFT);
3012 *hdr_len += skb_network_offset(skb); 3290 *hdr_len += skb_network_offset(skb);
3013 vlan_macip_lens |= 3291 vlan_macip_lens |=
3014 (skb_transport_header(skb) - skb_network_header(skb)); 3292 (skb_transport_header(skb) - skb_network_header(skb));
@@ -3018,8 +3296,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3018 context_desc->seqnum_seed = 0; 3296 context_desc->seqnum_seed = 0;
3019 3297
3020 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3298 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3021 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3299 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
3022 IXGBE_ADVTXD_DTYP_CTXT); 3300 IXGBE_ADVTXD_DTYP_CTXT);
3023 3301
3024 if (skb->protocol == htons(ETH_P_IP)) 3302 if (skb->protocol == htons(ETH_P_IP))
3025 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3303 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -3027,9 +3305,11 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3027 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 3305 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3028 3306
3029 /* MSS L4LEN IDX */ 3307 /* MSS L4LEN IDX */
3030 mss_l4len_idx |= 3308 mss_l4len_idx =
3031 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 3309 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3032 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 3310 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
3311 /* use index 1 for TSO */
3312 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3033 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3313 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3034 3314
3035 tx_buffer_info->time_stamp = jiffies; 3315 tx_buffer_info->time_stamp = jiffies;
@@ -3046,8 +3326,8 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
3046} 3326}
3047 3327
3048static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 3328static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3049 struct ixgbe_ring *tx_ring, 3329 struct ixgbe_ring *tx_ring,
3050 struct sk_buff *skb, u32 tx_flags) 3330 struct sk_buff *skb, u32 tx_flags)
3051{ 3331{
3052 struct ixgbe_adv_tx_context_desc *context_desc; 3332 struct ixgbe_adv_tx_context_desc *context_desc;
3053 unsigned int i; 3333 unsigned int i;
@@ -3064,16 +3344,16 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3064 vlan_macip_lens |= 3344 vlan_macip_lens |=
3065 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3345 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3066 vlan_macip_lens |= (skb_network_offset(skb) << 3346 vlan_macip_lens |= (skb_network_offset(skb) <<
3067 IXGBE_ADVTXD_MACLEN_SHIFT); 3347 IXGBE_ADVTXD_MACLEN_SHIFT);
3068 if (skb->ip_summed == CHECKSUM_PARTIAL) 3348 if (skb->ip_summed == CHECKSUM_PARTIAL)
3069 vlan_macip_lens |= (skb_transport_header(skb) - 3349 vlan_macip_lens |= (skb_transport_header(skb) -
3070 skb_network_header(skb)); 3350 skb_network_header(skb));
3071 3351
3072 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 3352 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3073 context_desc->seqnum_seed = 0; 3353 context_desc->seqnum_seed = 0;
3074 3354
3075 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3355 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3076 IXGBE_ADVTXD_DTYP_CTXT); 3356 IXGBE_ADVTXD_DTYP_CTXT);
3077 3357
3078 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3358 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3079 switch (skb->protocol) { 3359 switch (skb->protocol) {
@@ -3081,16 +3361,14 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3081 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3361 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3082 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3362 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3083 type_tucmd_mlhl |= 3363 type_tucmd_mlhl |=
3084 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3364 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3085 break; 3365 break;
3086
3087 case __constant_htons(ETH_P_IPV6): 3366 case __constant_htons(ETH_P_IPV6):
3088 /* XXX what about other V6 headers?? */ 3367 /* XXX what about other V6 headers?? */
3089 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3368 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3090 type_tucmd_mlhl |= 3369 type_tucmd_mlhl |=
3091 IXGBE_ADVTXD_TUCMD_L4T_TCP; 3370 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3092 break; 3371 break;
3093
3094 default: 3372 default:
3095 if (unlikely(net_ratelimit())) { 3373 if (unlikely(net_ratelimit())) {
3096 DPRINTK(PROBE, WARNING, 3374 DPRINTK(PROBE, WARNING,
@@ -3102,10 +3380,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3102 } 3380 }
3103 3381
3104 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 3382 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3383 /* use index zero for tx checksum offload */
3105 context_desc->mss_l4len_idx = 0; 3384 context_desc->mss_l4len_idx = 0;
3106 3385
3107 tx_buffer_info->time_stamp = jiffies; 3386 tx_buffer_info->time_stamp = jiffies;
3108 tx_buffer_info->next_to_watch = i; 3387 tx_buffer_info->next_to_watch = i;
3388
3109 adapter->hw_csum_tx_good++; 3389 adapter->hw_csum_tx_good++;
3110 i++; 3390 i++;
3111 if (i == tx_ring->count) 3391 if (i == tx_ring->count)
@@ -3114,12 +3394,13 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3114 3394
3115 return true; 3395 return true;
3116 } 3396 }
3397
3117 return false; 3398 return false;
3118} 3399}
3119 3400
3120static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 3401static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3121 struct ixgbe_ring *tx_ring, 3402 struct ixgbe_ring *tx_ring,
3122 struct sk_buff *skb, unsigned int first) 3403 struct sk_buff *skb, unsigned int first)
3123{ 3404{
3124 struct ixgbe_tx_buffer *tx_buffer_info; 3405 struct ixgbe_tx_buffer *tx_buffer_info;
3125 unsigned int len = skb->len; 3406 unsigned int len = skb->len;
@@ -3137,8 +3418,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3137 3418
3138 tx_buffer_info->length = size; 3419 tx_buffer_info->length = size;
3139 tx_buffer_info->dma = pci_map_single(adapter->pdev, 3420 tx_buffer_info->dma = pci_map_single(adapter->pdev,
3140 skb->data + offset, 3421 skb->data + offset,
3141 size, PCI_DMA_TODEVICE); 3422 size, PCI_DMA_TODEVICE);
3142 tx_buffer_info->time_stamp = jiffies; 3423 tx_buffer_info->time_stamp = jiffies;
3143 tx_buffer_info->next_to_watch = i; 3424 tx_buffer_info->next_to_watch = i;
3144 3425
@@ -3163,9 +3444,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3163 3444
3164 tx_buffer_info->length = size; 3445 tx_buffer_info->length = size;
3165 tx_buffer_info->dma = pci_map_page(adapter->pdev, 3446 tx_buffer_info->dma = pci_map_page(adapter->pdev,
3166 frag->page, 3447 frag->page,
3167 offset, 3448 offset,
3168 size, PCI_DMA_TODEVICE); 3449 size,
3450 PCI_DMA_TODEVICE);
3169 tx_buffer_info->time_stamp = jiffies; 3451 tx_buffer_info->time_stamp = jiffies;
3170 tx_buffer_info->next_to_watch = i; 3452 tx_buffer_info->next_to_watch = i;
3171 3453
@@ -3188,8 +3470,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3188} 3470}
3189 3471
3190static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 3472static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3191 struct ixgbe_ring *tx_ring, 3473 struct ixgbe_ring *tx_ring,
3192 int tx_flags, int count, u32 paylen, u8 hdr_len) 3474 int tx_flags, int count, u32 paylen, u8 hdr_len)
3193{ 3475{
3194 union ixgbe_adv_tx_desc *tx_desc = NULL; 3476 union ixgbe_adv_tx_desc *tx_desc = NULL;
3195 struct ixgbe_tx_buffer *tx_buffer_info; 3477 struct ixgbe_tx_buffer *tx_buffer_info;
@@ -3208,15 +3490,17 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3208 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3490 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3209 3491
3210 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3492 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3211 IXGBE_ADVTXD_POPTS_SHIFT; 3493 IXGBE_ADVTXD_POPTS_SHIFT;
3212 3494
3495 /* use index 1 context for tso */
3496 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3213 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3497 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3214 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 3498 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3215 IXGBE_ADVTXD_POPTS_SHIFT; 3499 IXGBE_ADVTXD_POPTS_SHIFT;
3216 3500
3217 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3501 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3218 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3502 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3219 IXGBE_ADVTXD_POPTS_SHIFT; 3503 IXGBE_ADVTXD_POPTS_SHIFT;
3220 3504
3221 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3505 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3222 3506
@@ -3226,9 +3510,8 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3226 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 3510 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3227 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3511 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3228 tx_desc->read.cmd_type_len = 3512 tx_desc->read.cmd_type_len =
3229 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3513 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3230 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3514 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3231
3232 i++; 3515 i++;
3233 if (i == tx_ring->count) 3516 if (i == tx_ring->count)
3234 i = 0; 3517 i = 0;
@@ -3249,7 +3532,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3249} 3532}
3250 3533
3251static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 3534static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3252 struct ixgbe_ring *tx_ring, int size) 3535 struct ixgbe_ring *tx_ring, int size)
3253{ 3536{
3254 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3537 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3255 3538
@@ -3265,61 +3548,52 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3265 return -EBUSY; 3548 return -EBUSY;
3266 3549
3267 /* A reprieve! - use start_queue because it doesn't call schedule */ 3550 /* A reprieve! - use start_queue because it doesn't call schedule */
3268 netif_wake_subqueue(netdev, tx_ring->queue_index); 3551 netif_start_subqueue(netdev, tx_ring->queue_index);
3269 ++adapter->restart_queue; 3552 ++adapter->restart_queue;
3270 return 0; 3553 return 0;
3271} 3554}
3272 3555
3273static int ixgbe_maybe_stop_tx(struct net_device *netdev, 3556static int ixgbe_maybe_stop_tx(struct net_device *netdev,
3274 struct ixgbe_ring *tx_ring, int size) 3557 struct ixgbe_ring *tx_ring, int size)
3275{ 3558{
3276 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 3559 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3277 return 0; 3560 return 0;
3278 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 3561 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3279} 3562}
3280 3563
3281
3282static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3564static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3283{ 3565{
3284 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3566 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3285 struct ixgbe_ring *tx_ring; 3567 struct ixgbe_ring *tx_ring;
3286 unsigned int len = skb->len;
3287 unsigned int first; 3568 unsigned int first;
3288 unsigned int tx_flags = 0; 3569 unsigned int tx_flags = 0;
3289 u8 hdr_len = 0; 3570 u8 hdr_len = 0;
3290 int r_idx = 0, tso; 3571 int r_idx = 0, tso;
3291 unsigned int mss = 0;
3292 int count = 0; 3572 int count = 0;
3293 unsigned int f; 3573 unsigned int f;
3294 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 3574
3295 len -= skb->data_len;
3296 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; 3575 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
3297 tx_ring = &adapter->tx_ring[r_idx]; 3576 tx_ring = &adapter->tx_ring[r_idx];
3298 3577
3299 3578 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3300 if (skb->len <= 0) { 3579 tx_flags |= vlan_tx_tag_get(skb);
3301 dev_kfree_skb(skb); 3580 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3302 return NETDEV_TX_OK; 3581 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3303 } 3582 }
3304 mss = skb_shinfo(skb)->gso_size; 3583 /* three things can cause us to need a context descriptor */
3305 3584 if (skb_is_gso(skb) ||
3306 if (mss) 3585 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3307 count++; 3586 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3308 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3309 count++; 3587 count++;
3310 3588
3311 count += TXD_USE_COUNT(len); 3589 count += TXD_USE_COUNT(skb_headlen(skb));
3312 for (f = 0; f < nr_frags; f++) 3590 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3313 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3591 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3314 3592
3315 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 3593 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
3316 adapter->tx_busy++; 3594 adapter->tx_busy++;
3317 return NETDEV_TX_BUSY; 3595 return NETDEV_TX_BUSY;
3318 } 3596 }
3319 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3320 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3321 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
3322 }
3323 3597
3324 if (skb->protocol == htons(ETH_P_IP)) 3598 if (skb->protocol == htons(ETH_P_IP))
3325 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3599 tx_flags |= IXGBE_TX_FLAGS_IPV4;
@@ -3333,12 +3607,12 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3333 if (tso) 3607 if (tso)
3334 tx_flags |= IXGBE_TX_FLAGS_TSO; 3608 tx_flags |= IXGBE_TX_FLAGS_TSO;
3335 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 3609 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3336 (skb->ip_summed == CHECKSUM_PARTIAL)) 3610 (skb->ip_summed == CHECKSUM_PARTIAL))
3337 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3611 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3338 3612
3339 ixgbe_tx_queue(adapter, tx_ring, tx_flags, 3613 ixgbe_tx_queue(adapter, tx_ring, tx_flags,
3340 ixgbe_tx_map(adapter, tx_ring, skb, first), 3614 ixgbe_tx_map(adapter, tx_ring, skb, first),
3341 skb->len, hdr_len); 3615 skb->len, hdr_len);
3342 3616
3343 netdev->trans_start = jiffies; 3617 netdev->trans_start = jiffies;
3344 3618
@@ -3372,15 +3646,16 @@ static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
3372static int ixgbe_set_mac(struct net_device *netdev, void *p) 3646static int ixgbe_set_mac(struct net_device *netdev, void *p)
3373{ 3647{
3374 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3648 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3649 struct ixgbe_hw *hw = &adapter->hw;
3375 struct sockaddr *addr = p; 3650 struct sockaddr *addr = p;
3376 3651
3377 if (!is_valid_ether_addr(addr->sa_data)) 3652 if (!is_valid_ether_addr(addr->sa_data))
3378 return -EADDRNOTAVAIL; 3653 return -EADDRNOTAVAIL;
3379 3654
3380 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3655 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3381 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 3656 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3382 3657
3383 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3658 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
3384 3659
3385 return 0; 3660 return 0;
3386} 3661}
@@ -3404,28 +3679,19 @@ static void ixgbe_netpoll(struct net_device *netdev)
3404#endif 3679#endif
3405 3680
3406/** 3681/**
3407 * ixgbe_napi_add_all - prep napi structs for use 3682 * ixgbe_link_config - set up initial link with default speed and duplex
3408 * @adapter: private struct 3683 * @hw: pointer to private hardware struct
3409 * helper function to napi_add each possible q_vector->napi 3684 *
3410 */ 3685 * Returns 0 on success, negative on failure
3411static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3686 **/
3687static int ixgbe_link_config(struct ixgbe_hw *hw)
3412{ 3688{
3413 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3689 u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;
3414 int (*poll)(struct napi_struct *, int);
3415 3690
3416 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3691 /* must always autoneg for both 1G and 10G link */
3417 poll = &ixgbe_clean_rxonly; 3692 hw->mac.autoneg = true;
3418 } else {
3419 poll = &ixgbe_poll;
3420 /* only one q_vector for legacy modes */
3421 q_vectors = 1;
3422 }
3423 3693
3424 for (i = 0; i < q_vectors; i++) { 3694 return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
3425 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3426 netif_napi_add(adapter->netdev, &q_vector->napi,
3427 (*poll), 64);
3428 }
3429} 3695}
3430 3696
3431/** 3697/**
@@ -3440,17 +3706,16 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3440 * and a hardware reset occur. 3706 * and a hardware reset occur.
3441 **/ 3707 **/
3442static int __devinit ixgbe_probe(struct pci_dev *pdev, 3708static int __devinit ixgbe_probe(struct pci_dev *pdev,
3443 const struct pci_device_id *ent) 3709 const struct pci_device_id *ent)
3444{ 3710{
3445 struct net_device *netdev; 3711 struct net_device *netdev;
3446 struct ixgbe_adapter *adapter = NULL; 3712 struct ixgbe_adapter *adapter = NULL;
3447 struct ixgbe_hw *hw; 3713 struct ixgbe_hw *hw;
3448 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 3714 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
3449 unsigned long mmio_start, mmio_len;
3450 static int cards_found; 3715 static int cards_found;
3451 int i, err, pci_using_dac; 3716 int i, err, pci_using_dac;
3452 u16 link_status, link_speed, link_width; 3717 u16 link_status, link_speed, link_width;
3453 u32 part_num; 3718 u32 part_num, eec;
3454 3719
3455 err = pci_enable_device(pdev); 3720 err = pci_enable_device(pdev);
3456 if (err) 3721 if (err)
@@ -3465,7 +3730,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3465 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3730 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3466 if (err) { 3731 if (err) {
3467 dev_err(&pdev->dev, "No usable DMA " 3732 dev_err(&pdev->dev, "No usable DMA "
3468 "configuration, aborting\n"); 3733 "configuration, aborting\n");
3469 goto err_dma; 3734 goto err_dma;
3470 } 3735 }
3471 } 3736 }
@@ -3498,10 +3763,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3498 hw->back = adapter; 3763 hw->back = adapter;
3499 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3764 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3500 3765
3501 mmio_start = pci_resource_start(pdev, 0); 3766 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3502 mmio_len = pci_resource_len(pdev, 0); 3767 pci_resource_len(pdev, 0));
3503
3504 hw->hw_addr = ioremap(mmio_start, mmio_len);
3505 if (!hw->hw_addr) { 3768 if (!hw->hw_addr) {
3506 err = -EIO; 3769 err = -EIO;
3507 goto err_ioremap; 3770 goto err_ioremap;
@@ -3516,7 +3779,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3516 netdev->stop = &ixgbe_close; 3779 netdev->stop = &ixgbe_close;
3517 netdev->hard_start_xmit = &ixgbe_xmit_frame; 3780 netdev->hard_start_xmit = &ixgbe_xmit_frame;
3518 netdev->get_stats = &ixgbe_get_stats; 3781 netdev->get_stats = &ixgbe_get_stats;
3519 netdev->set_multicast_list = &ixgbe_set_multi; 3782 netdev->set_rx_mode = &ixgbe_set_rx_mode;
3783 netdev->set_multicast_list = &ixgbe_set_rx_mode;
3520 netdev->set_mac_address = &ixgbe_set_mac; 3784 netdev->set_mac_address = &ixgbe_set_mac;
3521 netdev->change_mtu = &ixgbe_change_mtu; 3785 netdev->change_mtu = &ixgbe_change_mtu;
3522 ixgbe_set_ethtool_ops(netdev); 3786 ixgbe_set_ethtool_ops(netdev);
@@ -3530,22 +3794,23 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3530#endif 3794#endif
3531 strcpy(netdev->name, pci_name(pdev)); 3795 strcpy(netdev->name, pci_name(pdev));
3532 3796
3533 netdev->mem_start = mmio_start;
3534 netdev->mem_end = mmio_start + mmio_len;
3535
3536 adapter->bd_number = cards_found; 3797 adapter->bd_number = cards_found;
3537 3798
3538 /* PCI config space info */
3539 hw->vendor_id = pdev->vendor;
3540 hw->device_id = pdev->device;
3541 hw->revision_id = pdev->revision;
3542 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3543 hw->subsystem_device_id = pdev->subsystem_device;
3544
3545 /* Setup hw api */ 3799 /* Setup hw api */
3546 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3800 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3547 hw->mac.type = ii->mac; 3801 hw->mac.type = ii->mac;
3548 3802
3803 /* EEPROM */
3804 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
3805 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
3806 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
3807 if (!(eec & (1 << 8)))
3808 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
3809
3810 /* PHY */
3811 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
3812 /* phy->sfp_type = ixgbe_sfp_type_unknown; */
3813
3549 err = ii->get_invariants(hw); 3814 err = ii->get_invariants(hw);
3550 if (err) 3815 if (err)
3551 goto err_hw_init; 3816 goto err_hw_init;
@@ -3555,26 +3820,34 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3555 if (err) 3820 if (err)
3556 goto err_sw_init; 3821 goto err_sw_init;
3557 3822
3823 /* reset_hw fills in the perm_addr as well */
3824 err = hw->mac.ops.reset_hw(hw);
3825 if (err) {
3826 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
3827 goto err_sw_init;
3828 }
3829
3558 netdev->features = NETIF_F_SG | 3830 netdev->features = NETIF_F_SG |
3559 NETIF_F_HW_CSUM | 3831 NETIF_F_IP_CSUM |
3560 NETIF_F_HW_VLAN_TX | 3832 NETIF_F_HW_VLAN_TX |
3561 NETIF_F_HW_VLAN_RX | 3833 NETIF_F_HW_VLAN_RX |
3562 NETIF_F_HW_VLAN_FILTER; 3834 NETIF_F_HW_VLAN_FILTER;
3563 3835
3564 netdev->features |= NETIF_F_LRO; 3836 netdev->features |= NETIF_F_IPV6_CSUM;
3565 netdev->features |= NETIF_F_TSO; 3837 netdev->features |= NETIF_F_TSO;
3566 netdev->features |= NETIF_F_TSO6; 3838 netdev->features |= NETIF_F_TSO6;
3839 netdev->features |= NETIF_F_LRO;
3567 3840
3568 netdev->vlan_features |= NETIF_F_TSO; 3841 netdev->vlan_features |= NETIF_F_TSO;
3569 netdev->vlan_features |= NETIF_F_TSO6; 3842 netdev->vlan_features |= NETIF_F_TSO6;
3570 netdev->vlan_features |= NETIF_F_HW_CSUM; 3843 netdev->vlan_features |= NETIF_F_IP_CSUM;
3571 netdev->vlan_features |= NETIF_F_SG; 3844 netdev->vlan_features |= NETIF_F_SG;
3572 3845
3573 if (pci_using_dac) 3846 if (pci_using_dac)
3574 netdev->features |= NETIF_F_HIGHDMA; 3847 netdev->features |= NETIF_F_HIGHDMA;
3575 3848
3576 /* make sure the EEPROM is good */ 3849 /* make sure the EEPROM is good */
3577 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 3850 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
3578 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 3851 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
3579 err = -EIO; 3852 err = -EIO;
3580 goto err_eeprom; 3853 goto err_eeprom;
@@ -3583,7 +3856,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3583 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 3856 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
3584 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 3857 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
3585 3858
3586 if (ixgbe_validate_mac_addr(netdev->dev_addr)) { 3859 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
3860 dev_err(&pdev->dev, "invalid MAC address\n");
3587 err = -EIO; 3861 err = -EIO;
3588 goto err_eeprom; 3862 goto err_eeprom;
3589 } 3863 }
@@ -3593,13 +3867,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3593 adapter->watchdog_timer.data = (unsigned long)adapter; 3867 adapter->watchdog_timer.data = (unsigned long)adapter;
3594 3868
3595 INIT_WORK(&adapter->reset_task, ixgbe_reset_task); 3869 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
3596 3870 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
3597 /* initialize default flow control settings */
3598 hw->fc.original_type = ixgbe_fc_full;
3599 hw->fc.type = ixgbe_fc_full;
3600 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
3601 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
3602 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
3603 3871
3604 err = ixgbe_init_interrupt_scheme(adapter); 3872 err = ixgbe_init_interrupt_scheme(adapter);
3605 if (err) 3873 if (err)
@@ -3610,32 +3878,39 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3610 link_speed = link_status & IXGBE_PCI_LINK_SPEED; 3878 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3611 link_width = link_status & IXGBE_PCI_LINK_WIDTH; 3879 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3612 dev_info(&pdev->dev, "(PCI Express:%s:%s) " 3880 dev_info(&pdev->dev, "(PCI Express:%s:%s) "
3613 "%02x:%02x:%02x:%02x:%02x:%02x\n", 3881 "%02x:%02x:%02x:%02x:%02x:%02x\n",
3614 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 3882 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3615 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 3883 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3616 "Unknown"), 3884 "Unknown"),
3617 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 3885 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
3618 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 3886 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
3619 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 3887 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3620 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : 3888 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3621 "Unknown"), 3889 "Unknown"),
3622 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 3890 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
3623 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); 3891 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3624 ixgbe_read_part_num(hw, &part_num); 3892 ixgbe_read_pba_num_generic(hw, &part_num);
3625 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 3893 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3626 hw->mac.type, hw->phy.type, 3894 hw->mac.type, hw->phy.type,
3627 (part_num >> 8), (part_num & 0xff)); 3895 (part_num >> 8), (part_num & 0xff));
3628 3896
3629 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { 3897 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
3630 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 3898 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
3631 "this card is not sufficient for optimal " 3899 "this card is not sufficient for optimal "
3632 "performance.\n"); 3900 "performance.\n");
3633 dev_warn(&pdev->dev, "For optimal performance a x8 " 3901 dev_warn(&pdev->dev, "For optimal performance a x8 "
3634 "PCI-Express slot is required.\n"); 3902 "PCI-Express slot is required.\n");
3635 } 3903 }
3636 3904
3637 /* reset the hardware with the new settings */ 3905 /* reset the hardware with the new settings */
3638 ixgbe_start_hw(hw); 3906 hw->mac.ops.start_hw(hw);
3907
3908 /* link_config depends on start_hw being called at least once */
3909 err = ixgbe_link_config(hw);
3910 if (err) {
3911 dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
3912 goto err_register;
3913 }
3639 3914
3640 netif_carrier_off(netdev); 3915 netif_carrier_off(netdev);
3641 netif_tx_stop_all_queues(netdev); 3916 netif_tx_stop_all_queues(netdev);
@@ -3647,7 +3922,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3647 if (err) 3922 if (err)
3648 goto err_register; 3923 goto err_register;
3649 3924
3650#ifdef CONFIG_DCA 3925#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3651 if (dca_add_requester(&pdev->dev) == 0) { 3926 if (dca_add_requester(&pdev->dev) == 0) {
3652 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 3927 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3653 /* always use CB2 mode, difference is masked 3928 /* always use CB2 mode, difference is masked
@@ -3697,7 +3972,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3697 3972
3698 flush_scheduled_work(); 3973 flush_scheduled_work();
3699 3974
3700#ifdef CONFIG_DCA 3975#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3701 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 3976 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3702 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 3977 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
3703 dca_remove_requester(&pdev->dev); 3978 dca_remove_requester(&pdev->dev);
@@ -3715,6 +3990,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3715 pci_release_regions(pdev); 3990 pci_release_regions(pdev);
3716 3991
3717 DPRINTK(PROBE, INFO, "complete\n"); 3992 DPRINTK(PROBE, INFO, "complete\n");
3993 ixgbe_napi_del_all(adapter);
3718 kfree(adapter->tx_ring); 3994 kfree(adapter->tx_ring);
3719 kfree(adapter->rx_ring); 3995 kfree(adapter->rx_ring);
3720 3996
@@ -3732,7 +4008,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3732 * this device has been detected. 4008 * this device has been detected.
3733 */ 4009 */
3734static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 4010static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3735 pci_channel_state_t state) 4011 pci_channel_state_t state)
3736{ 4012{
3737 struct net_device *netdev = pci_get_drvdata(pdev); 4013 struct net_device *netdev = pci_get_drvdata(pdev);
3738 struct ixgbe_adapter *adapter = netdev->priv; 4014 struct ixgbe_adapter *adapter = netdev->priv;
@@ -3743,7 +4019,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3743 ixgbe_down(adapter); 4019 ixgbe_down(adapter);
3744 pci_disable_device(pdev); 4020 pci_disable_device(pdev);
3745 4021
3746 /* Request a slot slot reset. */ 4022 /* Request a slot reset. */
3747 return PCI_ERS_RESULT_NEED_RESET; 4023 return PCI_ERS_RESULT_NEED_RESET;
3748} 4024}
3749 4025
@@ -3760,7 +4036,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
3760 4036
3761 if (pci_enable_device(pdev)) { 4037 if (pci_enable_device(pdev)) {
3762 DPRINTK(PROBE, ERR, 4038 DPRINTK(PROBE, ERR,
3763 "Cannot re-enable PCI device after reset.\n"); 4039 "Cannot re-enable PCI device after reset.\n");
3764 return PCI_ERS_RESULT_DISCONNECT; 4040 return PCI_ERS_RESULT_DISCONNECT;
3765 } 4041 }
3766 pci_set_master(pdev); 4042 pci_set_master(pdev);
@@ -3794,7 +4070,6 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
3794 } 4070 }
3795 4071
3796 netif_device_attach(netdev); 4072 netif_device_attach(netdev);
3797
3798} 4073}
3799 4074
3800static struct pci_error_handlers ixgbe_err_handler = { 4075static struct pci_error_handlers ixgbe_err_handler = {
@@ -3830,13 +4105,14 @@ static int __init ixgbe_init_module(void)
3830 4105
3831 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); 4106 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
3832 4107
3833#ifdef CONFIG_DCA 4108#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3834 dca_register_notify(&dca_notifier); 4109 dca_register_notify(&dca_notifier);
3835 4110
3836#endif 4111#endif
3837 ret = pci_register_driver(&ixgbe_driver); 4112 ret = pci_register_driver(&ixgbe_driver);
3838 return ret; 4113 return ret;
3839} 4114}
4115
3840module_init(ixgbe_init_module); 4116module_init(ixgbe_init_module);
3841 4117
3842/** 4118/**
@@ -3847,24 +4123,24 @@ module_init(ixgbe_init_module);
3847 **/ 4123 **/
3848static void __exit ixgbe_exit_module(void) 4124static void __exit ixgbe_exit_module(void)
3849{ 4125{
3850#ifdef CONFIG_DCA 4126#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3851 dca_unregister_notify(&dca_notifier); 4127 dca_unregister_notify(&dca_notifier);
3852#endif 4128#endif
3853 pci_unregister_driver(&ixgbe_driver); 4129 pci_unregister_driver(&ixgbe_driver);
3854} 4130}
3855 4131
3856#ifdef CONFIG_DCA 4132#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
3857static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 4133static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
3858 void *p) 4134 void *p)
3859{ 4135{
3860 int ret_val; 4136 int ret_val;
3861 4137
3862 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 4138 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
3863 __ixgbe_notify_dca); 4139 __ixgbe_notify_dca);
3864 4140
3865 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 4141 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3866} 4142}
3867#endif /* CONFIG_DCA */ 4143#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
3868 4144
3869module_exit(ixgbe_exit_module); 4145module_exit(ixgbe_exit_module);
3870 4146
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 8002931ae823..764035a8c9a1 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -33,32 +32,36 @@
33#include "ixgbe_common.h" 32#include "ixgbe_common.h"
34#include "ixgbe_phy.h" 33#include "ixgbe_phy.h"
35 34
35static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
36static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 36static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
37static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 37static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
38static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
39static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
40 u32 device_type, u16 phy_data);
41 38
42/** 39/**
43 * ixgbe_identify_phy - Get physical layer module 40 * ixgbe_identify_phy_generic - Get physical layer module
44 * @hw: pointer to hardware structure 41 * @hw: pointer to hardware structure
45 * 42 *
46 * Determines the physical layer module found on the current adapter. 43 * Determines the physical layer module found on the current adapter.
47 **/ 44 **/
48s32 ixgbe_identify_phy(struct ixgbe_hw *hw) 45s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
49{ 46{
50 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 47 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
51 u32 phy_addr; 48 u32 phy_addr;
52 49
53 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 50 if (hw->phy.type == ixgbe_phy_unknown) {
54 if (ixgbe_validate_phy_addr(hw, phy_addr)) { 51 for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
55 hw->phy.addr = phy_addr; 52 if (ixgbe_validate_phy_addr(hw, phy_addr)) {
56 ixgbe_get_phy_id(hw); 53 hw->phy.addr = phy_addr;
57 hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); 54 ixgbe_get_phy_id(hw);
58 status = 0; 55 hw->phy.type =
59 break; 56 ixgbe_get_phy_type_from_id(hw->phy.id);
57 status = 0;
58 break;
59 }
60 } 60 }
61 } else {
62 status = 0;
61 } 63 }
64
62 return status; 65 return status;
63} 66}
64 67
@@ -73,10 +76,8 @@ static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
73 bool valid = false; 76 bool valid = false;
74 77
75 hw->phy.addr = phy_addr; 78 hw->phy.addr = phy_addr;
76 ixgbe_read_phy_reg(hw, 79 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
77 IXGBE_MDIO_PHY_ID_HIGH, 80 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
78 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
79 &phy_id);
80 81
81 if (phy_id != 0xFFFF && phy_id != 0x0) 82 if (phy_id != 0xFFFF && phy_id != 0x0)
82 valid = true; 83 valid = true;
@@ -95,21 +96,18 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
95 u16 phy_id_high = 0; 96 u16 phy_id_high = 0;
96 u16 phy_id_low = 0; 97 u16 phy_id_low = 0;
97 98
98 status = ixgbe_read_phy_reg(hw, 99 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
99 IXGBE_MDIO_PHY_ID_HIGH, 100 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
100 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 101 &phy_id_high);
101 &phy_id_high);
102 102
103 if (status == 0) { 103 if (status == 0) {
104 hw->phy.id = (u32)(phy_id_high << 16); 104 hw->phy.id = (u32)(phy_id_high << 16);
105 status = ixgbe_read_phy_reg(hw, 105 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
106 IXGBE_MDIO_PHY_ID_LOW, 106 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
107 IXGBE_MDIO_PMA_PMD_DEV_TYPE, 107 &phy_id_low);
108 &phy_id_low);
109 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 108 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
110 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 109 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
111 } 110 }
112
113 return status; 111 return status;
114} 112}
115 113
@@ -123,9 +121,6 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
123 enum ixgbe_phy_type phy_type; 121 enum ixgbe_phy_type phy_type;
124 122
125 switch (phy_id) { 123 switch (phy_id) {
126 case TN1010_PHY_ID:
127 phy_type = ixgbe_phy_tn;
128 break;
129 case QT2022_PHY_ID: 124 case QT2022_PHY_ID:
130 phy_type = ixgbe_phy_qt; 125 phy_type = ixgbe_phy_qt;
131 break; 126 break;
@@ -138,32 +133,31 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
138} 133}
139 134
140/** 135/**
141 * ixgbe_reset_phy - Performs a PHY reset 136 * ixgbe_reset_phy_generic - Performs a PHY reset
142 * @hw: pointer to hardware structure 137 * @hw: pointer to hardware structure
143 **/ 138 **/
144s32 ixgbe_reset_phy(struct ixgbe_hw *hw) 139s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
145{ 140{
146 /* 141 /*
147 * Perform soft PHY reset to the PHY_XS. 142 * Perform soft PHY reset to the PHY_XS.
148 * This will cause a soft reset to the PHY 143 * This will cause a soft reset to the PHY
149 */ 144 */
150 return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, 145 return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
151 IXGBE_MDIO_PHY_XS_DEV_TYPE, 146 IXGBE_MDIO_PHY_XS_DEV_TYPE,
152 IXGBE_MDIO_PHY_XS_RESET); 147 IXGBE_MDIO_PHY_XS_RESET);
153} 148}
154 149
155/** 150/**
156 * ixgbe_read_phy_reg - Reads a value from a specified PHY register 151 * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
157 * @hw: pointer to hardware structure 152 * @hw: pointer to hardware structure
158 * @reg_addr: 32 bit address of PHY register to read 153 * @reg_addr: 32 bit address of PHY register to read
159 * @phy_data: Pointer to read data from PHY register 154 * @phy_data: Pointer to read data from PHY register
160 **/ 155 **/
161s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 156s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
162 u32 device_type, u16 *phy_data) 157 u32 device_type, u16 *phy_data)
163{ 158{
164 u32 command; 159 u32 command;
165 u32 i; 160 u32 i;
166 u32 timeout = 10;
167 u32 data; 161 u32 data;
168 s32 status = 0; 162 s32 status = 0;
169 u16 gssr; 163 u16 gssr;
@@ -179,9 +173,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
179 if (status == 0) { 173 if (status == 0) {
180 /* Setup and write the address cycle command */ 174 /* Setup and write the address cycle command */
181 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 175 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
182 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 176 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
183 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 177 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
184 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 178 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
185 179
186 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 180 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
187 181
@@ -190,7 +184,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
190 * The MDI Command bit will clear when the operation is 184 * The MDI Command bit will clear when the operation is
191 * complete 185 * complete
192 */ 186 */
193 for (i = 0; i < timeout; i++) { 187 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
194 udelay(10); 188 udelay(10);
195 189
196 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 190 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -210,9 +204,9 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
210 * command 204 * command
211 */ 205 */
212 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 206 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
213 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 207 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
214 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 208 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
215 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); 209 (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
216 210
217 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 211 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
218 212
@@ -221,7 +215,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
221 * completed. The MDI Command bit will clear when the 215 * completed. The MDI Command bit will clear when the
222 * operation is complete 216 * operation is complete
223 */ 217 */
224 for (i = 0; i < timeout; i++) { 218 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
225 udelay(10); 219 udelay(10);
226 220
227 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 221 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
@@ -231,8 +225,7 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
231 } 225 }
232 226
233 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 227 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
234 hw_dbg(hw, 228 hw_dbg(hw, "PHY read command didn't complete\n");
235 "PHY read command didn't complete\n");
236 status = IXGBE_ERR_PHY; 229 status = IXGBE_ERR_PHY;
237 } else { 230 } else {
238 /* 231 /*
@@ -247,22 +240,22 @@ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
247 240
248 ixgbe_release_swfw_sync(hw, gssr); 241 ixgbe_release_swfw_sync(hw, gssr);
249 } 242 }
243
250 return status; 244 return status;
251} 245}
252 246
253/** 247/**
254 * ixgbe_write_phy_reg - Writes a value to specified PHY register 248 * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
255 * @hw: pointer to hardware structure 249 * @hw: pointer to hardware structure
256 * @reg_addr: 32 bit PHY register to write 250 * @reg_addr: 32 bit PHY register to write
257 * @device_type: 5 bit device type 251 * @device_type: 5 bit device type
258 * @phy_data: Data to write to the PHY register 252 * @phy_data: Data to write to the PHY register
259 **/ 253 **/
260static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 254s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
261 u32 device_type, u16 phy_data) 255 u32 device_type, u16 phy_data)
262{ 256{
263 u32 command; 257 u32 command;
264 u32 i; 258 u32 i;
265 u32 timeout = 10;
266 s32 status = 0; 259 s32 status = 0;
267 u16 gssr; 260 u16 gssr;
268 261
@@ -280,9 +273,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
280 273
281 /* Setup and write the address cycle command */ 274 /* Setup and write the address cycle command */
282 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 275 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
283 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 276 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
284 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 277 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
285 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 278 (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
286 279
287 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 280 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
288 281
@@ -291,19 +284,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
291 * The MDI Command bit will clear when the operation is 284 * The MDI Command bit will clear when the operation is
292 * complete 285 * complete
293 */ 286 */
294 for (i = 0; i < timeout; i++) { 287 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
295 udelay(10); 288 udelay(10);
296 289
297 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 290 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
298 291
299 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 292 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
300 hw_dbg(hw, "PHY address cmd didn't complete\n");
301 break; 293 break;
302 }
303 } 294 }
304 295
305 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 296 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
297 hw_dbg(hw, "PHY address cmd didn't complete\n");
306 status = IXGBE_ERR_PHY; 298 status = IXGBE_ERR_PHY;
299 }
307 300
308 if (status == 0) { 301 if (status == 0) {
309 /* 302 /*
@@ -311,9 +304,9 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
311 * command 304 * command
312 */ 305 */
313 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 306 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
314 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 307 (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
315 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 308 (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
316 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); 309 (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
317 310
318 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 311 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
319 312
@@ -322,20 +315,19 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
322 * completed. The MDI Command bit will clear when the 315 * completed. The MDI Command bit will clear when the
323 * operation is complete 316 * operation is complete
324 */ 317 */
325 for (i = 0; i < timeout; i++) { 318 for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
326 udelay(10); 319 udelay(10);
327 320
328 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 321 command = IXGBE_READ_REG(hw, IXGBE_MSCA);
329 322
330 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 323 if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
331 hw_dbg(hw, "PHY write command did not "
332 "complete.\n");
333 break; 324 break;
334 }
335 } 325 }
336 326
337 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 327 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
328 hw_dbg(hw, "PHY address cmd didn't complete\n");
338 status = IXGBE_ERR_PHY; 329 status = IXGBE_ERR_PHY;
330 }
339 } 331 }
340 332
341 ixgbe_release_swfw_sync(hw, gssr); 333 ixgbe_release_swfw_sync(hw, gssr);
@@ -345,67 +337,54 @@ static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr,
345} 337}
346 338
347/** 339/**
348 * ixgbe_setup_tnx_phy_link - Set and restart autoneg 340 * ixgbe_setup_phy_link_generic - Set and restart autoneg
349 * @hw: pointer to hardware structure 341 * @hw: pointer to hardware structure
350 * 342 *
351 * Restart autonegotiation and PHY and waits for completion. 343 * Restart autonegotiation and PHY and waits for completion.
352 **/ 344 **/
353s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) 345s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
354{ 346{
355 s32 status = IXGBE_NOT_IMPLEMENTED; 347 s32 status = IXGBE_NOT_IMPLEMENTED;
356 u32 time_out; 348 u32 time_out;
357 u32 max_time_out = 10; 349 u32 max_time_out = 10;
358 u16 autoneg_speed_selection_register = 0x10; 350 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
359 u16 autoneg_restart_mask = 0x0200;
360 u16 autoneg_complete_mask = 0x0020;
361 u16 autoneg_reg = 0;
362 351
363 /* 352 /*
364 * Set advertisement settings in PHY based on autoneg_advertised 353 * Set advertisement settings in PHY based on autoneg_advertised
365 * settings. If autoneg_advertised = 0, then advertise default values 354 * settings. If autoneg_advertised = 0, then advertise default values
366 * txn devices cannot be "forced" to a autoneg 10G and fail. But can 355 * tnx devices cannot be "forced" to a autoneg 10G and fail. But can
367 * for a 1G. 356 * for a 1G.
368 */ 357 */
369 ixgbe_read_phy_reg(hw, 358 hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
370 autoneg_speed_selection_register, 359 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
371 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
372 &autoneg_reg);
373 360
374 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) 361 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL)
375 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ 362 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */
376 else 363 else
377 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ 364 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */
378 365
379 ixgbe_write_phy_reg(hw, 366 hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG,
380 autoneg_speed_selection_register, 367 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
381 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
382 autoneg_reg);
383
384 368
385 /* Restart PHY autonegotiation and wait for completion */ 369 /* Restart PHY autonegotiation and wait for completion */
386 ixgbe_read_phy_reg(hw, 370 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
387 IXGBE_MDIO_AUTO_NEG_CONTROL, 371 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
388 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
389 &autoneg_reg);
390 372
391 autoneg_reg |= autoneg_restart_mask; 373 autoneg_reg |= IXGBE_MII_RESTART;
392 374
393 ixgbe_write_phy_reg(hw, 375 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
394 IXGBE_MDIO_AUTO_NEG_CONTROL, 376 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
395 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
396 autoneg_reg);
397 377
398 /* Wait for autonegotiation to finish */ 378 /* Wait for autonegotiation to finish */
399 for (time_out = 0; time_out < max_time_out; time_out++) { 379 for (time_out = 0; time_out < max_time_out; time_out++) {
400 udelay(10); 380 udelay(10);
401 /* Restart PHY autonegotiation and wait for completion */ 381 /* Restart PHY autonegotiation and wait for completion */
402 status = ixgbe_read_phy_reg(hw, 382 status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
403 IXGBE_MDIO_AUTO_NEG_STATUS, 383 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
404 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 384 &autoneg_reg);
405 &autoneg_reg);
406 385
407 autoneg_reg &= autoneg_complete_mask; 386 autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
408 if (autoneg_reg == autoneg_complete_mask) { 387 if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
409 status = 0; 388 status = 0;
410 break; 389 break;
411 } 390 }
@@ -418,64 +397,17 @@ s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw)
418} 397}
419 398
420/** 399/**
421 * ixgbe_check_tnx_phy_link - Determine link and speed status 400 * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
422 * @hw: pointer to hardware structure
423 *
424 * Reads the VS1 register to determine if link is up and the current speed for
425 * the PHY.
426 **/
427s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed,
428 bool *link_up)
429{
430 s32 status = 0;
431 u32 time_out;
432 u32 max_time_out = 10;
433 u16 phy_link = 0;
434 u16 phy_speed = 0;
435 u16 phy_data = 0;
436
437 /* Initialize speed and link to default case */
438 *link_up = false;
439 *speed = IXGBE_LINK_SPEED_10GB_FULL;
440
441 /*
442 * Check current speed and link status of the PHY register.
443 * This is a vendor specific register and may have to
444 * be changed for other copper PHYs.
445 */
446 for (time_out = 0; time_out < max_time_out; time_out++) {
447 udelay(10);
448 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
449 *link_up = true;
450 if (phy_speed ==
451 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
452 *speed = IXGBE_LINK_SPEED_1GB_FULL;
453 break;
454 } else {
455 status = ixgbe_read_phy_reg(hw,
456 IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
457 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
458 &phy_data);
459 phy_link = phy_data &
460 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
461 phy_speed = phy_data &
462 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
463 }
464 }
465
466 return status;
467}
468
469/**
470 * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities
471 * @hw: pointer to hardware structure 401 * @hw: pointer to hardware structure
472 * @speed: new link speed 402 * @speed: new link speed
473 * @autoneg: true if autonegotiation enabled 403 * @autoneg: true if autonegotiation enabled
474 **/ 404 **/
475s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, 405s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
476 bool autoneg, 406 ixgbe_link_speed speed,
477 bool autoneg_wait_to_complete) 407 bool autoneg,
408 bool autoneg_wait_to_complete)
478{ 409{
410
479 /* 411 /*
480 * Clear autoneg_advertised and set new values based on input link 412 * Clear autoneg_advertised and set new values based on input link
481 * speed. 413 * speed.
@@ -484,11 +416,13 @@ s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed,
484 416
485 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 417 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
486 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 418 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
419
487 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 420 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
488 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 421 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
489 422
490 /* Setup link based on the new speed settings */ 423 /* Setup link based on the new speed settings */
491 ixgbe_setup_tnx_phy_link(hw); 424 hw->phy.ops.setup_link(hw);
492 425
493 return 0; 426 return 0;
494} 427}
428
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index aa3ea72e678e..9bfe3f2b1d8f 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -30,20 +29,52 @@
30#define _IXGBE_PHY_H_ 29#define _IXGBE_PHY_H_
31 30
32#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
33 33
34s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); 34/* EEPROM byte offsets */
35s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 35#define IXGBE_SFF_IDENTIFIER 0x0
36s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 36#define IXGBE_SFF_IDENTIFIER_SFP 0x3
37 bool autoneg_wait_to_complete); 37#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
38s32 ixgbe_identify_phy(struct ixgbe_hw *hw); 38#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
39s32 ixgbe_reset_phy(struct ixgbe_hw *hw); 39#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
40s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 40#define IXGBE_SFF_1GBE_COMP_CODES 0x6
41 u32 device_type, u16 *phy_data); 41#define IXGBE_SFF_10GBE_COMP_CODES 0x3
42 42#define IXGBE_SFF_TRANSMISSION_MEDIA 0x9
43/* PHY specific */ 43
44s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); 44/* Bitmasks */
45s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 45#define IXGBE_SFF_TWIN_AX_CAPABLE 0x80
46s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 46#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
47 bool autoneg_wait_to_complete); 47#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
48#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
49#define IXGBE_I2C_EEPROM_READ_MASK 0x100
50#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
51#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
52#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1
53#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
54#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
55
56/* Bit-shift macros */
57#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12
58#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8
59#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4
60
61/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
62#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600
63#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
64#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
65
66
67s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
68s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
69s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
70s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
71 u32 device_type, u16 *phy_data);
72s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
73 u32 device_type, u16 phy_data);
74s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
75s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
76 ixgbe_link_speed speed,
77 bool autoneg,
78 bool autoneg_wait_to_complete);
48 79
49#endif /* _IXGBE_PHY_H_ */ 80#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index c0282a223df3..c6f8fa1c4e59 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,7 +20,6 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 25
@@ -37,9 +36,9 @@
37/* Device IDs */ 36/* Device IDs */
38#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 37#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
39#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 38#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
40#define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8
41#define IXGBE_DEV_ID_82598EB_CX4 0x10DD 39#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
42#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC 40#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
41#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
43 42
44/* General Registers */ 43/* General Registers */
45#define IXGBE_CTRL 0x00000 44#define IXGBE_CTRL 0x00000
@@ -70,11 +69,11 @@
70#define IXGBE_EIMC 0x00888 69#define IXGBE_EIMC 0x00888
71#define IXGBE_EIAC 0x00810 70#define IXGBE_EIAC 0x00810
72#define IXGBE_EIAM 0x00890 71#define IXGBE_EIAM 0x00890
73#define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ 72#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4)))
74#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ 73#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
75#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ 74#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */
76#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ 75#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */
77#define IXGBE_PBACL 0x11068 76#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
78#define IXGBE_GPIE 0x00898 77#define IXGBE_GPIE 0x00898
79 78
80/* Flow Control Registers */ 79/* Flow Control Registers */
@@ -86,20 +85,33 @@
86#define IXGBE_TFCS 0x0CE00 85#define IXGBE_TFCS 0x0CE00
87 86
88/* Receive DMA Registers */ 87/* Receive DMA Registers */
89#define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ 88#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40)))
90#define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) 89#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40)))
91#define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) 90#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40)))
92#define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) 91#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40)))
93#define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) 92#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40)))
94#define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) 93#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40)))
95#define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40)) 94/*
96#define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) 95 * Split and Replication Receive Control Registers
97 /* array of 16 (0x02100-0x0213C) */ 96 * 00-15 : 0x02100 + n*4
98#define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) 97 * 16-64 : 0x01014 + n*0x40
99 /* array of 16 (0x02200-0x0223C) */ 98 * 64-127: 0x0D014 + (n-64)*0x40
100#define IXGBE_RDRXCTL 0x02F00 99 */
100#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
101 (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
102 (0x0D014 + ((_i - 64) * 0x40))))
103/*
104 * Rx DCA Control Register:
105 * 00-15 : 0x02200 + n*4
106 * 16-64 : 0x0100C + n*0x40
107 * 64-127: 0x0D00C + (n-64)*0x40
108 */
109#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
110 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
111 (0x0D00C + ((_i - 64) * 0x40))))
112#define IXGBE_RDRXCTL 0x02F00
101#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 113#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
102 /* 8 of these 0x03C00 - 0x03C1C */ 114 /* 8 of these 0x03C00 - 0x03C1C */
103#define IXGBE_RXCTRL 0x03000 115#define IXGBE_RXCTRL 0x03000
104#define IXGBE_DROPEN 0x03D04 116#define IXGBE_DROPEN 0x03D04
105#define IXGBE_RXPBSIZE_SHIFT 10 117#define IXGBE_RXPBSIZE_SHIFT 10
@@ -107,29 +119,32 @@
107/* Receive Registers */ 119/* Receive Registers */
108#define IXGBE_RXCSUM 0x05000 120#define IXGBE_RXCSUM 0x05000
109#define IXGBE_RFCTL 0x05008 121#define IXGBE_RFCTL 0x05008
122#define IXGBE_DRECCCTL 0x02F08
123#define IXGBE_DRECCCTL_DISABLE 0
124/* Multicast Table Array - 128 entries */
110#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) 125#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
111 /* Multicast Table Array - 128 entries */ 126#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8)))
112#define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ 127#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8)))
113#define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ 128/* Packet split receive type */
114#define IXGBE_PSRTYPE 0x05480 129#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4)))
115 /* 0x5480-0x54BC Packet split receive type */ 130/* array of 4096 1-bit vlan filters */
116#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) 131#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
117 /* array of 4096 1-bit vlan filters */ 132/*array of 4096 4-bit vlan vmdq indices */
118#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) 133#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
119 /*array of 4096 4-bit vlan vmdq indicies */
120#define IXGBE_FCTRL 0x05080 134#define IXGBE_FCTRL 0x05080
121#define IXGBE_VLNCTRL 0x05088 135#define IXGBE_VLNCTRL 0x05088
122#define IXGBE_MCSTCTRL 0x05090 136#define IXGBE_MCSTCTRL 0x05090
123#define IXGBE_MRQC 0x05818 137#define IXGBE_MRQC 0x05818
124#define IXGBE_VMD_CTL 0x0581C
125#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ 138#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
126#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ 139#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
127#define IXGBE_IMIRVP 0x05AC0 140#define IXGBE_IMIRVP 0x05AC0
141#define IXGBE_VMD_CTL 0x0581C
128#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ 142#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */
129#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ 143#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */
130 144
145
131/* Transmit DMA registers */ 146/* Transmit DMA registers */
132#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ 147#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
133#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) 148#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
134#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) 149#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
135#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) 150#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40))
@@ -138,11 +153,10 @@
138#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) 153#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
139#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) 154#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
140#define IXGBE_DTXCTL 0x07E00 155#define IXGBE_DTXCTL 0x07E00
141#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) 156
142 /* there are 16 of these (0-15) */ 157#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
143#define IXGBE_TIPG 0x0CB00 158#define IXGBE_TIPG 0x0CB00
144#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) 159#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */
145 /* there are 8 of these */
146#define IXGBE_MNGTXMAP 0x0CD10 160#define IXGBE_MNGTXMAP 0x0CD10
147#define IXGBE_TIPG_FIBER_DEFAULT 3 161#define IXGBE_TIPG_FIBER_DEFAULT 3
148#define IXGBE_TXPBSIZE_SHIFT 10 162#define IXGBE_TXPBSIZE_SHIFT 10
@@ -154,6 +168,7 @@
154#define IXGBE_IPAV 0x05838 168#define IXGBE_IPAV 0x05838
155#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ 169#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */
156#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ 170#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */
171
157#define IXGBE_WUPL 0x05900 172#define IXGBE_WUPL 0x05900
158#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ 173#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
159#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ 174#define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */
@@ -170,6 +185,8 @@
170#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ 185#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
171#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ 186#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
172 187
188
189
173/* Stats registers */ 190/* Stats registers */
174#define IXGBE_CRCERRS 0x04000 191#define IXGBE_CRCERRS 0x04000
175#define IXGBE_ILLERRC 0x04004 192#define IXGBE_ILLERRC 0x04004
@@ -224,7 +241,7 @@
224#define IXGBE_XEC 0x04120 241#define IXGBE_XEC 0x04120
225 242
226#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ 243#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */
227#define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ 244#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4)))
228 245
229#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ 246#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
230#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ 247#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
@@ -275,23 +292,17 @@
275#define IXGBE_DCA_CTRL 0x11074 292#define IXGBE_DCA_CTRL 0x11074
276 293
277/* Diagnostic Registers */ 294/* Diagnostic Registers */
278#define IXGBE_RDSTATCTL 0x02C20 295#define IXGBE_RDSTATCTL 0x02C20
279#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ 296#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
280#define IXGBE_RDHMPN 0x02F08 297#define IXGBE_RDHMPN 0x02F08
281#define IXGBE_RIC_DW0 0x02F10 298#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4))
282#define IXGBE_RIC_DW1 0x02F14 299#define IXGBE_RDPROBE 0x02F20
283#define IXGBE_RIC_DW2 0x02F18 300#define IXGBE_TDSTATCTL 0x07C20
284#define IXGBE_RIC_DW3 0x02F1C 301#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
285#define IXGBE_RDPROBE 0x02F20 302#define IXGBE_TDHMPN 0x07F08
286#define IXGBE_TDSTATCTL 0x07C20 303#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4))
287#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ 304#define IXGBE_TDPROBE 0x07F20
288#define IXGBE_TDHMPN 0x07F08 305#define IXGBE_TXBUFCTRL 0x0C600
289#define IXGBE_TIC_DW0 0x07F10
290#define IXGBE_TIC_DW1 0x07F14
291#define IXGBE_TIC_DW2 0x07F18
292#define IXGBE_TIC_DW3 0x07F1C
293#define IXGBE_TDPROBE 0x07F20
294#define IXGBE_TXBUFCTRL 0x0C600
295#define IXGBE_TXBUFDATA0 0x0C610 306#define IXGBE_TXBUFDATA0 0x0C610
296#define IXGBE_TXBUFDATA1 0x0C614 307#define IXGBE_TXBUFDATA1 0x0C614
297#define IXGBE_TXBUFDATA2 0x0C618 308#define IXGBE_TXBUFDATA2 0x0C618
@@ -356,12 +367,10 @@
356#define IXGBE_ANLP2 0x042B4 367#define IXGBE_ANLP2 0x042B4
357#define IXGBE_ATLASCTL 0x04800 368#define IXGBE_ATLASCTL 0x04800
358 369
359/* RSCCTL Bit Masks */ 370/* RDRXCTL Bit Masks */
360#define IXGBE_RSCCTL_RSCEN 0x01 371#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
361#define IXGBE_RSCCTL_MAXDESC_1 0x00 372#define IXGBE_RDRXCTL_MVMEN 0x00000020
362#define IXGBE_RSCCTL_MAXDESC_4 0x04 373#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
363#define IXGBE_RSCCTL_MAXDESC_8 0x08
364#define IXGBE_RSCCTL_MAXDESC_16 0x0C
365 374
366/* CTRL Bit Masks */ 375/* CTRL Bit Masks */
367#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ 376#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
@@ -394,7 +403,7 @@
394 403
395#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 404#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
396#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 405#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
397#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ 406#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
398#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ 407#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
399 408
400/* MSCA Bit Masks */ 409/* MSCA Bit Masks */
@@ -418,10 +427,10 @@
418#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ 427#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */
419 428
420/* MSRWD bit masks */ 429/* MSRWD bit masks */
421#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF 430#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF
422#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 431#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0
423#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 432#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000
424#define IXGBE_MSRWD_READ_DATA_SHIFT 16 433#define IXGBE_MSRWD_READ_DATA_SHIFT 16
425 434
426/* Atlas registers */ 435/* Atlas registers */
427#define IXGBE_ATLAS_PDN_LPBK 0x24 436#define IXGBE_ATLAS_PDN_LPBK 0x24
@@ -436,6 +445,7 @@
436#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 445#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0
437#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 446#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0
438 447
448
439/* Device Type definitions for new protocol MDIO commands */ 449/* Device Type definitions for new protocol MDIO commands */
440#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 450#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
441#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 451#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
@@ -443,6 +453,8 @@
443#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 453#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
444#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ 454#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
445 455
456#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
457
446#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ 458#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */
447#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ 459#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */
448#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ 460#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */
@@ -456,23 +468,39 @@
456#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ 468#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */
457#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ 469#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/
458#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ 470#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/
459#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ 471#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */
460#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ 472#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */
461#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ 473#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */
462 474
475#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */
476#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
477#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
478
479/* MII clause 22/28 definitions */
480#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800
481
482#define IXGBE_MII_SPEED_SELECTION_REG 0x10
483#define IXGBE_MII_RESTART 0x200
484#define IXGBE_MII_AUTONEG_COMPLETE 0x20
485#define IXGBE_MII_AUTONEG_REG 0x0
486
463#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 487#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
464#define IXGBE_MAX_PHY_ADDR 32 488#define IXGBE_MAX_PHY_ADDR 32
465 489
466/* PHY IDs*/ 490/* PHY IDs*/
467#define TN1010_PHY_ID 0x00A19410
468#define QT2022_PHY_ID 0x0043A400 491#define QT2022_PHY_ID 0x0043A400
469 492
493/* PHY Types */
494#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
495
470/* General purpose Interrupt Enable */ 496/* General purpose Interrupt Enable */
471#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ 497#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
472#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ 498#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
473#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ 499#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */
474#define IXGBE_GPIE_EIAME 0x40000000 500#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */
475#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 501#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
502#define IXGBE_GPIE_EIAME 0x40000000
503#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
476 504
477/* Transmit Flow Control status */ 505/* Transmit Flow Control status */
478#define IXGBE_TFCS_TXOFF 0x00000001 506#define IXGBE_TFCS_TXOFF 0x00000001
@@ -533,7 +561,7 @@
533#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ 561#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */
534 562
535/* RMCS Bit Masks */ 563/* RMCS Bit Masks */
536#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ 564#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */
537/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ 565/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
538#define IXGBE_RMCS_RAC 0x00000004 566#define IXGBE_RMCS_RAC 0x00000004
539#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ 567#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
@@ -541,12 +569,15 @@
541#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ 569#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */
542#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ 570#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */
543 571
572
544/* Interrupt register bitmasks */ 573/* Interrupt register bitmasks */
545 574
546/* Extended Interrupt Cause Read */ 575/* Extended Interrupt Cause Read */
547#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ 576#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
548#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ 577#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
549#define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ 578#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
579#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */
580#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */
550#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ 581#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */
551#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ 582#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */
552#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ 583#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */
@@ -554,11 +585,12 @@
554 585
555/* Extended Interrupt Cause Set */ 586/* Extended Interrupt Cause Set */
556#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 587#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
557#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ 588#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
558#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ 589#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
559#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 590#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
560#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 591#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
561#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ 592#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
593#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */
562#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 594#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
563#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 595#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
564 596
@@ -566,7 +598,9 @@
566#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 598#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
567#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ 599#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
568#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 600#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
569#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 601#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
602#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
603#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
570#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ 604#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */
571#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 605#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
572#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 606#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
@@ -575,18 +609,20 @@
575#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 609#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
576#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ 610#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
577#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 611#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
578#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 612#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
579#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */ 613#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */
614#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */
615#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */
580#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 616#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */
581#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 617#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
582 618
583#define IXGBE_EIMS_ENABLE_MASK (\ 619#define IXGBE_EIMS_ENABLE_MASK ( \
584 IXGBE_EIMS_RTX_QUEUE | \ 620 IXGBE_EIMS_RTX_QUEUE | \
585 IXGBE_EIMS_LSC | \ 621 IXGBE_EIMS_LSC | \
586 IXGBE_EIMS_TCP_TIMER | \ 622 IXGBE_EIMS_TCP_TIMER | \
587 IXGBE_EIMS_OTHER) 623 IXGBE_EIMS_OTHER)
588 624
589/* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ 625/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
590#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ 626#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
591#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ 627#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */
592#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ 628#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -623,6 +659,7 @@
623#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ 659#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */
624#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ 660#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */
625 661
662
626#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 663#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
627 664
628/* STATUS Bit Masks */ 665/* STATUS Bit Masks */
@@ -670,16 +707,16 @@
670#define IXGBE_AUTOC_AN_RESTART 0x00001000 707#define IXGBE_AUTOC_AN_RESTART 0x00001000
671#define IXGBE_AUTOC_FLU 0x00000001 708#define IXGBE_AUTOC_FLU 0x00000001
672#define IXGBE_AUTOC_LMS_SHIFT 13 709#define IXGBE_AUTOC_LMS_SHIFT 13
673#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) 710#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT)
674#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) 711#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT)
675#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) 712#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT)
676#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) 713#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT)
677#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) 714#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT)
678#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) 715#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
679#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) 716#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
680 717
681#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 718#define IXGBE_AUTOC_1G_PMA_PMD 0x00000200
682#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 719#define IXGBE_AUTOC_10G_PMA_PMD 0x00000180
683#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 720#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
684#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 721#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
685#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) 722#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
@@ -705,6 +742,7 @@
705#define IXGBE_LINKS_TL_FAULT 0x00001000 742#define IXGBE_LINKS_TL_FAULT 0x00001000
706#define IXGBE_LINKS_SIGNAL 0x00000F00 743#define IXGBE_LINKS_SIGNAL 0x00000F00
707 744
745#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
708#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 746#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
709 747
710/* SW Semaphore Register bitmasks */ 748/* SW Semaphore Register bitmasks */
@@ -759,6 +797,11 @@
759#define IXGBE_PBANUM0_PTR 0x15 797#define IXGBE_PBANUM0_PTR 0x15
760#define IXGBE_PBANUM1_PTR 0x16 798#define IXGBE_PBANUM1_PTR 0x16
761 799
800/* Legacy EEPROM word offsets */
801#define IXGBE_ISCSI_BOOT_CAPS 0x0033
802#define IXGBE_ISCSI_SETUP_PORT_0 0x0030
803#define IXGBE_ISCSI_SETUP_PORT_1 0x0034
804
762/* EEPROM Commands - SPI */ 805/* EEPROM Commands - SPI */
763#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ 806#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */
764#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 807#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01
@@ -766,7 +809,7 @@
766#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ 809#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
767#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ 810#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */
768#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ 811#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */
769/* EEPROM reset Write Enbale latch */ 812/* EEPROM reset Write Enable latch */
770#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 813#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04
771#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ 814#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */
772#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ 815#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */
@@ -805,26 +848,20 @@
805/* Number of 100 microseconds we wait for PCI Express master disable */ 848/* Number of 100 microseconds we wait for PCI Express master disable */
806#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 849#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
807 850
808/* PHY Types */
809#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
810
811/* Check whether address is multicast. This is little-endian specific check.*/ 851/* Check whether address is multicast. This is little-endian specific check.*/
812#define IXGBE_IS_MULTICAST(Address) \ 852#define IXGBE_IS_MULTICAST(Address) \
813 (bool)(((u8 *)(Address))[0] & ((u8)0x01)) 853 (bool)(((u8 *)(Address))[0] & ((u8)0x01))
814 854
815/* Check whether an address is broadcast. */ 855/* Check whether an address is broadcast. */
816#define IXGBE_IS_BROADCAST(Address) \ 856#define IXGBE_IS_BROADCAST(Address) \
817 ((((u8 *)(Address))[0] == ((u8)0xff)) && \ 857 ((((u8 *)(Address))[0] == ((u8)0xff)) && \
818 (((u8 *)(Address))[1] == ((u8)0xff))) 858 (((u8 *)(Address))[1] == ((u8)0xff)))
819 859
820/* RAH */ 860/* RAH */
821#define IXGBE_RAH_VIND_MASK 0x003C0000 861#define IXGBE_RAH_VIND_MASK 0x003C0000
822#define IXGBE_RAH_VIND_SHIFT 18 862#define IXGBE_RAH_VIND_SHIFT 18
823#define IXGBE_RAH_AV 0x80000000 863#define IXGBE_RAH_AV 0x80000000
824 864#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF
825/* Filters */
826#define IXGBE_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
827#define IXGBE_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
828 865
829/* Header split receive */ 866/* Header split receive */
830#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 867#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
@@ -853,7 +890,7 @@
853#define IXGBE_MAX_FRAME_SZ 0x40040000 890#define IXGBE_MAX_FRAME_SZ 0x40040000
854 891
855#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ 892#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */
856#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. # write-back enable */ 893#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */
857 894
858/* Receive Config masks */ 895/* Receive Config masks */
859#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ 896#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
@@ -866,7 +903,7 @@
866#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ 903#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
867#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ 904#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
868#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ 905#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
869/* Receive Priority Flow Control Enbale */ 906/* Receive Priority Flow Control Enable */
870#define IXGBE_FCTRL_RPFCE 0x00004000 907#define IXGBE_FCTRL_RPFCE 0x00004000
871#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ 908#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
872 909
@@ -896,9 +933,8 @@
896/* Receive Descriptor bit definitions */ 933/* Receive Descriptor bit definitions */
897#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ 934#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
898#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ 935#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
899#define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */
900#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 936#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
901#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 937#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
902#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ 938#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
903#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 939#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
904#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ 940#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
@@ -914,7 +950,7 @@
914#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ 950#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
915#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ 951#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
916#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ 952#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
917#define IXGBE_RXDADV_HBO 0x00800000 953#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
918#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ 954#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
919#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ 955#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
920#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ 956#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
@@ -928,15 +964,17 @@
928#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ 964#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
929#define IXGBE_RXD_CFI_SHIFT 12 965#define IXGBE_RXD_CFI_SHIFT 12
930 966
967
931/* SRRCTL bit definitions */ 968/* SRRCTL bit definitions */
932#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ 969#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
933#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F 970#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
934#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 971#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
935#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 972#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
936#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 973#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
937#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 974#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
938#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 975#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
939#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 976#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
977#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
940 978
941#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 979#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000
942#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF 980#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
@@ -970,21 +1008,20 @@
970#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ 1008#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
971#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ 1009#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
972#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ 1010#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
973
974/* Masks to determine if packets should be dropped due to frame errors */ 1011/* Masks to determine if packets should be dropped due to frame errors */
975#define IXGBE_RXD_ERR_FRAME_ERR_MASK (\ 1012#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
976 IXGBE_RXD_ERR_CE | \ 1013 IXGBE_RXD_ERR_CE | \
977 IXGBE_RXD_ERR_LE | \ 1014 IXGBE_RXD_ERR_LE | \
978 IXGBE_RXD_ERR_PE | \ 1015 IXGBE_RXD_ERR_PE | \
979 IXGBE_RXD_ERR_OSE | \ 1016 IXGBE_RXD_ERR_OSE | \
980 IXGBE_RXD_ERR_USE) 1017 IXGBE_RXD_ERR_USE)
981 1018
982#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\ 1019#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
983 IXGBE_RXDADV_ERR_CE | \ 1020 IXGBE_RXDADV_ERR_CE | \
984 IXGBE_RXDADV_ERR_LE | \ 1021 IXGBE_RXDADV_ERR_LE | \
985 IXGBE_RXDADV_ERR_PE | \ 1022 IXGBE_RXDADV_ERR_PE | \
986 IXGBE_RXDADV_ERR_OSE | \ 1023 IXGBE_RXDADV_ERR_OSE | \
987 IXGBE_RXDADV_ERR_USE) 1024 IXGBE_RXDADV_ERR_USE)
988 1025
989/* Multicast bit mask */ 1026/* Multicast bit mask */
990#define IXGBE_MCSTCTRL_MFE 0x4 1027#define IXGBE_MCSTCTRL_MFE 0x4
@@ -1000,6 +1037,7 @@
1000#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ 1037#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
1001#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 1038#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
1002 1039
1040
1003/* Transmit Descriptor - Legacy */ 1041/* Transmit Descriptor - Legacy */
1004struct ixgbe_legacy_tx_desc { 1042struct ixgbe_legacy_tx_desc {
1005 u64 buffer_addr; /* Address of the descriptor's data buffer */ 1043 u64 buffer_addr; /* Address of the descriptor's data buffer */
@@ -1007,15 +1045,15 @@ struct ixgbe_legacy_tx_desc {
1007 __le32 data; 1045 __le32 data;
1008 struct { 1046 struct {
1009 __le16 length; /* Data buffer length */ 1047 __le16 length; /* Data buffer length */
1010 u8 cso; /* Checksum offset */ 1048 u8 cso; /* Checksum offset */
1011 u8 cmd; /* Descriptor control */ 1049 u8 cmd; /* Descriptor control */
1012 } flags; 1050 } flags;
1013 } lower; 1051 } lower;
1014 union { 1052 union {
1015 __le32 data; 1053 __le32 data;
1016 struct { 1054 struct {
1017 u8 status; /* Descriptor status */ 1055 u8 status; /* Descriptor status */
1018 u8 css; /* Checksum start */ 1056 u8 css; /* Checksum start */
1019 __le16 vlan; 1057 __le16 vlan;
1020 } fields; 1058 } fields;
1021 } upper; 1059 } upper;
@@ -1024,7 +1062,7 @@ struct ixgbe_legacy_tx_desc {
1024/* Transmit Descriptor - Advanced */ 1062/* Transmit Descriptor - Advanced */
1025union ixgbe_adv_tx_desc { 1063union ixgbe_adv_tx_desc {
1026 struct { 1064 struct {
1027 __le64 buffer_addr; /* Address of descriptor's data buf */ 1065 __le64 buffer_addr; /* Address of descriptor's data buf */
1028 __le32 cmd_type_len; 1066 __le32 cmd_type_len;
1029 __le32 olinfo_status; 1067 __le32 olinfo_status;
1030 } read; 1068 } read;
@@ -1039,9 +1077,9 @@ union ixgbe_adv_tx_desc {
1039struct ixgbe_legacy_rx_desc { 1077struct ixgbe_legacy_rx_desc {
1040 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 1078 __le64 buffer_addr; /* Address of the descriptor's data buffer */
1041 __le16 length; /* Length of data DMAed into data buffer */ 1079 __le16 length; /* Length of data DMAed into data buffer */
1042 u16 csum; /* Packet checksum */ 1080 __le16 csum; /* Packet checksum */
1043 u8 status; /* Descriptor status */ 1081 u8 status; /* Descriptor status */
1044 u8 errors; /* Descriptor Errors */ 1082 u8 errors; /* Descriptor Errors */
1045 __le16 vlan; 1083 __le16 vlan;
1046}; 1084};
1047 1085
@@ -1053,15 +1091,18 @@ union ixgbe_adv_rx_desc {
1053 } read; 1091 } read;
1054 struct { 1092 struct {
1055 struct { 1093 struct {
1056 struct { 1094 union {
1057 __le16 pkt_info; /* RSS type, Packet type */ 1095 __le32 data;
1058 __le16 hdr_info; /* Split Header, header len */ 1096 struct {
1097 __le16 pkt_info; /* RSS, Pkt type */
1098 __le16 hdr_info; /* Splithdr, hdrlen */
1099 } hs_rss;
1059 } lo_dword; 1100 } lo_dword;
1060 union { 1101 union {
1061 __le32 rss; /* RSS Hash */ 1102 __le32 rss; /* RSS Hash */
1062 struct { 1103 struct {
1063 __le16 ip_id; /* IP id */ 1104 __le16 ip_id; /* IP id */
1064 u16 csum; /* Packet Checksum */ 1105 __le16 csum; /* Packet Checksum */
1065 } csum_ip; 1106 } csum_ip;
1066 } hi_dword; 1107 } hi_dword;
1067 } lower; 1108 } lower;
@@ -1082,49 +1123,69 @@ struct ixgbe_adv_tx_context_desc {
1082}; 1123};
1083 1124
1084/* Adv Transmit Descriptor Config Masks */ 1125/* Adv Transmit Descriptor Config Masks */
1085#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */ 1126#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */
1086#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ 1127#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
1087#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ 1128#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
1088#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ 1129#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
1089#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ 1130#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
1090#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ 1131#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
1091#define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */
1092#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ 1132#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
1093#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ 1133#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */
1094#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ 1134#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
1095#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ 1135#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
1096#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ 1136#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
1097#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ 1137#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
1098#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ 1138#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */
1099#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ 1139#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */
1100#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ 1140#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
1141#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
1101#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 1142#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
1102#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 1143#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
1103 IXGBE_ADVTXD_POPTS_SHIFT) 1144 IXGBE_ADVTXD_POPTS_SHIFT)
1104#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 1145#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
1105 IXGBE_ADVTXD_POPTS_SHIFT) 1146 IXGBE_ADVTXD_POPTS_SHIFT)
1106#define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ 1147#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
1107#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 1148#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
1108#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 1149#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
1109#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 1150#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
1110#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ 1151#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
1111#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ 1152#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
1112#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ 1153#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
1113#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ 1154#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
1114#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ 1155#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
1115#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ 1156#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
1116#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ 1157#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
1117#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ 1158#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
1118#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ 1159#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
1119#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ 1160#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/
1120#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ 1161#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
1121#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ 1162#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
1122 1163
1164/* Autonegotiation advertised speeds */
1165typedef u32 ixgbe_autoneg_advertised;
1123/* Link speed */ 1166/* Link speed */
1167typedef u32 ixgbe_link_speed;
1124#define IXGBE_LINK_SPEED_UNKNOWN 0 1168#define IXGBE_LINK_SPEED_UNKNOWN 0
1125#define IXGBE_LINK_SPEED_100_FULL 0x0008 1169#define IXGBE_LINK_SPEED_100_FULL 0x0008
1126#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 1170#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
1127#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 1171#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
1172#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
1173 IXGBE_LINK_SPEED_10GB_FULL)
1174
1175/* Physical layer type */
1176typedef u32 ixgbe_physical_layer;
1177#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
1178#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
1179#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
1180#define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004
1181#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
1182#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
1183#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
1184#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
1185#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
1186#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
1187#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
1188#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
1128 1189
1129 1190
1130enum ixgbe_eeprom_type { 1191enum ixgbe_eeprom_type {
@@ -1141,16 +1202,38 @@ enum ixgbe_mac_type {
1141 1202
1142enum ixgbe_phy_type { 1203enum ixgbe_phy_type {
1143 ixgbe_phy_unknown = 0, 1204 ixgbe_phy_unknown = 0,
1144 ixgbe_phy_tn,
1145 ixgbe_phy_qt, 1205 ixgbe_phy_qt,
1146 ixgbe_phy_xaui 1206 ixgbe_phy_xaui,
1207 ixgbe_phy_tw_tyco,
1208 ixgbe_phy_tw_unknown,
1209 ixgbe_phy_sfp_avago,
1210 ixgbe_phy_sfp_ftl,
1211 ixgbe_phy_sfp_unknown,
1212 ixgbe_phy_generic
1213};
1214
1215/*
1216 * SFP+ module type IDs:
1217 *
1218 * ID Module Type
1219 * =============
1220 * 0 SFP_DA_CU
1221 * 1 SFP_SR
1222 * 2 SFP_LR
1223 */
1224enum ixgbe_sfp_type {
1225 ixgbe_sfp_type_da_cu = 0,
1226 ixgbe_sfp_type_sr = 1,
1227 ixgbe_sfp_type_lr = 2,
1228 ixgbe_sfp_type_unknown = 0xFFFF
1147}; 1229};
1148 1230
1149enum ixgbe_media_type { 1231enum ixgbe_media_type {
1150 ixgbe_media_type_unknown = 0, 1232 ixgbe_media_type_unknown = 0,
1151 ixgbe_media_type_fiber, 1233 ixgbe_media_type_fiber,
1152 ixgbe_media_type_copper, 1234 ixgbe_media_type_copper,
1153 ixgbe_media_type_backplane 1235 ixgbe_media_type_backplane,
1236 ixgbe_media_type_virtual
1154}; 1237};
1155 1238
1156/* Flow Control Settings */ 1239/* Flow Control Settings */
@@ -1167,6 +1250,8 @@ struct ixgbe_addr_filter_info {
1167 u32 rar_used_count; 1250 u32 rar_used_count;
1168 u32 mc_addr_in_rar_count; 1251 u32 mc_addr_in_rar_count;
1169 u32 mta_in_use; 1252 u32 mta_in_use;
1253 u32 overflow_promisc;
1254 bool user_set_promisc;
1170}; 1255};
1171 1256
1172/* Flow control parameters */ 1257/* Flow control parameters */
@@ -1242,57 +1327,118 @@ struct ixgbe_hw_stats {
1242/* forward declaration */ 1327/* forward declaration */
1243struct ixgbe_hw; 1328struct ixgbe_hw;
1244 1329
1330/* iterator type for walking multicast address lists */
1331typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
1332 u32 *vmdq);
1333
1334/* Function pointer table */
1335struct ixgbe_eeprom_operations {
1336 s32 (*init_params)(struct ixgbe_hw *);
1337 s32 (*read)(struct ixgbe_hw *, u16, u16 *);
1338 s32 (*write)(struct ixgbe_hw *, u16, u16);
1339 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
1340 s32 (*update_checksum)(struct ixgbe_hw *);
1341};
1342
1245struct ixgbe_mac_operations { 1343struct ixgbe_mac_operations {
1246 s32 (*reset)(struct ixgbe_hw *); 1344 s32 (*init_hw)(struct ixgbe_hw *);
1345 s32 (*reset_hw)(struct ixgbe_hw *);
1346 s32 (*start_hw)(struct ixgbe_hw *);
1347 s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
1247 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); 1348 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
1349 s32 (*get_supported_physical_layer)(struct ixgbe_hw *);
1350 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
1351 s32 (*stop_adapter)(struct ixgbe_hw *);
1352 s32 (*get_bus_info)(struct ixgbe_hw *);
1353 s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
1354 s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
1355
1356 /* Link */
1248 s32 (*setup_link)(struct ixgbe_hw *); 1357 s32 (*setup_link)(struct ixgbe_hw *);
1249 s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); 1358 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
1250 s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); 1359 bool);
1251 s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *); 1360 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
1361 s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
1362 bool *);
1363
1364 /* LED */
1365 s32 (*led_on)(struct ixgbe_hw *, u32);
1366 s32 (*led_off)(struct ixgbe_hw *, u32);
1367 s32 (*blink_led_start)(struct ixgbe_hw *, u32);
1368 s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
1369
1370 /* RAR, Multicast, VLAN */
1371 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
1372 s32 (*clear_rar)(struct ixgbe_hw *, u32);
1373 s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
1374 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
1375 s32 (*init_rx_addrs)(struct ixgbe_hw *);
1376 s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
1377 ixgbe_mc_addr_itr);
1378 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
1379 ixgbe_mc_addr_itr);
1380 s32 (*enable_mc)(struct ixgbe_hw *);
1381 s32 (*disable_mc)(struct ixgbe_hw *);
1382 s32 (*clear_vfta)(struct ixgbe_hw *);
1383 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
1384 s32 (*init_uta_tables)(struct ixgbe_hw *);
1385
1386 /* Flow Control */
1387 s32 (*setup_fc)(struct ixgbe_hw *, s32);
1252}; 1388};
1253 1389
1254struct ixgbe_phy_operations { 1390struct ixgbe_phy_operations {
1391 s32 (*identify)(struct ixgbe_hw *);
1392 s32 (*identify_sfp)(struct ixgbe_hw *);
1393 s32 (*reset)(struct ixgbe_hw *);
1394 s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
1395 s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
1255 s32 (*setup_link)(struct ixgbe_hw *); 1396 s32 (*setup_link)(struct ixgbe_hw *);
1256 s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); 1397 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
1257 s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); 1398 bool);
1258}; 1399 s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
1259 1400 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
1260struct ixgbe_mac_info { 1401 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
1261 struct ixgbe_mac_operations ops; 1402 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
1262 enum ixgbe_mac_type type;
1263 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1264 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1265 s32 mc_filter_type;
1266 u32 num_rx_queues;
1267 u32 num_tx_queues;
1268 u32 num_rx_addrs;
1269 u32 link_attach_type;
1270 u32 link_mode_select;
1271 bool link_settings_loaded;
1272}; 1403};
1273 1404
1274struct ixgbe_eeprom_info { 1405struct ixgbe_eeprom_info {
1275 enum ixgbe_eeprom_type type; 1406 struct ixgbe_eeprom_operations ops;
1276 u16 word_size; 1407 enum ixgbe_eeprom_type type;
1277 u16 address_bits; 1408 u32 semaphore_delay;
1409 u16 word_size;
1410 u16 address_bits;
1278}; 1411};
1279 1412
1280struct ixgbe_phy_info { 1413struct ixgbe_mac_info {
1281 struct ixgbe_phy_operations ops; 1414 struct ixgbe_mac_operations ops;
1282 1415 enum ixgbe_mac_type type;
1283 enum ixgbe_phy_type type; 1416 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1284 u32 addr; 1417 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
1285 u32 id; 1418 s32 mc_filter_type;
1286 u32 revision; 1419 u32 mcft_size;
1287 enum ixgbe_media_type media_type; 1420 u32 vft_size;
1288 u32 autoneg_advertised; 1421 u32 num_rar_entries;
1289 bool autoneg_wait_to_complete; 1422 u32 max_tx_queues;
1423 u32 max_rx_queues;
1424 u32 link_attach_type;
1425 u32 link_mode_select;
1426 bool link_settings_loaded;
1427 bool autoneg;
1428 bool autoneg_failed;
1290}; 1429};
1291 1430
1292struct ixgbe_info { 1431struct ixgbe_phy_info {
1293 enum ixgbe_mac_type mac; 1432 struct ixgbe_phy_operations ops;
1294 s32 (*get_invariants)(struct ixgbe_hw *); 1433 enum ixgbe_phy_type type;
1295 struct ixgbe_mac_operations *mac_ops; 1434 u32 addr;
1435 u32 id;
1436 enum ixgbe_sfp_type sfp_type;
1437 u32 revision;
1438 enum ixgbe_media_type media_type;
1439 bool reset_disable;
1440 ixgbe_autoneg_advertised autoneg_advertised;
1441 bool autoneg_wait_to_complete;
1296}; 1442};
1297 1443
1298struct ixgbe_hw { 1444struct ixgbe_hw {
@@ -1311,6 +1457,15 @@ struct ixgbe_hw {
1311 bool adapter_stopped; 1457 bool adapter_stopped;
1312}; 1458};
1313 1459
1460struct ixgbe_info {
1461 enum ixgbe_mac_type mac;
1462 s32 (*get_invariants)(struct ixgbe_hw *);
1463 struct ixgbe_mac_operations *mac_ops;
1464 struct ixgbe_eeprom_operations *eeprom_ops;
1465 struct ixgbe_phy_operations *phy_ops;
1466};
1467
1468
1314/* Error Codes */ 1469/* Error Codes */
1315#define IXGBE_ERR_EEPROM -1 1470#define IXGBE_ERR_EEPROM -1
1316#define IXGBE_ERR_EEPROM_CHECKSUM -2 1471#define IXGBE_ERR_EEPROM_CHECKSUM -2
@@ -1329,6 +1484,8 @@ struct ixgbe_hw {
1329#define IXGBE_ERR_RESET_FAILED -15 1484#define IXGBE_ERR_RESET_FAILED -15
1330#define IXGBE_ERR_SWFW_SYNC -16 1485#define IXGBE_ERR_SWFW_SYNC -16
1331#define IXGBE_ERR_PHY_ADDR_INVALID -17 1486#define IXGBE_ERR_PHY_ADDR_INVALID -17
1487#define IXGBE_ERR_I2C -18
1488#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
1332#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 1489#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
1333 1490
1334#endif /* _IXGBE_TYPE_H_ */ 1491#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
new file mode 100644
index 000000000000..f292df557544
--- /dev/null
+++ b/drivers/net/jme.c
@@ -0,0 +1,3019 @@
1/*
2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3 *
4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/
6 *
7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#include <linux/version.h>
25#include <linux/module.h>
26#include <linux/kernel.h>
27#include <linux/pci.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
31#include <linux/mii.h>
32#include <linux/crc32.h>
33#include <linux/delay.h>
34#include <linux/spinlock.h>
35#include <linux/in.h>
36#include <linux/ip.h>
37#include <linux/ipv6.h>
38#include <linux/tcp.h>
39#include <linux/udp.h>
40#include <linux/if_vlan.h>
41#include "jme.h"
42
/*
 * Module parameters: tristate knobs (-1 = driver decides) controlling the
 * pseudo hot-plug feature.  Permission 0 means they are load-time only
 * and not exposed via sysfs.
 */
static int force_pseudohp = -1;
static int no_pseudohp = -1;
static int no_extplug = -1;
module_param(force_pseudohp, int, 0);
MODULE_PARM_DESC(force_pseudohp,
	"Enable pseudo hot-plug feature manually by driver instead of BIOS.");
module_param(no_pseudohp, int, 0);
MODULE_PARM_DESC(no_pseudohp, "Disable pseudo hot-plug feature.");
module_param(no_extplug, int, 0);
MODULE_PARM_DESC(no_extplug,
	"Do not use external plug signal for pseudo hot-plug.");
54
/*
 * Read a PHY register through the chip's SMI/MDIO interface.
 *
 * Posts a read request via JME_SMI, then busy-waits (up to
 * JME_PHY_TIMEOUT ms, in 20us steps) for the hardware to clear
 * SMI_OP_REQ.  Returns the 16-bit register value, or 0 on timeout.
 *
 * MII_BMSR is issued twice (via "again") — presumably to refresh its
 * latched status bits so the second read reports current state;
 * confirm against the PHY datasheet.
 */
static int
jme_mdio_read(struct net_device *netdev, int phy, int reg)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i, val, again = (reg == MII_BMSR) ? 1 : 0;

read_again:
	jwrite32(jme, JME_SMI, SMI_OP_REQ |
				smi_phy_addr(phy) |
				smi_reg_addr(reg));

	/* Make sure the request is posted before polling for completion. */
	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		val = jread32(jme, JME_SMI);
		if ((val & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0) {
		jeprintk(jme->pdev, "phy(%d) read timeout : %d\n", phy, reg);
		return 0;
	}

	if (again--)
		goto read_again;

	return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
}
84
/*
 * Write @val to PHY register @reg through the SMI/MDIO interface.
 *
 * Busy-waits (up to JME_PHY_TIMEOUT ms, in 20us steps) for the request
 * bit to clear.  A timeout is only logged; the write is not retried
 * and no error is returned to the caller.
 */
static void
jme_mdio_write(struct net_device *netdev,
				int phy, int reg, int val)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int i;

	jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
		((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
		smi_phy_addr(phy) | smi_reg_addr(reg));

	/* Order the request write against the completion polling below. */
	wmb();
	for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
		udelay(20);
		if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
			break;
	}

	if (i == 0)
		jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);

	return;
}
108
/*
 * Re-program PHY autonegotiation advertisement (all 10/100 modes plus
 * symmetric/asymmetric pause), then set BMCR_RESET so the PHY restarts
 * with the new advertisement in effect.
 */
static inline void
jme_reset_phy_processor(struct jme_adapter *jme)
{
	u32 val;

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_ADVERTISE, ADVERTISE_ALL |
			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	/* Gigabit advertisement only on the JMC250 — presumably the other
	 * variant is not 1000BASE-T capable; confirm against jme.h. */
	if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme_mdio_write(jme->dev,
				jme->mii_if.phy_id,
				MII_CTRL1000,
				ADVERTISE_1000FULL | ADVERTISE_1000HALF);

	/* Preserve the current BMCR contents while asserting reset. */
	val = jme_mdio_read(jme->dev,
				jme->mii_if.phy_id,
				MII_BMCR);

	jme_mdio_write(jme->dev,
			jme->mii_if.phy_id,
			MII_BMCR, val | BMCR_RESET);

	return;
}
135
/*
 * Program one wake-on-LAN frame filter.
 *
 * @mask: WAKEUP_FRAME_MASK_DWNR dwords of byte mask for the pattern
 * @crc:  CRC of the wakeup pattern
 * @fnr:  frame-filter slot to program
 *
 * JME_WFOI is an index register selecting which word of filter @fnr is
 * addressed; the datum then goes through JME_WFODP.  The wmb() between
 * each index/data pair keeps the two writes ordered.
 */
static void
jme_setup_wakeup_frame(struct jme_adapter *jme,
			u32 *mask, u32 crc, int fnr)
{
	int i;

	/*
	 * Setup CRC pattern
	 */
	jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
	wmb();
	jwrite32(jme, JME_WFODP, crc);
	wmb();

	/*
	 * Setup Mask
	 */
	for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
		jwrite32(jme, JME_WFOI,
				((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
				(fnr & WFOI_FRAME_SEL));
		wmb();
		jwrite32(jme, JME_WFODP, mask[i]);
		wmb();
	}
}
162
/*
 * Soft-reset the MAC and bring it to a clean post-reset state:
 * zero all RX/TX ring base/count registers, clear the multicast hash,
 * overwrite every wake-on-LAN frame filter with a placeholder pattern,
 * and reprogram GPREG0/GPREG1 (FPGA prototypes additionally need
 * link-interrupt polling, GPREG0_LNKINTPOLL).
 */
static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
	u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
	u32 crc = 0xCDCDCDCD;	/* placeholder CRC — filters carry no real pattern */
	u32 gpreg0;
	int i;

	/* Pulse the software-reset bit in GHC, keeping the cached config. */
	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
	udelay(2);
	jwrite32(jme, JME_GHC, jme->reg_ghc);

	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
	jwrite32(jme, JME_RXQDC, 0x00000000);
	jwrite32(jme, JME_RXNDA, 0x00000000);
	jwrite32(jme, JME_TXDBA_LO, 0x00000000);
	jwrite32(jme, JME_TXDBA_HI, 0x00000000);
	jwrite32(jme, JME_TXQDC, 0x00000000);
	jwrite32(jme, JME_TXNDA, 0x00000000);

	jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
	jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
	for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
		jme_setup_wakeup_frame(jme, mask, crc, i);
	if (jme->fpgaver)
		gpreg0 = GPREG0_DEFAULT | GPREG0_LNKINTPOLL;
	else
		gpreg0 = GPREG0_DEFAULT;
	jwrite32(jme, JME_GPREG0, gpreg0);
	jwrite32(jme, JME_GPREG1, 0);
}
195
/*
 * Drop the gigabit-speed and duplex selections from the cached GHC
 * value and write the result back to the hardware.
 */
static inline void
jme_reset_ghc_speed(struct jme_adapter *jme)
{
	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
	jwrite32(jme, JME_GHC, jme->reg_ghc);
}
202
/*
 * Leave power-management sleep: write PMCS with the upper half all-ones
 * (presumably write-1-to-clear status bits — confirm with datasheet)
 * while keeping the configured wake enables, then force the PCI device
 * to D0 and disable wake-from-D0.
 */
static inline void
jme_clear_pm(struct jme_adapter *jme)
{
	jwrite32(jme, JME_PMCS, 0xFFFF0000 | jme->reg_pmcs);
	pci_set_power_state(jme->pdev, PCI_D0);
	pci_enable_wake(jme->pdev, PCI_D0, false);
}
210
/*
 * Ask the chip to re-load its configuration from the attached EEPROM.
 *
 * If no EEPROM is detected (SMBCSR_EEPROMD clear) this is a silent
 * no-op.  Returns 0 on success, -EIO if the reload does not finish
 * within JME_EEPROM_RELOAD_TIMEOUT ms.
 */
static int
jme_reload_eeprom(struct jme_adapter *jme)
{
	u32 val;
	int i;

	val = jread32(jme, JME_SMBCSR);

	if (val & SMBCSR_EEPROMD) {
		val |= SMBCSR_CNACK;
		jwrite32(jme, JME_SMBCSR, val);
		val |= SMBCSR_RELOAD;
		jwrite32(jme, JME_SMBCSR, val);
		mdelay(12);

		/* Poll for the self-clearing RELOAD bit, 1ms per step. */
		for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
			mdelay(1);
			if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
				break;
		}

		if (i == 0) {
			jeprintk(jme->pdev, "eeprom reload timeout\n");
			return -EIO;
		}
	}

	return 0;
}
240
/*
 * Read the station MAC address out of the RXUMA_LO/HI unicast-address
 * registers (bytes packed little-endian) into netdev->dev_addr.
 * macaddr_lock serialises against concurrent address changes.
 */
static void
jme_load_macaddr(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	unsigned char macaddr[6];
	u32 val;

	spin_lock_bh(&jme->macaddr_lock);
	val = jread32(jme, JME_RXUMA_LO);
	macaddr[0] = (val >>  0) & 0xFF;
	macaddr[1] = (val >>  8) & 0xFF;
	macaddr[2] = (val >> 16) & 0xFF;
	macaddr[3] = (val >> 24) & 0xFF;
	val = jread32(jme, JME_RXUMA_HI);
	macaddr[4] = (val >>  0) & 0xFF;
	macaddr[5] = (val >>  8) & 0xFF;
	memcpy(netdev->dev_addr, macaddr, 6);
	spin_unlock_bh(&jme->macaddr_lock);
}
260
261static inline void
262jme_set_rx_pcc(struct jme_adapter *jme, int p)
263{
264 switch (p) {
265 case PCC_OFF:
266 jwrite32(jme, JME_PCCRX0,
267 ((PCC_OFF_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
268 ((PCC_OFF_CNT << PCCRX_SHIFT) & PCCRX_MASK));
269 break;
270 case PCC_P1:
271 jwrite32(jme, JME_PCCRX0,
272 ((PCC_P1_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
273 ((PCC_P1_CNT << PCCRX_SHIFT) & PCCRX_MASK));
274 break;
275 case PCC_P2:
276 jwrite32(jme, JME_PCCRX0,
277 ((PCC_P2_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
278 ((PCC_P2_CNT << PCCRX_SHIFT) & PCCRX_MASK));
279 break;
280 case PCC_P3:
281 jwrite32(jme, JME_PCCRX0,
282 ((PCC_P3_TO << PCCRXTO_SHIFT) & PCCRXTO_MASK) |
283 ((PCC_P3_CNT << PCCRX_SHIFT) & PCCRX_MASK));
284 break;
285 default:
286 break;
287 }
288 wmb();
289
290 if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
291 msg_rx_status(jme, "Switched to PCC_P%d\n", p);
292}
293
/*
 * Start interrupt-driven operation: reset the dynamic-PCC state to
 * level P1 (and program the matching RX coalescing), set up TX
 * coalescing for queue 0, then unmask all interrupts via JME_IENS.
 */
static void
jme_start_irq(struct jme_adapter *jme)
{
	register struct dynpcc_info *dpi = &(jme->dpi);

	jme_set_rx_pcc(jme, PCC_P1);
	dpi->cur		= PCC_P1;
	dpi->attempt		= PCC_P1;
	dpi->cnt		= 0;

	jwrite32(jme, JME_PCCTX,
			((PCC_TX_TO << PCCTXTO_SHIFT) & PCCTXTO_MASK) |
			((PCC_TX_CNT << PCCTX_SHIFT) & PCCTX_MASK) |
			PCCTXQ0_EN
		);

	/*
	 * Enable Interrupts
	 */
	jwrite32(jme, JME_IENS, INTR_ENABLE);
}
315
/*
 * Mask all device interrupts (flushed write via jwrite32f so the
 * disable takes effect before the caller proceeds).
 */
static inline void
jme_stop_irq(struct jme_adapter *jme)
{
	/*
	 * Disable Interrupts
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);
}
324
/*
 * Point the shadow-status base address register at the 32-byte-aligned
 * DMA buffer (low bits masked off) and set SHBA_POSTEN so the NIC
 * posts status there.
 */
static inline void
jme_enable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme,
		 JME_SHBA_LO,
		 ((u32)jme->shadow_dma & ~((u32)0x1F)) | SHBA_POSTEN);
}
332
/* Disable shadow-status posting by clearing the base-address register. */
static inline void
jme_disable_shadow(struct jme_adapter *jme)
{
	jwrite32(jme, JME_SHBA_LO, 0x0);
}
338
339static u32
340jme_linkstat_from_phy(struct jme_adapter *jme)
341{
342 u32 phylink, bmsr;
343
344 phylink = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 17);
345 bmsr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMSR);
346 if (bmsr & BMSR_ANCOMP)
347 phylink |= PHY_LINK_AUTONEG_COMPLETE;
348
349 return phylink;
350}
351
/*
 * Select PHY FIFO setting "A" via vendor-specific PHY register 27.
 * Used as a workaround on buggy JMC250 revisions (see the
 * is_buggy250() calls in jme_check_link()).
 */
static inline void
jme_set_phyfifoa(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}
357
/*
 * Select PHY FIFO setting "B" via vendor-specific PHY register 27.
 * Companion workaround to jme_set_phyfifoa(), applied at 100 Mbps on
 * buggy JMC250 revisions.
 */
static inline void
jme_set_phyfifob(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}
363
/*
 * Synchronise the MAC with the PHY's current link state.
 *
 * @testonly: when non-zero, only query — never reprogram the MAC or
 *            change carrier state.
 *
 * Returns 1 if the link state is unchanged from the cached
 * jme->phylink, 0 otherwise.  On a real state change (and !testonly)
 * the GHC speed/duplex bits, TX MAC settings and netif carrier are
 * updated, and a human-readable link message is logged.
 */
static int
jme_check_link(struct net_device *netdev, int testonly)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr;
	char linkmsg[64];
	int rc = 0;

	linkmsg[0] = '\0';

	/* FPGA prototypes report link via the PHY; real silicon mirrors
	 * it in the JME_PHY_LINK register. */
	if (jme->fpgaver)
		phylink = jme_linkstat_from_phy(jme);
	else
		phylink = jread32(jme, JME_PHY_LINK);

	if (phylink & PHY_LINK_UP) {
		if (!(phylink & PHY_LINK_AUTONEG_COMPLETE)) {
			/*
			 * If we did not enable AN
			 * Speed/Duplex Info should be obtained from SMI
			 */
			phylink = PHY_LINK_UP;

			bmcr = jme_mdio_read(jme->dev,
						jme->mii_if.phy_id,
						MII_BMCR);

			/* Decode the forced speed from the BMCR speed
			 * bits (SPEED1000 alone => 1G, SPEED100 => 100M,
			 * neither => 10M). */
			phylink |= ((bmcr & BMCR_SPEED1000) &&
					(bmcr & BMCR_SPEED100) == 0) ?
					PHY_LINK_SPEED_1000M :
					(bmcr & BMCR_SPEED100) ?
					PHY_LINK_SPEED_100M :
					PHY_LINK_SPEED_10M;

			phylink |= (bmcr & BMCR_FULLDPLX) ?
					 PHY_LINK_DUPLEX : 0;

			strcat(linkmsg, "Forced: ");
		} else {
			/*
			 * Keep polling for speed/duplex resolve complete
			 */
			while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
				--cnt) {

				udelay(1);

				if (jme->fpgaver)
					phylink = jme_linkstat_from_phy(jme);
				else
					phylink = jread32(jme, JME_PHY_LINK);
			}
			if (!cnt)
				jeprintk(jme->pdev,
					"Waiting speed resolve timeout.\n");

			strcat(linkmsg, "ANed: ");
		}

		/* No change since last call: nothing to reprogram. */
		if (jme->phylink == phylink) {
			rc = 1;
			goto out;
		}
		if (testonly)
			goto out;

		jme->phylink = phylink;

		/* Rebuild the GHC speed/duplex field from scratch. */
		ghc = jme->reg_ghc & ~(GHC_SPEED_10M |
					GHC_SPEED_100M |
					GHC_SPEED_1000M |
					GHC_DPX);
		switch (phylink & PHY_LINK_SPEED_MASK) {
		case PHY_LINK_SPEED_10M:
			ghc |= GHC_SPEED_10M;
			strcat(linkmsg, "10 Mbps, ");
			if (is_buggy250(jme->pdev->device, jme->chiprev))
				jme_set_phyfifoa(jme);
			break;
		case PHY_LINK_SPEED_100M:
			ghc |= GHC_SPEED_100M;
			strcat(linkmsg, "100 Mbps, ");
			if (is_buggy250(jme->pdev->device, jme->chiprev))
				jme_set_phyfifob(jme);
			break;
		case PHY_LINK_SPEED_1000M:
			ghc |= GHC_SPEED_1000M;
			strcat(linkmsg, "1000 Mbps, ");
			if (is_buggy250(jme->pdev->device, jme->chiprev))
				jme_set_phyfifoa(jme);
			break;
		default:
			break;
		}
		ghc |= (phylink & PHY_LINK_DUPLEX) ? GHC_DPX : 0;

		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
					"Full-Duplex, " :
					"Half-Duplex, ");

		if (phylink & PHY_LINK_MDI_STAT)
			strcat(linkmsg, "MDI-X");
		else
			strcat(linkmsg, "MDI");

		/* Half duplex additionally needs collision handling and
		 * TX pause/retry thresholds. */
		if (phylink & PHY_LINK_DUPLEX) {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
		} else {
			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
						TXMCS_BACKOFF |
						TXMCS_CARRIERSENSE |
						TXMCS_COLLISION);
			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
				TXTRHD_TXREN |
				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
		}

		jme->reg_ghc = ghc;
		jwrite32(jme, JME_GHC, ghc);

		msg_link(jme, "Link is up at %s.\n", linkmsg);
		netif_carrier_on(netdev);
	} else {
		if (testonly)
			goto out;

		msg_link(jme, "Link is down.\n");
		jme->phylink = 0;
		netif_carrier_off(netdev);
	}

out:
	return rc;
}
499
/*
 * Allocate and initialise the TX descriptor ring.
 *
 * One coherent DMA buffer is over-allocated (TX_RING_ALLOC_SIZE) so
 * both the CPU pointer and the bus address can be rounded up to
 * RING_DESC_ALIGN.  Descriptors and the parallel bufinf array are
 * zeroed; nr_free starts at the full ring size.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
jme_setup_tx_resources(struct jme_adapter *jme)
{
	struct jme_ring *txring = &(jme->txring[0]);

	txring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   TX_RING_ALLOC_SIZE(jme->tx_ring_size),
				   &(txring->dmaalloc),
				   GFP_ATOMIC);

	if (!txring->alloc) {
		txring->desc = NULL;
		txring->dmaalloc = 0;
		txring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	txring->desc		= (void *)ALIGN((unsigned long)(txring->alloc),
						RING_DESC_ALIGN);
	txring->dma		= ALIGN(txring->dmaalloc, RING_DESC_ALIGN);
	txring->next_to_use	= 0;
	atomic_set(&txring->next_to_clean, 0);
	atomic_set(&txring->nr_free, jme->tx_ring_size);

	/*
	 * Initialize Transmit Descriptors
	 */
	memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
	memset(txring->bufinf, 0,
		sizeof(struct jme_buffer_info) * jme->tx_ring_size);

	return 0;
}
536
537static void
538jme_free_tx_resources(struct jme_adapter *jme)
539{
540 int i;
541 struct jme_ring *txring = &(jme->txring[0]);
542 struct jme_buffer_info *txbi = txring->bufinf;
543
544 if (txring->alloc) {
545 for (i = 0 ; i < jme->tx_ring_size ; ++i) {
546 txbi = txring->bufinf + i;
547 if (txbi->skb) {
548 dev_kfree_skb(txbi->skb);
549 txbi->skb = NULL;
550 }
551 txbi->mapping = 0;
552 txbi->len = 0;
553 txbi->nr_desc = 0;
554 txbi->start_xmit = 0;
555 }
556
557 dma_free_coherent(&(jme->pdev->dev),
558 TX_RING_ALLOC_SIZE(jme->tx_ring_size),
559 txring->alloc,
560 txring->dmaalloc);
561
562 txring->alloc = NULL;
563 txring->desc = NULL;
564 txring->dmaalloc = 0;
565 txring->dma = 0;
566 }
567 txring->next_to_use = 0;
568 atomic_set(&txring->next_to_clean, 0);
569 atomic_set(&txring->nr_free, 0);
570
571}
572
/*
 * Program TX queue 0 and turn the TX engine on: the ring bus address
 * is split across the LO/HI registers for 64-bit DMA, the next-
 * descriptor pointer starts at the ring base, and the descriptor count
 * is set before the final enable.  The wmb()s order queue selection
 * and setup against the enable write.
 */
static inline void
jme_enable_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
	wmb();

	/*
	 * Setup TX Queue 0 DMA Bass Address
	 */
	jwrite32(jme, JME_TXDBA_LO, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_TXDBA_HI, (__u64)(jme->txring[0].dma) >> 32);
	jwrite32(jme, JME_TXNDA, (__u64)jme->txring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup TX Descptor Count
	 */
	jwrite32(jme, JME_TXQDC, jme->tx_ring_size);

	/*
	 * Enable TX Engine
	 */
	wmb();
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);

}
603
/*
 * Re-enable the TX engine on queue 0 without reprogramming the ring
 * (used after an error/stall stop).
 */
static inline void
jme_restart_tx_engine(struct jme_adapter *jme)
{
	/*
	 * Restart TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_ENABLE);
}
614
/*
 * Clear TXCS_ENABLE and poll up to JME_TX_DISABLE_TIMEOUT ms for the
 * engine to report idle.  A timeout is only logged; the function does
 * not fail.
 */
static inline void
jme_disable_tx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable TX Engine
	 */
	jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
	wmb();

	val = jread32(jme, JME_TXCS);
	for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_TXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable TX engine timeout.\n");
}
637
/*
 * Re-arm RX descriptor @i for hardware use: rewrite the buffer
 * address/length from the matching bufinf entry, then — only after the
 * wmb() makes that setup visible — hand ownership back by setting
 * RXFLAG_OWN | RXFLAG_INT.  RXFLAG_64BIT is set in high-DMA mode,
 * presumably so the device honours the upper address dword — confirm
 * with the descriptor layout in jme.h.
 */
static void
jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = jme->rxring;
	register struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	rxdesc += i;
	rxbi += i;

	rxdesc->dw[0] = 0;
	rxdesc->dw[1] = 0;
	rxdesc->desc1.bufaddrh	= cpu_to_le32((__u64)rxbi->mapping >> 32);
	rxdesc->desc1.bufaddrl	= cpu_to_le32(
					(__u64)rxbi->mapping & 0xFFFFFFFFUL);
	rxdesc->desc1.datalen	= cpu_to_le16(rxbi->len);
	if (jme->dev->features & NETIF_F_HIGHDMA)
		rxdesc->desc1.flags = RXFLAG_64BIT;
	wmb();
	rxdesc->desc1.flags	|= RXFLAG_OWN | RXFLAG_INT;
}
658
/*
 * Allocate a fresh receive skb for ring slot @i and map it for DMA.
 * The skb is sized mtu + RX_EXTRA_LEN (extra margin defined in jme.h).
 * Returns 0 on success, -ENOMEM if allocation fails.
 *
 * NOTE(review): the pci_map_page() result is not checked with
 * pci_dma_mapping_error().
 */
static int
jme_make_new_rx_buf(struct jme_adapter *jme, int i)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct jme_buffer_info *rxbi = rxring->bufinf + i;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(jme->dev,
		jme->dev->mtu + RX_EXTRA_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	rxbi->skb = skb;
	rxbi->len = skb_tailroom(skb);
	rxbi->mapping = pci_map_page(jme->pdev,
					virt_to_page(skb->data),
					offset_in_page(skb->data),
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	return 0;
}
681
682static void
683jme_free_rx_buf(struct jme_adapter *jme, int i)
684{
685 struct jme_ring *rxring = &(jme->rxring[0]);
686 struct jme_buffer_info *rxbi = rxring->bufinf;
687 rxbi += i;
688
689 if (rxbi->skb) {
690 pci_unmap_page(jme->pdev,
691 rxbi->mapping,
692 rxbi->len,
693 PCI_DMA_FROMDEVICE);
694 dev_kfree_skb(rxbi->skb);
695 rxbi->skb = NULL;
696 rxbi->mapping = 0;
697 rxbi->len = 0;
698 }
699}
700
/*
 * Tear down the RX descriptor ring: free every per-slot buffer via
 * jme_free_rx_buf(), release the coherent descriptor area, and reset
 * ring bookkeeping.  Safe to call when the ring was never allocated.
 */
static void
jme_free_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	if (rxring->alloc) {
		for (i = 0 ; i < jme->rx_ring_size ; ++i)
			jme_free_rx_buf(jme, i);

		dma_free_coherent(&(jme->pdev->dev),
				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				  rxring->alloc,
				  rxring->dmaalloc);
		rxring->alloc    = NULL;
		rxring->desc     = NULL;
		rxring->dmaalloc = 0;
		rxring->dma      = 0;
	}
	rxring->next_to_use   = 0;
	atomic_set(&rxring->next_to_clean, 0);
}
723
/*
 * Allocate and initialise the RX descriptor ring.
 *
 * Like the TX path, a single coherent buffer is over-allocated and
 * both CPU and bus addresses aligned to RING_DESC_ALIGN.  Each slot
 * gets a receive buffer (jme_make_new_rx_buf) and is then armed via
 * jme_set_clean_rxdesc.  On any buffer-allocation failure the whole
 * ring is torn down again.  Returns 0 or -ENOMEM.
 */
static int
jme_setup_rx_resources(struct jme_adapter *jme)
{
	int i;
	struct jme_ring *rxring = &(jme->rxring[0]);

	rxring->alloc = dma_alloc_coherent(&(jme->pdev->dev),
				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
				   &(rxring->dmaalloc),
				   GFP_ATOMIC);
	if (!rxring->alloc) {
		rxring->desc = NULL;
		rxring->dmaalloc = 0;
		rxring->dma = 0;
		return -ENOMEM;
	}

	/*
	 * 16 Bytes align
	 */
	rxring->desc		= (void *)ALIGN((unsigned long)(rxring->alloc),
						RING_DESC_ALIGN);
	rxring->dma		= ALIGN(rxring->dmaalloc, RING_DESC_ALIGN);
	rxring->next_to_use	= 0;
	atomic_set(&rxring->next_to_clean, 0);

	/*
	 * Initiallize Receive Descriptors
	 */
	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
		if (unlikely(jme_make_new_rx_buf(jme, i))) {
			jme_free_rx_resources(jme);
			return -ENOMEM;
		}

		jme_set_clean_rxdesc(jme, i);
	}

	return 0;
}
764
/*
 * Program RX queue 0 (64-bit ring base split across LO/HI, next-
 * descriptor pointer, descriptor count), refresh the unicast/multicast
 * filters via jme_set_multi(), then enable the RX engine and kick it
 * with RXCS_QST.
 */
static inline void
jme_enable_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Select Queue 0
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0);
	wmb();

	/*
	 * Setup RX DMA Bass Address
	 */
	jwrite32(jme, JME_RXDBA_LO, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);
	jwrite32(jme, JME_RXDBA_HI, (__u64)(jme->rxring[0].dma) >> 32);
	jwrite32(jme, JME_RXNDA, (__u64)jme->rxring[0].dma & 0xFFFFFFFFUL);

	/*
	 * Setup RX Descriptor Count
	 */
	jwrite32(jme, JME_RXQDC, jme->rx_ring_size);

	/*
	 * Setup Unicast Filter
	 */
	jme_set_multi(jme->dev);

	/*
	 * Enable RX Engine
	 */
	wmb();
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
801
/*
 * Re-enable and restart the RX engine on queue 0 without
 * reprogramming the ring (RXCS_QST kicks it off again).
 */
static inline void
jme_restart_rx_engine(struct jme_adapter *jme)
{
	/*
	 * Start RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
				RXCS_QUEUESEL_Q0 |
				RXCS_ENABLE |
				RXCS_QST);
}
813
/*
 * Clear the RX enable bits and poll up to JME_RX_DISABLE_TIMEOUT ms
 * for the engine to report idle.  A timeout is only logged; the
 * function does not fail.
 */
static inline void
jme_disable_rx_engine(struct jme_adapter *jme)
{
	int i;
	u32 val;

	/*
	 * Disable RX Engine
	 */
	jwrite32(jme, JME_RXCS, jme->reg_rxcs);
	wmb();

	val = jread32(jme, JME_RXCS);
	for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
		mdelay(1);
		val = jread32(jme, JME_RXCS);
		rmb();
	}

	if (!i)
		jeprintk(jme->pdev, "Disable RX engine timeout.\n");

}
837
838static int
839jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
840{
841 if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
842 return false;
843
844 if (unlikely(!(flags & RXWBFLAG_MF) &&
845 (flags & RXWBFLAG_TCPON) && !(flags & RXWBFLAG_TCPCS))) {
846 msg_rx_err(jme, "TCP Checksum error.\n");
847 goto out_sumerr;
848 }
849
850 if (unlikely(!(flags & RXWBFLAG_MF) &&
851 (flags & RXWBFLAG_UDPON) && !(flags & RXWBFLAG_UDPCS))) {
852 msg_rx_err(jme, "UDP Checksum error.\n");
853 goto out_sumerr;
854 }
855
856 if (unlikely((flags & RXWBFLAG_IPV4) && !(flags & RXWBFLAG_IPCS))) {
857 msg_rx_err(jme, "IPv4 Checksum error.\n");
858 goto out_sumerr;
859 }
860
861 return true;
862
863out_sumerr:
864 return false;
865}
866
/*
 * Hand the received frame in ring slot @idx up the network stack.
 *
 * The slot's current skb is only consumed if a replacement buffer can
 * be allocated (jme_make_new_rx_buf); otherwise the frame is dropped
 * and the existing buffer is re-synced back to the device.  VLAN-
 * tagged frames go through the vlan receive hook when a vlan group is
 * registered.  The descriptor is re-armed in either case.
 *
 * NOTE(review): descwb.flags is used raw for jme_rxsum_ok() and the
 * RXWBFLAG_TAGON test, but via le16_to_cpu() for the DEST check —
 * inconsistent on big-endian hosts; confirm the field's declared
 * endianness in jme.h.
 */
static void
jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	struct jme_buffer_info *rxbi = rxring->bufinf;
	struct sk_buff *skb;
	int framesize;

	rxdesc += idx;
	rxbi += idx;

	skb = rxbi->skb;
	pci_dma_sync_single_for_cpu(jme->pdev,
					rxbi->mapping,
					rxbi->len,
					PCI_DMA_FROMDEVICE);

	if (unlikely(jme_make_new_rx_buf(jme, idx))) {
		/* No replacement buffer: give this one back to the device
		 * and count the frame as dropped. */
		pci_dma_sync_single_for_device(jme->pdev,
						rxbi->mapping,
						rxbi->len,
						PCI_DMA_FROMDEVICE);

		++(NET_STAT(jme).rx_dropped);
	} else {
		framesize = le16_to_cpu(rxdesc->descwb.framesize)
				- RX_PREPAD_SIZE;

		skb_reserve(skb, RX_PREPAD_SIZE);
		skb_put(skb, framesize);
		skb->protocol = eth_type_trans(skb, jme->dev);

		if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
			if (jme->vlgrp) {
				jme->jme_vlan_rx(skb, jme->vlgrp,
					le32_to_cpu(rxdesc->descwb.vlan));
				/* Account for the stripped VLAN tag. */
				NET_STAT(jme).rx_bytes += 4;
			}
		} else {
			jme->jme_rx(skb);
		}

		if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
				RXWBFLAG_DEST_MUL)
			++(NET_STAT(jme).multicast);

		jme->dev->last_rx = jiffies;
		NET_STAT(jme).rx_bytes += framesize;
		++(NET_STAT(jme).rx_packets);
	}

	jme_set_clean_rxdesc(jme, idx);

}
927
/*
 * RX completion loop: consume up to @limit completed write-back
 * descriptors starting at next_to_clean.
 *
 * The rx_cleaning atomic acts as a try-lock so only one context cleans
 * the ring at a time; the function also bails out while the link is
 * changing or the carrier is down.  Error frames — and frames spanning
 * more than one descriptor, which this driver does not reassemble —
 * only bump counters and recycle their descriptors.
 * Returns the unused budget (>= 0).
 */
static int
jme_process_receive(struct jme_adapter *jme, int limit)
{
	struct jme_ring *rxring = &(jme->rxring[0]);
	struct rxdesc *rxdesc = rxring->desc;
	int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;

	if (unlikely(!atomic_dec_and_test(&jme->rx_cleaning)))
		goto out_inc;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out_inc;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out_inc;

	i = atomic_read(&rxring->next_to_clean);
	while (limit-- > 0) {
		rxdesc = rxring->desc;
		rxdesc += i;

		/* Stop at the first descriptor hardware still owns or
		 * whose write-back is not complete. */
		if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
		!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
			goto out;

		desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;

		if (unlikely(desccnt > 1 ||
		rxdesc->descwb.errstat & RXWBERR_ALLERR)) {

			if (rxdesc->descwb.errstat & RXWBERR_CRCERR)
				++(NET_STAT(jme).rx_crc_errors);
			else if (rxdesc->descwb.errstat & RXWBERR_OVERUN)
				++(NET_STAT(jme).rx_fifo_errors);
			else
				++(NET_STAT(jme).rx_errors);

			/* Charge the extra descriptors against the budget. */
			if (desccnt > 1)
				limit -= desccnt - 1;

			/* Recycle every descriptor of the bad frame. */
			for (j = i, ccnt = desccnt ; ccnt-- ; ) {
				jme_set_clean_rxdesc(jme, j);
				j = (j + 1) & (mask);
			}

		} else {
			jme_alloc_and_feed_skb(jme, i);
		}

		i = (i + desccnt) & (mask);
	}

out:
	atomic_set(&rxring->next_to_clean, i);

out_inc:
	atomic_inc(&jme->rx_cleaning);

	return limit > 0 ? limit : 0;

}
989
990static void
991jme_attempt_pcc(struct dynpcc_info *dpi, int atmp)
992{
993 if (likely(atmp == dpi->cur)) {
994 dpi->cnt = 0;
995 return;
996 }
997
998 if (dpi->attempt == atmp) {
999 ++(dpi->cnt);
1000 } else {
1001 dpi->attempt = atmp;
1002 dpi->cnt = 0;
1003 }
1004
1005}
1006
1007static void
1008jme_dynamic_pcc(struct jme_adapter *jme)
1009{
1010 register struct dynpcc_info *dpi = &(jme->dpi);
1011
1012 if ((NET_STAT(jme).rx_bytes - dpi->last_bytes) > PCC_P3_THRESHOLD)
1013 jme_attempt_pcc(dpi, PCC_P3);
1014 else if ((NET_STAT(jme).rx_packets - dpi->last_pkts) > PCC_P2_THRESHOLD
1015 || dpi->intr_cnt > PCC_INTR_THRESHOLD)
1016 jme_attempt_pcc(dpi, PCC_P2);
1017 else
1018 jme_attempt_pcc(dpi, PCC_P1);
1019
1020 if (unlikely(dpi->attempt != dpi->cur && dpi->cnt > 5)) {
1021 if (dpi->attempt < dpi->cur)
1022 tasklet_schedule(&jme->rxclean_task);
1023 jme_set_rx_pcc(jme, dpi->attempt);
1024 dpi->cur = dpi->attempt;
1025 dpi->cnt = 0;
1026 }
1027}
1028
/*
 * Snapshot the RX counters and (re)arm the PCC interval timer; when it
 * fires, jme_pcc_tasklet compares against these snapshots to retune
 * interrupt coalescing.
 */
static void
jme_start_pcc_timer(struct jme_adapter *jme)
{
	struct dynpcc_info *dpi = &(jme->dpi);
	dpi->last_bytes = NET_STAT(jme).rx_bytes;
	dpi->last_pkts = NET_STAT(jme).rx_packets;
	dpi->intr_cnt = 0;
	/* Count-up timer: start at max - interval so it expires after
	 * PCC_INTERVAL_US microseconds. */
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - PCC_INTERVAL_US) & TMCSR_CNT));
}
1039
/* Disable the PCC interval timer (clears TMCSR_EN and the count). */
static inline void
jme_stop_pcc_timer(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, 0);
}
1045
1046static void
1047jme_shutdown_nic(struct jme_adapter *jme)
1048{
1049 u32 phylink;
1050
1051 phylink = jme_linkstat_from_phy(jme);
1052
1053 if (!(phylink & PHY_LINK_UP)) {
1054 /*
1055 * Disable all interrupt before issue timer
1056 */
1057 jme_stop_irq(jme);
1058 jwrite32(jme, JME_TIMER2, TMCSR_EN | 0xFFFFFE);
1059 }
1060}
1061
/*
 * Runs when the PCC interval timer (JME_TMCSR) expires.  Either advances
 * the pseudo hot-plug shutdown, stops the timer if the link is in flux,
 * or retunes interrupt coalescing and rearms the timer.
 */
static void
jme_pcc_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;

	/* During pseudo hot-plug shutdown the timer drives power-down. */
	if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
		jme_shutdown_nic(jme);
		return;
	}

	/* No carrier, or link-change tasklet active: stop without rearm. */
	if (unlikely(!netif_carrier_ok(netdev) ||
		(atomic_read(&jme->link_changing) != 1)
	)) {
		jme_stop_pcc_timer(jme);
		return;
	}

	/* Dynamic coalescing only applies in interrupt (non-NAPI) mode. */
	if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
		jme_dynamic_pcc(jme);

	jme_start_pcc_timer(jme);
}
1085
/* Enter NAPI polling mode: turn RX interrupt coalescing off entirely. */
static inline void
jme_polling_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_OFF);
}
1091
/* Enter interrupt mode: start from the lowest coalescing level (P1). */
static inline void
jme_interrupt_mode(struct jme_adapter *jme)
{
	jme_set_rx_pcc(jme, PCC_P1);
}
1097
1098static inline int
1099jme_pseudo_hotplug_enabled(struct jme_adapter *jme)
1100{
1101 u32 apmc;
1102 apmc = jread32(jme, JME_APMC);
1103 return apmc & JME_APMC_PSEUDO_HP_EN;
1104}
1105
/*
 * Arm the pseudo hot-plug shutdown: enable PCIe shutdown in APMC
 * (optionally with external-plug interrupt control) and start the
 * TMCSR timer so jme_pcc_tasklet calls jme_shutdown_nic after
 * APMC_PHP_SHUTDOWN_DELAY microseconds of continued link-down.
 */
static void
jme_start_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	apmc = jread32(jme, JME_APMC) | JME_APMC_PCIE_SD_EN;
	apmc &= ~JME_APMC_EPIEN_CTRL;
	if (!no_extplug) {
		/* Pulse EPIEN enable first; wmb() keeps the two posted
		 * writes in order before the final value is written. */
		jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_EN);
		wmb();
	}
	jwrite32f(jme, JME_APMC, apmc);

	/* Clear any stale TIMER2 count before flagging shutdown mode. */
	jwrite32f(jme, JME_TIMER2, 0);
	set_bit(JME_FLAG_SHUTDOWN, &jme->flags);
	jwrite32(jme, JME_TMCSR,
		TMCSR_EN | ((0xFFFFFF - APMC_PHP_SHUTDOWN_DELAY) & TMCSR_CNT));
}
1124
/*
 * Cancel a pending pseudo hot-plug shutdown: stop both timers, clear the
 * shutdown flag, then back out the APMC shutdown/EPIEN enables set by
 * jme_start_shutdown_timer (mirror write sequence, with EPIEN disable
 * pulsed first).
 */
static void
jme_stop_shutdown_timer(struct jme_adapter *jme)
{
	u32 apmc;

	jwrite32f(jme, JME_TMCSR, 0);
	jwrite32f(jme, JME_TIMER2, 0);
	clear_bit(JME_FLAG_SHUTDOWN, &jme->flags);

	apmc = jread32(jme, JME_APMC);
	apmc &= ~(JME_APMC_PCIE_SD_EN | JME_APMC_EPIEN_CTRL);
	jwrite32f(jme, JME_APMC, apmc | JME_APMC_EPIEN_CTRL_DIS);
	wmb();
	jwrite32f(jme, JME_APMC, apmc);
}
1140
/*
 * React to a link-change interrupt: quiesce the datapath, tear down the
 * old rings if carrier was up, re-probe the link, and rebuild/restart
 * everything if carrier is (still) up.  Serialized against the RX/TX
 * paths by the link_changing atomic (1 = free) and by disabling the
 * clean tasklets for the duration.
 */
static void
jme_link_change_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct net_device *netdev = jme->dev;
	int rc;

	/* Spin until we win the link_changing pseudo-lock (dec 1 -> 0). */
	while (!atomic_dec_and_test(&jme->link_changing)) {
		atomic_inc(&jme->link_changing);
		msg_intr(jme, "Get link change lock failed.\n");
		while (atomic_read(&jme->link_changing) != 1)
			msg_intr(jme, "Waiting link change lock.\n");
	}

	/* Nothing actually changed (and MTU is unchanged): bail early. */
	if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
		goto out;

	jme->old_mtu = netdev->mtu;
	netif_stop_queue(netdev);
	if (jme_pseudo_hotplug_enabled(jme))
		jme_stop_shutdown_timer(jme);

	/* Quiesce everything that could touch the rings concurrently. */
	jme_stop_pcc_timer(jme);
	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	if (netif_carrier_ok(netdev)) {
		/* Link was up: full datapath teardown before re-probe. */
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		netif_carrier_off(netdev);
	}

	/* Re-probe; jme_check_link updates carrier state as a side effect. */
	jme_check_link(netdev, 0);
	if (netif_carrier_ok(netdev)) {
		rc = jme_setup_rx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev, "Allocating resources for RX error"
				", Device STOPPED!\n");
			goto out_enable_tasklet;
		}

		rc = jme_setup_tx_resources(jme);
		if (rc) {
			jeprintk(jme->pdev, "Allocating resources for TX error"
				", Device STOPPED!\n");
			goto err_out_free_rx_resources;
		}

		jme_enable_rx_engine(jme);
		jme_enable_tx_engine(jme);

		netif_start_queue(netdev);

		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_interrupt_mode(jme);

		jme_start_pcc_timer(jme);
	} else if (jme_pseudo_hotplug_enabled(jme)) {
		/* Still no link: arm the deferred NIC shutdown. */
		jme_start_shutdown_timer(jme);
	}

	goto out_enable_tasklet;

err_out_free_rx_resources:
	jme_free_rx_resources(jme);
out_enable_tasklet:
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);
out:
	/* Release the link_changing pseudo-lock. */
	atomic_inc(&jme->link_changing);
}
1222
1223static void
1224jme_rx_clean_tasklet(unsigned long arg)
1225{
1226 struct jme_adapter *jme = (struct jme_adapter *)arg;
1227 struct dynpcc_info *dpi = &(jme->dpi);
1228
1229 jme_process_receive(jme, jme->rx_ring_size);
1230 ++(dpi->intr_cnt);
1231
1232}
1233
/*
 * NAPI poll callback (wrapped in JME_NAPI_* compat macros so the same
 * code builds against old and new NAPI APIs).  Processes up to @budget
 * descriptors; if the ring ran empty meanwhile, restarts the RX engine.
 * Returns the number of packets processed, per NAPI contract.
 */
static int
jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
{
	struct jme_adapter *jme = jme_napi_priv(holder);
	struct net_device *netdev = jme->dev;
	int rest;

	rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));

	/* Each pending rx_empty event means the HW stalled for lack of
	 * buffers: count a drop and kick the RX engine again. */
	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);

	/* Budget not exhausted: ring is drained, leave polling mode. */
	if (rest) {
		JME_RX_COMPLETE(netdev, holder);
		jme_interrupt_mode(jme);
	}

	JME_NAPI_WEIGHT_SET(budget, rest);
	return JME_NAPI_WEIGHT_VAL(budget) - rest;
}
1258
/*
 * Bottom half for the RX-queue-empty interrupt in non-NAPI mode: drain
 * whatever completed, then restart the RX engine once per pending
 * rx_empty event (mirrors the recovery loop in jme_poll).
 */
static void
jme_rx_empty_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;

	/* Link tasklet owns the rings right now: skip. */
	if (unlikely(atomic_read(&jme->link_changing) != 1))
		return;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		return;

	msg_rx_status(jme, "RX Queue Full!\n");

	jme_rx_clean_tasklet(arg);

	while (atomic_read(&jme->rx_empty) > 0) {
		atomic_dec(&jme->rx_empty);
		++(NET_STAT(jme).rx_dropped);
		jme_restart_rx_engine(jme);
	}
	atomic_inc(&jme->rx_empty);
}
1281
1282static void
1283jme_wake_queue_if_stopped(struct jme_adapter *jme)
1284{
1285 struct jme_ring *txring = jme->txring;
1286
1287 smp_wmb();
1288 if (unlikely(netif_queue_stopped(jme->dev) &&
1289 atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
1290 msg_tx_done(jme, "TX Queue Waked.\n");
1291 netif_wake_queue(jme->dev);
1292 }
1293
1294}
1295
/*
 * Reclaim completed TX descriptors: unmap fragment pages, free the
 * skbs, update stats, and return the descriptors to the free pool.
 * Serialized by the tx_cleaning atomic pseudo-lock (1 = free).
 */
static void
jme_tx_clean_tasklet(unsigned long arg)
{
	struct jme_adapter *jme = (struct jme_adapter *)arg;
	struct jme_ring *txring = &(jme->txring[0]);
	struct txdesc *txdesc = txring->desc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
	int i, j, cnt = 0, max, err, mask;

	tx_dbg(jme, "Into txclean.\n");

	if (unlikely(!atomic_dec_and_test(&jme->tx_cleaning)))
		goto out;

	if (unlikely(atomic_read(&jme->link_changing) != 1))
		goto out;

	if (unlikely(!netif_carrier_ok(jme->dev)))
		goto out;

	/* max = descriptors currently in flight; never clean past it. */
	max = jme->tx_ring_size - atomic_read(&txring->nr_free);
	mask = jme->tx_ring_mask;

	for (i = atomic_read(&txring->next_to_clean) ; cnt < max ; ) {

		ctxbi = txbi + i;

		/* Only entries with an skb whose first descriptor the HW
		 * has released (OWN cleared) are reclaimable. */
		if (likely(ctxbi->skb &&
		!(txdesc[i].descwb.flags & TXWBFLAG_OWN))) {

			tx_dbg(jme, "txclean: %d+%d@%lu\n",
					i, ctxbi->nr_desc, jiffies);

			err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;

			/* Descriptors 1..nr_desc-1 carry the DMA-mapped
			 * payload pieces; unmap and scrub each one. */
			for (j = 1 ; j < ctxbi->nr_desc ; ++j) {
				ttxbi = txbi + ((i + j) & (mask));
				txdesc[(i + j) & (mask)].dw[0] = 0;

				pci_unmap_page(jme->pdev,
						 ttxbi->mapping,
						 ttxbi->len,
						 PCI_DMA_TODEVICE);

				ttxbi->mapping = 0;
				ttxbi->len = 0;
			}

			dev_kfree_skb(ctxbi->skb);

			cnt += ctxbi->nr_desc;

			if (unlikely(err)) {
				++(NET_STAT(jme).tx_carrier_errors);
			} else {
				++(NET_STAT(jme).tx_packets);
				NET_STAT(jme).tx_bytes += ctxbi->len;
			}

			ctxbi->skb = NULL;
			ctxbi->len = 0;
			ctxbi->start_xmit = 0;

		} else {
			break;
		}

		i = (i + ctxbi->nr_desc) & mask;

		ctxbi->nr_desc = 0;
	}

	tx_dbg(jme, "txclean: done %d@%lu.\n", i, jiffies);
	atomic_set(&txring->next_to_clean, i);
	atomic_add(cnt, &txring->nr_free);

	jme_wake_queue_if_stopped(jme);

out:
	/* Release the tx_cleaning pseudo-lock. */
	atomic_inc(&jme->tx_cleaning);
}
1377
/*
 * Common interrupt dispatch (used by both INTx and MSI handlers):
 * acknowledge each event class in JME_IEVE and fan work out to the
 * appropriate tasklet / NAPI schedule.  Interrupts are masked for the
 * duration and re-enabled at the end.
 */
static void
jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
{
	/*
	 * Disable interrupt
	 */
	jwrite32f(jme, JME_IENC, INTR_ENABLE);

	if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
		/*
		 * Link change event is critical
		 * all other events are ignored
		 */
		jwrite32(jme, JME_IEVE, intrstat);
		tasklet_schedule(&jme->linkch_task);
		goto out_reenable;
	}

	if (intrstat & INTR_TMINTR) {
		jwrite32(jme, JME_IEVE, INTR_TMINTR);
		tasklet_schedule(&jme->pcc_task);
	}

	if (intrstat & (INTR_PCCTXTO | INTR_PCCTX)) {
		jwrite32(jme, JME_IEVE, INTR_PCCTXTO | INTR_PCCTX | INTR_TX0);
		tasklet_schedule(&jme->txclean_task);
	}

	/* Ack RX events up front; who handles them depends on the mode. */
	if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
		jwrite32(jme, JME_IEVE, (intrstat & (INTR_PCCRX0TO |
						     INTR_PCCRX0 |
						     INTR_RX0EMP)) |
					INTR_RX0);
	}

	if (test_bit(JME_FLAG_POLL, &jme->flags)) {
		/* NAPI mode: remember empty events, hand RX to the poller. */
		if (intrstat & INTR_RX0EMP)
			atomic_inc(&jme->rx_empty);

		if ((intrstat & (INTR_PCCRX0TO | INTR_PCCRX0 | INTR_RX0EMP))) {
			if (likely(JME_RX_SCHEDULE_PREP(jme))) {
				jme_polling_mode(jme);
				JME_RX_SCHEDULE(jme);
			}
		}
	} else {
		/* Interrupt mode: empty condition needs engine restart,
		 * so it goes to the (heavier) rxempty tasklet. */
		if (intrstat & INTR_RX0EMP) {
			atomic_inc(&jme->rx_empty);
			tasklet_hi_schedule(&jme->rxempty_task);
		} else if (intrstat & (INTR_PCCRX0TO | INTR_PCCRX0)) {
			tasklet_hi_schedule(&jme->rxclean_task);
		}
	}

out_reenable:
	/*
	 * Re-enable interrupt
	 */
	jwrite32f(jme, JME_IENS, INTR_ENABLE);
}
1438
1439static irqreturn_t
1440jme_intr(int irq, void *dev_id)
1441{
1442 struct net_device *netdev = dev_id;
1443 struct jme_adapter *jme = netdev_priv(netdev);
1444 u32 intrstat;
1445
1446 intrstat = jread32(jme, JME_IEVE);
1447
1448 /*
1449 * Check if it's really an interrupt for us
1450 */
1451 if (unlikely(intrstat == 0))
1452 return IRQ_NONE;
1453
1454 /*
1455 * Check if the device still exist
1456 */
1457 if (unlikely(intrstat == ~((typeof(intrstat))0)))
1458 return IRQ_NONE;
1459
1460 jme_intr_msi(jme, intrstat);
1461
1462 return IRQ_HANDLED;
1463}
1464
/*
 * MSI interrupt handler: the device mirrors its event register into a
 * DMA "shadow" buffer, so the status is read from memory (after a sync
 * for the CPU) instead of an MMIO read, then cleared for the next IRQ.
 */
static irqreturn_t
jme_msi(int irq, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 intrstat;

	pci_dma_sync_single_for_cpu(jme->pdev,
				    jme->shadow_dma,
				    sizeof(u32) * SHADOW_REG_NR,
				    PCI_DMA_FROMDEVICE);
	intrstat = jme->shadow_regs[SHADOW_IEVE];
	jme->shadow_regs[SHADOW_IEVE] = 0;

	jme_intr_msi(jme, intrstat);

	return IRQ_HANDLED;
}
1483
/* Trigger a software interrupt; its handler runs the link-change path. */
static void
jme_reset_link(struct jme_adapter *jme)
{
	jwrite32(jme, JME_TMCSR, TMCSR_SWIT);
}
1489
1490static void
1491jme_restart_an(struct jme_adapter *jme)
1492{
1493 u32 bmcr;
1494
1495 spin_lock_bh(&jme->phy_lock);
1496 bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
1497 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
1498 jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
1499 spin_unlock_bh(&jme->phy_lock);
1500}
1501
1502static int
1503jme_request_irq(struct jme_adapter *jme)
1504{
1505 int rc;
1506 struct net_device *netdev = jme->dev;
1507 irq_handler_t handler = jme_intr;
1508 int irq_flags = IRQF_SHARED;
1509
1510 if (!pci_enable_msi(jme->pdev)) {
1511 set_bit(JME_FLAG_MSI, &jme->flags);
1512 handler = jme_msi;
1513 irq_flags = 0;
1514 }
1515
1516 rc = request_irq(jme->pdev->irq, handler, irq_flags, netdev->name,
1517 netdev);
1518 if (rc) {
1519 jeprintk(jme->pdev,
1520 "Unable to request %s interrupt (return: %d)\n",
1521 test_bit(JME_FLAG_MSI, &jme->flags) ? "MSI" : "INTx",
1522 rc);
1523
1524 if (test_bit(JME_FLAG_MSI, &jme->flags)) {
1525 pci_disable_msi(jme->pdev);
1526 clear_bit(JME_FLAG_MSI, &jme->flags);
1527 }
1528 } else {
1529 netdev->irq = jme->pdev->irq;
1530 }
1531
1532 return rc;
1533}
1534
/*
 * Release the interrupt and, if MSI was in use, disable it and restore
 * netdev->irq to the (INTx) vector pci_disable_msi reverts to.
 */
static void
jme_free_irq(struct jme_adapter *jme)
{
	free_irq(jme->pdev->irq, jme->dev);
	if (test_bit(JME_FLAG_MSI, &jme->flags)) {
		pci_disable_msi(jme->pdev);
		clear_bit(JME_FLAG_MSI, &jme->flags);
		jme->dev->irq = jme->pdev->irq;
	}
}
1545
/*
 * ndo_open: power up, enable NAPI and the bottom-half tasklets, grab
 * the IRQ, then kick off link detection (the datapath itself is built
 * later by the link-change tasklet once carrier is established).
 */
static int
jme_open(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc;

	jme_clear_pm(jme);
	JME_NAPI_ENABLE(jme);

	/* Tasklets were left disabled by probe/close; re-arm them. */
	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	rc = jme_request_irq(jme);
	if (rc)
		goto err_out;

	jme_enable_shadow(jme);
	jme_start_irq(jme);

	/* Restore user ethtool settings if any were saved, else reset PHY. */
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_reset_link(jme);

	return 0;

err_out:
	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	return rc;
}
1580
/*
 * Force the PHY to 100 Mb/s half duplex (used on the suspend/WOL path)
 * and program the MAC's GHC register to match.
 */
static void
jme_set_100m_half(struct jme_adapter *jme)
{
	u32 bmcr, tmp;

	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
	/* Clear autoneg + all speed/duplex bits, then select 100M half. */
	tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
		       BMCR_SPEED1000 | BMCR_FULLDPLX);
	tmp |= BMCR_SPEED100;

	/* Avoid a needless MDIO write if already configured. */
	if (bmcr != tmp)
		jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, tmp);

	/* FPGA eval boards additionally need link polling enabled. */
	if (jme->fpgaver)
		jwrite32(jme, JME_GHC, GHC_SPEED_100M | GHC_LINK_POLL);
	else
		jwrite32(jme, JME_GHC, GHC_SPEED_100M);
}
1599
1600#define JME_WAIT_LINK_TIME 2000 /* 2000ms */
1601static void
1602jme_wait_link(struct jme_adapter *jme)
1603{
1604 u32 phylink, to = JME_WAIT_LINK_TIME;
1605
1606 mdelay(1000);
1607 phylink = jme_linkstat_from_phy(jme);
1608 while (!(phylink & PHY_LINK_UP) && (to -= 10) > 0) {
1609 mdelay(10);
1610 phylink = jme_linkstat_from_phy(jme);
1611 }
1612}
1613
/* Power the PHY down via the BMCR power-down bit. */
static inline void
jme_phy_off(struct jme_adapter *jme)
{
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
1619
/*
 * ndo_close: stop the queue and interrupts first, kill every bottom
 * half, then tear down the MAC engines and ring resources, and finally
 * power down the PHY.  Order matters: no tasklet may run once the
 * rings are freed.
 */
static int
jme_close(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);

	jme_stop_irq(jme);
	jme_disable_shadow(jme);
	jme_free_irq(jme);

	JME_NAPI_DISABLE(jme);

	tasklet_kill(&jme->linkch_task);
	tasklet_kill(&jme->txclean_task);
	tasklet_kill(&jme->rxclean_task);
	tasklet_kill(&jme->rxempty_task);

	jme_reset_ghc_speed(jme);
	jme_disable_rx_engine(jme);
	jme_disable_tx_engine(jme);
	jme_reset_mac_processor(jme);
	jme_free_rx_resources(jme);
	jme_free_tx_resources(jme);
	jme->phylink = 0;
	jme_phy_off(jme);

	return 0;
}
1650
1651static int
1652jme_alloc_txdesc(struct jme_adapter *jme,
1653 struct sk_buff *skb)
1654{
1655 struct jme_ring *txring = jme->txring;
1656 int idx, nr_alloc, mask = jme->tx_ring_mask;
1657
1658 idx = txring->next_to_use;
1659 nr_alloc = skb_shinfo(skb)->nr_frags + 2;
1660
1661 if (unlikely(atomic_read(&txring->nr_free) < nr_alloc))
1662 return -1;
1663
1664 atomic_sub(nr_alloc, &txring->nr_free);
1665
1666 txring->next_to_use = (txring->next_to_use + nr_alloc) & mask;
1667
1668 return idx;
1669}
1670
/*
 * DMA-map one buffer piece and fill the matching "type 2" (data) TX
 * descriptor, recording the mapping in @txbi so jme_tx_clean_tasklet
 * can unmap it later.  @hidma selects 64-bit addressing.
 */
static void
jme_fill_tx_map(struct pci_dev *pdev,
		struct txdesc *txdesc,
		struct jme_buffer_info *txbi,
		struct page *page,
		u32 page_offset,
		u32 len,
		u8 hidma)
{
	dma_addr_t dmaaddr;

	dmaaddr = pci_map_page(pdev,
				page,
				page_offset,
				len,
				PCI_DMA_TODEVICE);

	/* Make the CPU-written payload visible to the device. */
	pci_dma_sync_single_for_device(pdev,
				       dmaaddr,
				       len,
				       PCI_DMA_TODEVICE);

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->desc2.flags	= TXFLAG_OWN;
	txdesc->desc2.flags	|= (hidma) ? TXFLAG_64BIT : 0;
	txdesc->desc2.datalen	= cpu_to_le16(len);
	/* Split the 64-bit DMA address across the high/low fields. */
	txdesc->desc2.bufaddrh	= cpu_to_le32((__u64)dmaaddr >> 32);
	txdesc->desc2.bufaddrl	= cpu_to_le32(
					(__u64)dmaaddr & 0xFFFFFFFFUL);

	txbi->mapping = dmaaddr;
	txbi->len = len;
}
1705
/*
 * Map an skb's payload into the TX ring starting at @idx: slot idx is
 * reserved for the header descriptor (filled later by
 * jme_fill_first_tx_desc), idx+1 holds the linear head data, and
 * idx+2.. hold the page fragments.
 */
static void
jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	struct txdesc *txdesc = txring->desc, *ctxdesc;
	struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
	u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;
	int mask = jme->tx_ring_mask;
	struct skb_frag_struct *frag;
	u32 len;

	for (i = 0 ; i < nr_frags ; ++i) {
		frag = &skb_shinfo(skb)->frags[i];
		ctxdesc = txdesc + ((idx + i + 2) & (mask));
		ctxbi = txbi + ((idx + i + 2) & (mask));

		jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, frag->page,
				 frag->page_offset, frag->size, hidma);
	}

	/* Linear part: headlen only when frags exist, whole skb otherwise. */
	len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	ctxdesc = txdesc + ((idx + 1) & (mask));
	ctxbi = txbi + ((idx + 1) & (mask));
	jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
			offset_in_page(skb->data), len, hidma);

}
1734
1735static int
1736jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
1737{
1738 if (unlikely(skb_shinfo(skb)->gso_size &&
1739 skb_header_cloned(skb) &&
1740 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
1741 dev_kfree_skb(skb);
1742 return -1;
1743 }
1744
1745 return 0;
1746}
1747
1748static int
1749jme_tx_tso(struct sk_buff *skb,
1750 u16 *mss, u8 *flags)
1751{
1752 *mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT;
1753 if (*mss) {
1754 *flags |= TXFLAG_LSEN;
1755
1756 if (skb->protocol == htons(ETH_P_IP)) {
1757 struct iphdr *iph = ip_hdr(skb);
1758
1759 iph->check = 0;
1760 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1761 iph->daddr, 0,
1762 IPPROTO_TCP,
1763 0);
1764 } else {
1765 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1766
1767 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ip6h->saddr,
1768 &ip6h->daddr, 0,
1769 IPPROTO_TCP,
1770 0);
1771 }
1772
1773 return 0;
1774 }
1775
1776 return 1;
1777}
1778
1779static void
1780jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
1781{
1782 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1783 u8 ip_proto;
1784
1785 switch (skb->protocol) {
1786 case htons(ETH_P_IP):
1787 ip_proto = ip_hdr(skb)->protocol;
1788 break;
1789 case htons(ETH_P_IPV6):
1790 ip_proto = ipv6_hdr(skb)->nexthdr;
1791 break;
1792 default:
1793 ip_proto = 0;
1794 break;
1795 }
1796
1797 switch (ip_proto) {
1798 case IPPROTO_TCP:
1799 *flags |= TXFLAG_TCPCS;
1800 break;
1801 case IPPROTO_UDP:
1802 *flags |= TXFLAG_UDPCS;
1803 break;
1804 default:
1805 msg_tx_err(jme, "Error upper layer protocol.\n");
1806 break;
1807 }
1808 }
1809}
1810
/*
 * If the skb carries a VLAN tag, request hardware tag insertion and
 * copy the tag into the descriptor field.
 */
static inline void
jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags)
{
	if (vlan_tx_tag_present(skb)) {
		*flags |= TXFLAG_TAGON;
		*vlan = vlan_tx_tag_get(skb);
	}
}
1819
/*
 * Fill the header ("type 1") descriptor at @idx — the last step of
 * queuing a packet, after jme_map_tx_skb has populated the data
 * descriptors.  The OWN bit is set here with careful write ordering so
 * the NIC never sees a half-built chain.
 */
static int
jme_fill_first_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx)
{
	struct jme_ring *txring = jme->txring;
	struct txdesc *txdesc;
	struct jme_buffer_info *txbi;
	u8 flags;

	txdesc = (struct txdesc *)txring->desc + idx;
	txbi = txring->bufinf + idx;

	txdesc->dw[0] = 0;
	txdesc->dw[1] = 0;
	txdesc->dw[2] = 0;
	txdesc->dw[3] = 0;
	txdesc->desc1.pktsize = cpu_to_le16(skb->len);
	/*
	 * Set OWN bit at final.
	 * When kernel transmit faster than NIC.
	 * And NIC trying to send this descriptor before we tell
	 * it to start sending this TX queue.
	 * Other fields are already filled correctly.
	 */
	wmb();
	flags = TXFLAG_OWN | TXFLAG_INT;
	/*
	 * Set checksum flags while not tso
	 */
	if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags))
		jme_tx_csum(jme, skb, &flags);
	jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags);
	txdesc->desc1.flags = flags;
	/*
	 * Set tx buffer info after telling NIC to send
	 * For better tx_clean timing
	 */
	wmb();
	txbi->nr_desc = skb_shinfo(skb)->nr_frags + 2;
	txbi->skb = skb;
	txbi->len = skb->len;
	/* jiffies == 0 would read as "no timestamp" in the timeout check
	 * in jme_stop_queue_if_full; substitute the max value instead. */
	txbi->start_xmit = jiffies;
	if (!txbi->start_xmit)
		txbi->start_xmit = (0UL-1);

	return 0;
}
1866
/*
 * After queuing a packet: pause the TX queue when the ring is nearly
 * full (re-waking immediately if the cleaner freed space in the
 * meantime), and also stop it if the oldest in-flight packet has been
 * pending longer than TX_TIMEOUT (stalled hardware).
 */
static void
jme_stop_queue_if_stopped_full(struct jme_adapter *jme);
static void
jme_stop_queue_if_full(struct jme_adapter *jme)
{
	struct jme_ring *txring = jme->txring;
	struct jme_buffer_info *txbi = txring->bufinf;
	int idx = atomic_read(&txring->next_to_clean);

	txbi += idx;

	smp_wmb();
	if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Paused.\n");
		smp_wmb();
		/* Re-check: the clean tasklet may have freed descriptors
		 * between the test above and the stop. */
		if (atomic_read(&txring->nr_free)
			>= (jme->tx_wake_threshold)) {
			netif_wake_queue(jme->dev);
			msg_tx_queued(jme, "TX Queue Fast Waked.\n");
		}
	}

	if (unlikely(txbi->start_xmit &&
			(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
			txbi->skb)) {
		netif_stop_queue(jme->dev);
		msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
	}
}
1895
1896/*
1897 * This function is already protected by netif_tx_lock()
1898 */
1899
/*
 * ndo_start_xmit (serialized by netif_tx_lock): reserve descriptors,
 * DMA-map the payload, fill the header descriptor, then kick the TX
 * engine.  Returns NETDEV_TX_OK (possibly after dropping) or
 * NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static int
jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int idx;

	/* jme_expand_header frees the skb itself on failure. */
	if (unlikely(jme_expand_header(jme, skb))) {
		++(NET_STAT(jme).tx_dropped);
		return NETDEV_TX_OK;
	}

	idx = jme_alloc_txdesc(jme, skb);

	/* Should not happen: the queue is stopped before the ring fills. */
	if (unlikely(idx < 0)) {
		netif_stop_queue(netdev);
		msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}

	/* Data descriptors first, then the OWN-carrying header desc. */
	jme_map_tx_skb(jme, skb, idx);
	jme_fill_first_tx_desc(jme, skb, idx);

	jwrite32(jme, JME_TXCS, jme->reg_txcs |
				TXCS_SELECT_QUEUE0 |
				TXCS_QUEUE0S |
				TXCS_ENABLE);
	netdev->trans_start = jiffies;

	tx_dbg(jme, "xmit: %d+%d@%lu\n", idx,
			skb_shinfo(skb)->nr_frags + 2,
			jiffies);
	jme_stop_queue_if_full(jme);

	return NETDEV_TX_OK;
}
1936
/*
 * ndo_set_mac_address: refuse while the interface is running, otherwise
 * store the new address and program the unicast-match registers
 * (bytes 0-3 in RXUMA_LO, bytes 4-5 in RXUMA_HI).
 */
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(netdev))
		return -EBUSY;

	spin_lock_bh(&jme->macaddr_lock);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	/* sa_data is signed char; mask each byte before shifting. */
	val = (addr->sa_data[3] & 0xff) << 24 |
	      (addr->sa_data[2] & 0xff) << 16 |
	      (addr->sa_data[1] & 0xff) <<  8 |
	      (addr->sa_data[0] & 0xff);
	jwrite32(jme, JME_RXUMA_LO, val);
	val = (addr->sa_data[5] & 0xff) << 8 |
	      (addr->sa_data[4] & 0xff);
	jwrite32(jme, JME_RXUMA_HI, val);
	spin_unlock_bh(&jme->macaddr_lock);

	return 0;
}
1962
/*
 * ndo_set_multicast_list: rebuild the RX mode.  Promiscuous accepts
 * everything, allmulti accepts all multicast, otherwise a 64-bit hash
 * filter is built from the device's multicast list (legacy dev_mc_list
 * walk) using the low 6 bits of the Ethernet CRC.
 */
static void
jme_set_multi(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 mc_hash[2] = {};
	int i;

	spin_lock_bh(&jme->rxmcs_lock);

	jme->reg_rxmcs |= RXMCS_BRDFRAME | RXMCS_UNIFRAME;

	if (netdev->flags & IFF_PROMISC) {
		jme->reg_rxmcs |= RXMCS_ALLFRAME;
	} else if (netdev->flags & IFF_ALLMULTI) {
		jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
	} else if (netdev->flags & IFF_MULTICAST) {
		struct dev_mc_list *mclist;
		int bit_nr;

		jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
		for (i = 0, mclist = netdev->mc_list;
			mclist && i < netdev->mc_count;
			++i, mclist = mclist->next) {

			/* CRC low 6 bits select one of 64 hash positions. */
			bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
			mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
		}

		jwrite32(jme, JME_RXMCHT_LO, mc_hash[0]);
		jwrite32(jme, JME_RXMCHT_HI, mc_hash[1]);
	}

	/* Hash registers must land before the mode register enables them. */
	wmb();
	jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);

	spin_unlock_bh(&jme->rxmcs_lock);
}
2000
2001static int
2002jme_change_mtu(struct net_device *netdev, int new_mtu)
2003{
2004 struct jme_adapter *jme = netdev_priv(netdev);
2005
2006 if (new_mtu == jme->old_mtu)
2007 return 0;
2008
2009 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
2010 ((new_mtu) < IPV6_MIN_MTU))
2011 return -EINVAL;
2012
2013 if (new_mtu > 4000) {
2014 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2015 jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
2016 jme_restart_rx_engine(jme);
2017 } else {
2018 jme->reg_rxcs &= ~RXCS_FIFOTHNP;
2019 jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
2020 jme_restart_rx_engine(jme);
2021 }
2022
2023 if (new_mtu > 1900) {
2024 netdev->features &= ~(NETIF_F_HW_CSUM |
2025 NETIF_F_TSO |
2026 NETIF_F_TSO6);
2027 } else {
2028 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2029 netdev->features |= NETIF_F_HW_CSUM;
2030 if (test_bit(JME_FLAG_TSO, &jme->flags))
2031 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2032 }
2033
2034 netdev->mtu = new_mtu;
2035 jme_reset_link(jme);
2036
2037 return 0;
2038}
2039
/*
 * ndo_tx_timeout: recover a stalled transmitter by resetting the PHY
 * (restoring saved ethtool settings if any) and forcing the
 * link-change path to rebuild the datapath.
 */
static void
jme_tx_timeout(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->phylink = 0;
	jme_reset_phy_processor(jme);
	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);

	/*
	 * Force to Reset the link again
	 */
	jme_reset_link(jme);
}
2055
/* VLAN ops hook: remember the VLAN group used on the RX path. */
static void
jme_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme->vlgrp = grp;
}
2063
/*
 * ethtool get_drvinfo: report driver name, version and PCI bus id.
 * NOTE(review): plain strcpy into the fixed-size ethtool_drvinfo
 * fields relies on DRV_NAME/DRV_VERSION/pci_name() always fitting —
 * strlcpy would be the defensive choice; confirm the constants' sizes.
 */
static void
jme_get_drvinfo(struct net_device *netdev,
		     struct ethtool_drvinfo *info)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(jme->pdev));
}
2074
/* ethtool get_regs_len: size of the register dump buffer. */
static int
jme_get_regs_len(struct net_device *netdev)
{
	return JME_REG_LEN;
}
2080
2081static void
2082mmapio_memcpy(struct jme_adapter *jme, u32 *p, u32 reg, int len)
2083{
2084 int i;
2085
2086 for (i = 0 ; i < len ; i += 4)
2087 p[i >> 2] = jread32(jme, reg + i);
2088}
2089
2090static void
2091mdio_memcpy(struct jme_adapter *jme, u32 *p, int reg_nr)
2092{
2093 int i;
2094 u16 *p16 = (u16 *)p;
2095
2096 for (i = 0 ; i < reg_nr ; ++i)
2097 p16[i] = jme_mdio_read(jme->dev, jme->mii_if.phy_id, i);
2098}
2099
/*
 * ethtool get_regs: dump the MAC, PHY-access, MISC and RSS register
 * banks (each bank at a 0x100-byte stride in the output buffer),
 * followed by the raw PHY registers read over MDIO.  Unused bytes are
 * pre-filled with 0xFF.
 */
static void
jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 *p32 = (u32 *)p;

	memset(p, 0xFF, JME_REG_LEN);

	regs->version = 1;
	mmapio_memcpy(jme, p32, JME_MAC, JME_MAC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_PHY, JME_PHY_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_MISC, JME_MISC_LEN);

	p32 += 0x100 >> 2;
	mmapio_memcpy(jme, p32, JME_RSS, JME_RSS_LEN);

	p32 += 0x100 >> 2;
	mdio_memcpy(jme, p32, JME_PHY_REG_NR);
}
2123
2124static int
2125jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
2126{
2127 struct jme_adapter *jme = netdev_priv(netdev);
2128
2129 ecmd->tx_coalesce_usecs = PCC_TX_TO;
2130 ecmd->tx_max_coalesced_frames = PCC_TX_CNT;
2131
2132 if (test_bit(JME_FLAG_POLL, &jme->flags)) {
2133 ecmd->use_adaptive_rx_coalesce = false;
2134 ecmd->rx_coalesce_usecs = 0;
2135 ecmd->rx_max_coalesced_frames = 0;
2136 return 0;
2137 }
2138
2139 ecmd->use_adaptive_rx_coalesce = true;
2140
2141 switch (jme->dpi.cur) {
2142 case PCC_P1:
2143 ecmd->rx_coalesce_usecs = PCC_P1_TO;
2144 ecmd->rx_max_coalesced_frames = PCC_P1_CNT;
2145 break;
2146 case PCC_P2:
2147 ecmd->rx_coalesce_usecs = PCC_P2_TO;
2148 ecmd->rx_max_coalesced_frames = PCC_P2_CNT;
2149 break;
2150 case PCC_P3:
2151 ecmd->rx_coalesce_usecs = PCC_P3_TO;
2152 ecmd->rx_max_coalesced_frames = PCC_P3_CNT;
2153 break;
2154 default:
2155 break;
2156 }
2157
2158 return 0;
2159}
2160
/*
 * ethtool set_coalesce (only while the interface is down): toggles
 * between adaptive-coalescing interrupt mode (POLL flag clear, frames
 * delivered via netif_rx from tasklet context) and NAPI polling mode
 * (POLL flag set, frames delivered via netif_receive_skb).
 */
static int
jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	struct dynpcc_info *dpi = &(jme->dpi);

	if (netif_running(netdev))
		return -EBUSY;

	if (ecmd->use_adaptive_rx_coalesce
	&& test_bit(JME_FLAG_POLL, &jme->flags)) {
		/* NAPI -> adaptive interrupt mode; restart PCC at P1. */
		clear_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_rx;
		jme->jme_vlan_rx = vlan_hwaccel_rx;
		dpi->cur		= PCC_P1;
		dpi->attempt		= PCC_P1;
		dpi->cnt		= 0;
		jme_set_rx_pcc(jme, PCC_P1);
		jme_interrupt_mode(jme);
	} else if (!(ecmd->use_adaptive_rx_coalesce)
	&& !(test_bit(JME_FLAG_POLL, &jme->flags))) {
		/* Interrupt mode -> NAPI polling. */
		set_bit(JME_FLAG_POLL, &jme->flags);
		jme->jme_rx = netif_receive_skb;
		jme->jme_vlan_rx = vlan_hwaccel_receive_skb;
		jme_interrupt_mode(jme);
	}

	return 0;
}
2190
/*
 * ethtool get_pauseparam: TX/RX pause come from the cached TXPFC/RXMCS
 * register values; pause autoneg is inferred from whether any pause
 * capability bits are advertised on the PHY.
 */
static void
jme_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	ecmd->tx_pause = (jme->reg_txpfc & TXPFC_PF_EN) != 0;
	ecmd->rx_pause = (jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0;

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	spin_unlock_bh(&jme->phy_lock);

	ecmd->autoneg =
		(val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0;
}
2208
/*
 * ethtool set_pauseparam: update TX pause (TXPFC), RX pause (RXMCS)
 * and the advertised pause capabilities (MII_ADVERTISE), each only
 * when the requested setting differs from the current one.
 * NOTE(review): unlike the RXMCS and PHY sections, the TXPFC
 * read-modify-write below is not protected by any lock — confirm no
 * other path updates reg_txpfc concurrently.
 */
static int
jme_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	u32 val;

	/* XOR of booleans: act only when current != requested. */
	if (((jme->reg_txpfc & TXPFC_PF_EN) != 0) ^
		(ecmd->tx_pause != 0)) {

		if (ecmd->tx_pause)
			jme->reg_txpfc |= TXPFC_PF_EN;
		else
			jme->reg_txpfc &= ~TXPFC_PF_EN;

		jwrite32(jme, JME_TXPFC, jme->reg_txpfc);
	}

	spin_lock_bh(&jme->rxmcs_lock);
	if (((jme->reg_rxmcs & RXMCS_FLOWCTRL) != 0) ^
		(ecmd->rx_pause != 0)) {

		if (ecmd->rx_pause)
			jme->reg_rxmcs |= RXMCS_FLOWCTRL;
		else
			jme->reg_rxmcs &= ~RXMCS_FLOWCTRL;

		jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
	}
	spin_unlock_bh(&jme->rxmcs_lock);

	spin_lock_bh(&jme->phy_lock);
	val = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_ADVERTISE);
	if (((val & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) != 0) ^
		(ecmd->autoneg != 0)) {

		if (ecmd->autoneg)
			val |= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		else
			val &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		jme_mdio_write(jme->dev, jme->mii_if.phy_id,
				MII_ADVERTISE, val);
	}
	spin_unlock_bh(&jme->phy_lock);

	return 0;
}
2257
2258static void
2259jme_get_wol(struct net_device *netdev,
2260 struct ethtool_wolinfo *wol)
2261{
2262 struct jme_adapter *jme = netdev_priv(netdev);
2263
2264 wol->supported = WAKE_MAGIC | WAKE_PHY;
2265
2266 wol->wolopts = 0;
2267
2268 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
2269 wol->wolopts |= WAKE_PHY;
2270
2271 if (jme->reg_pmcs & PMCS_MFEN)
2272 wol->wolopts |= WAKE_MAGIC;
2273
2274}
2275
2276static int
2277jme_set_wol(struct net_device *netdev,
2278 struct ethtool_wolinfo *wol)
2279{
2280 struct jme_adapter *jme = netdev_priv(netdev);
2281
2282 if (wol->wolopts & (WAKE_MAGICSECURE |
2283 WAKE_UCAST |
2284 WAKE_MCAST |
2285 WAKE_BCAST |
2286 WAKE_ARP))
2287 return -EOPNOTSUPP;
2288
2289 jme->reg_pmcs = 0;
2290
2291 if (wol->wolopts & WAKE_PHY)
2292 jme->reg_pmcs |= PMCS_LFEN | PMCS_LREN;
2293
2294 if (wol->wolopts & WAKE_MAGIC)
2295 jme->reg_pmcs |= PMCS_MFEN;
2296
2297 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2298
2299 return 0;
2300}
2301
2302static int
2303jme_get_settings(struct net_device *netdev,
2304 struct ethtool_cmd *ecmd)
2305{
2306 struct jme_adapter *jme = netdev_priv(netdev);
2307 int rc;
2308
2309 spin_lock_bh(&jme->phy_lock);
2310 rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
2311 spin_unlock_bh(&jme->phy_lock);
2312 return rc;
2313}
2314
/*
 * ethtool set_settings: apply link settings via the generic MII helper.
 * Forced 1000M without autoneg is invalid.  A forced-media duplex
 * change additionally resets the link so the MAC picks up the new
 * duplex.  Successful settings are remembered for re-apply on resume.
 */
static int
jme_set_settings(struct net_device *netdev,
		     struct ethtool_cmd *ecmd)
{
	struct jme_adapter *jme = netdev_priv(netdev);
	int rc, fdc = 0;

	if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;

	/* fdc: forced duplex change requested while media is forced. */
	if (jme->mii_if.force_media &&
	ecmd->autoneg != AUTONEG_ENABLE &&
	(jme->mii_if.full_duplex != ecmd->duplex))
		fdc = 1;

	spin_lock_bh(&jme->phy_lock);
	rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
	spin_unlock_bh(&jme->phy_lock);

	if (!rc && fdc)
		jme_reset_link(jme);

	if (!rc) {
		/* Remember the user's settings so resume can restore them. */
		set_bit(JME_FLAG_SSET, &jme->flags);
		jme->old_ecmd = *ecmd;
	}

	return rc;
}
2344
2345static u32
2346jme_get_link(struct net_device *netdev)
2347{
2348 struct jme_adapter *jme = netdev_priv(netdev);
2349 return jread32(jme, JME_PHY_LINK) & PHY_LINK_UP;
2350}
2351
2352static u32
2353jme_get_msglevel(struct net_device *netdev)
2354{
2355 struct jme_adapter *jme = netdev_priv(netdev);
2356 return jme->msg_enable;
2357}
2358
2359static void
2360jme_set_msglevel(struct net_device *netdev, u32 value)
2361{
2362 struct jme_adapter *jme = netdev_priv(netdev);
2363 jme->msg_enable = value;
2364}
2365
2366static u32
2367jme_get_rx_csum(struct net_device *netdev)
2368{
2369 struct jme_adapter *jme = netdev_priv(netdev);
2370 return jme->reg_rxmcs & RXMCS_CHECKSUM;
2371}
2372
2373static int
2374jme_set_rx_csum(struct net_device *netdev, u32 on)
2375{
2376 struct jme_adapter *jme = netdev_priv(netdev);
2377
2378 spin_lock_bh(&jme->rxmcs_lock);
2379 if (on)
2380 jme->reg_rxmcs |= RXMCS_CHECKSUM;
2381 else
2382 jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
2383 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
2384 spin_unlock_bh(&jme->rxmcs_lock);
2385
2386 return 0;
2387}
2388
2389static int
2390jme_set_tx_csum(struct net_device *netdev, u32 on)
2391{
2392 struct jme_adapter *jme = netdev_priv(netdev);
2393
2394 if (on) {
2395 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2396 if (netdev->mtu <= 1900)
2397 netdev->features |= NETIF_F_HW_CSUM;
2398 } else {
2399 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2400 netdev->features &= ~NETIF_F_HW_CSUM;
2401 }
2402
2403 return 0;
2404}
2405
2406static int
2407jme_set_tso(struct net_device *netdev, u32 on)
2408{
2409 struct jme_adapter *jme = netdev_priv(netdev);
2410
2411 if (on) {
2412 set_bit(JME_FLAG_TSO, &jme->flags);
2413 if (netdev->mtu <= 1900)
2414 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2415 } else {
2416 clear_bit(JME_FLAG_TSO, &jme->flags);
2417 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2418 }
2419
2420 return 0;
2421}
2422
/* ethtool nway_reset: restart autonegotiation on the PHY. */
static int
jme_nway_reset(struct net_device *netdev)
{
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_restart_an(jme);

	return 0;
}
2430
/*
 * Read one byte from the SMBus-attached EEPROM at @addr.
 *
 * Sleeps in 1 ms steps (up to JME_SMB_BUSY_TIMEOUT iterations) first
 * for the controller to go idle, then for the issued read command to
 * complete.  Returns 0xFF on timeout — note this is indistinguishable
 * from a genuine 0xFF byte in the EEPROM.
 */
static u8
jme_smb_read(struct jme_adapter *jme, unsigned int addr)
{
	u32 val;
	int to;

	/* Wait for the SMB controller to become idle. */
	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return 0xFF;
	}

	/* Issue the read transaction. */
	jwrite32(jme, JME_SMBINTF,
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_READ |
		SMBINTF_HWCMD);

	/* HWCMD self-clears when the transaction has finished. */
	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return 0xFF;
	}

	return (val & SMBINTF_HWDATR) >> SMBINTF_HWDATR_SHIFT;
}
2466
/*
 * Write one byte @data to the SMBus-attached EEPROM at @addr.
 *
 * Same polling scheme as jme_smb_read(); timeouts are logged and the
 * write is silently dropped (void return).  The trailing mdelay(2)
 * gives the EEPROM time to commit the byte before the next access.
 */
static void
jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
{
	u32 val;
	int to;

	/* Wait for the SMB controller to become idle. */
	val = jread32(jme, JME_SMBCSR);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBCSR_BUSY) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBCSR);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return;
	}

	/* Issue the write transaction. */
	jwrite32(jme, JME_SMBINTF,
		((data << SMBINTF_HWDATW_SHIFT) & SMBINTF_HWDATW) |
		((addr << SMBINTF_HWADDR_SHIFT) & SMBINTF_HWADDR) |
		SMBINTF_HWRWN_WRITE |
		SMBINTF_HWCMD);

	/* HWCMD self-clears when the transaction has finished. */
	val = jread32(jme, JME_SMBINTF);
	to = JME_SMB_BUSY_TIMEOUT;
	while ((val & SMBINTF_HWCMD) && --to) {
		msleep(1);
		val = jread32(jme, JME_SMBINTF);
	}
	if (!to) {
		msg_hw(jme, "SMB Bus Busy.\n");
		return;
	}

	/* Allow the EEPROM write cycle to complete. */
	mdelay(2);
}
2503
2504static int
2505jme_get_eeprom_len(struct net_device *netdev)
2506{
2507 struct jme_adapter *jme = netdev_priv(netdev);
2508 u32 val;
2509 val = jread32(jme, JME_SMBCSR);
2510 return (val & SMBCSR_EEPROMD) ? JME_SMB_LEN : 0;
2511}
2512
2513static int
2514jme_get_eeprom(struct net_device *netdev,
2515 struct ethtool_eeprom *eeprom, u8 *data)
2516{
2517 struct jme_adapter *jme = netdev_priv(netdev);
2518 int i, offset = eeprom->offset, len = eeprom->len;
2519
2520 /*
2521 * ethtool will check the boundary for us
2522 */
2523 eeprom->magic = JME_EEPROM_MAGIC;
2524 for (i = 0 ; i < len ; ++i)
2525 data[i] = jme_smb_read(jme, i + offset);
2526
2527 return 0;
2528}
2529
2530static int
2531jme_set_eeprom(struct net_device *netdev,
2532 struct ethtool_eeprom *eeprom, u8 *data)
2533{
2534 struct jme_adapter *jme = netdev_priv(netdev);
2535 int i, offset = eeprom->offset, len = eeprom->len;
2536
2537 if (eeprom->magic != JME_EEPROM_MAGIC)
2538 return -EINVAL;
2539
2540 /*
2541 * ethtool will check the boundary for us
2542 */
2543 for (i = 0 ; i < len ; ++i)
2544 jme_smb_write(jme, i + offset, data[i]);
2545
2546 return 0;
2547}
2548
/* ethtool operations exported by this driver. */
static const struct ethtool_ops jme_ethtool_ops = {
	.get_drvinfo            = jme_get_drvinfo,
	.get_regs_len		= jme_get_regs_len,
	.get_regs		= jme_get_regs,
	.get_coalesce		= jme_get_coalesce,
	.set_coalesce		= jme_set_coalesce,
	.get_pauseparam		= jme_get_pauseparam,
	.set_pauseparam		= jme_set_pauseparam,
	.get_wol		= jme_get_wol,
	.set_wol		= jme_set_wol,
	.get_settings		= jme_get_settings,
	.set_settings		= jme_set_settings,
	.get_link		= jme_get_link,
	.get_msglevel           = jme_get_msglevel,
	.set_msglevel           = jme_set_msglevel,
	.get_rx_csum		= jme_get_rx_csum,
	.set_rx_csum		= jme_set_rx_csum,
	.set_tx_csum		= jme_set_tx_csum,
	.set_tso		= jme_set_tso,
	.set_sg			= ethtool_op_set_sg,
	.nway_reset             = jme_nway_reset,
	.get_eeprom_len		= jme_get_eeprom_len,
	.get_eeprom		= jme_get_eeprom,
	.set_eeprom		= jme_set_eeprom,
};
2574
/*
 * Select the widest usable DMA mask for the device, preferring 64-bit,
 * then 40-bit, then 32-bit.
 *
 * Returns 1 when a >32-bit (DAC-capable) mask was set, 0 for plain
 * 32-bit DMA, and -1 when no mask could be set at all.
 */
static int
jme_pci_dma64(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
			return 1;

	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
			return 0;

	return -1;
}
2592
/*
 * One-time PHY tweak for non-FPGA parts: set bit 0x1000 in
 * vendor-specific PHY register 26.
 * NOTE(review): the meaning of this bit is not documented here —
 * presumably a vendor errata/FIFO setting; confirm against the
 * JMicron PHY datasheet before changing.
 */
static inline void
jme_phy_init(struct jme_adapter *jme)
{
	u16 reg26;

	reg26 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, 26);
	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
}
2601
2602static inline void
2603jme_check_hw_ver(struct jme_adapter *jme)
2604{
2605 u32 chipmode;
2606
2607 chipmode = jread32(jme, JME_CHIPMODE);
2608
2609 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
2610 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2611}
2612
/*
 * PCI probe: enable the PCI function, pick a DMA mask, map BAR 0,
 * allocate and initialize the net_device/adapter state, reset the MAC,
 * load the MAC address from EEPROM and register the interface.
 * Error paths unwind in reverse order via the labels at the bottom.
 */
static int __devinit
jme_init_one(struct pci_dev *pdev,
	     const struct pci_device_id *ent)
{
	int rc = 0, using_dac, i;
	struct net_device *netdev;
	struct jme_adapter *jme;
	u16 bmcr, bmsr;
	u32 apmc;

	/*
	 * set up PCI device basics
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		jeprintk(pdev, "Cannot enable PCI device.\n");
		goto err_out;
	}

	using_dac = jme_pci_dma64(pdev);
	if (using_dac < 0) {
		jeprintk(pdev, "Cannot set PCI DMA Mask.\n");
		rc = -EIO;
		goto err_out_disable_pdev;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		jeprintk(pdev, "No PCI resource region found.\n");
		rc = -ENOMEM;
		goto err_out_disable_pdev;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		jeprintk(pdev, "Cannot obtain PCI resource region.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * alloc and init net device
	 */
	netdev = alloc_etherdev(sizeof(*jme));
	if (!netdev) {
		jeprintk(pdev, "Cannot allocate netdev structure.\n");
		rc = -ENOMEM;
		goto err_out_release_regions;
	}
	netdev->open			= jme_open;
	netdev->stop			= jme_close;
	netdev->hard_start_xmit		= jme_start_xmit;
	netdev->set_mac_address		= jme_set_macaddr;
	netdev->set_multicast_list	= jme_set_multi;
	netdev->change_mtu		= jme_change_mtu;
	netdev->ethtool_ops		= &jme_ethtool_ops;
	netdev->tx_timeout		= jme_tx_timeout;
	netdev->watchdog_timeo		= TX_TIMEOUT;
	netdev->vlan_rx_register	= jme_vlan_rx_register;
	NETDEV_GET_STATS(netdev, &jme_get_stats);
	netdev->features		=	NETIF_F_HW_CSUM |
						NETIF_F_SG |
						NETIF_F_TSO |
						NETIF_F_TSO6 |
						NETIF_F_HW_VLAN_TX |
						NETIF_F_HW_VLAN_RX;
	if (using_dac)
		netdev->features	|=	NETIF_F_HIGHDMA;

	SET_NETDEV_DEV(netdev, &pdev->dev);
	pci_set_drvdata(pdev, netdev);

	/*
	 * init adapter info
	 */
	jme = netdev_priv(netdev);
	jme->pdev = pdev;
	jme->dev = netdev;
	/* Default: interrupt mode, packets delivered via netif_rx. */
	jme->jme_rx = netif_rx;
	jme->jme_vlan_rx = vlan_hwaccel_rx;
	jme->old_mtu = netdev->mtu = 1500;
	jme->phylink = 0;
	jme->tx_ring_size = 1 << 10;
	jme->tx_ring_mask = jme->tx_ring_size - 1;
	jme->tx_wake_threshold = 1 << 9;
	jme->rx_ring_size = 1 << 9;
	jme->rx_ring_mask = jme->rx_ring_size - 1;
	jme->msg_enable = JME_DEF_MSG_ENABLE;
	jme->regs = ioremap(pci_resource_start(pdev, 0),
			     pci_resource_len(pdev, 0));
	if (!(jme->regs)) {
		jeprintk(pdev, "Mapping PCI resource region error.\n");
		rc = -ENOMEM;
		goto err_out_free_netdev;
	}
	jme->shadow_regs = pci_alloc_consistent(pdev,
					        sizeof(u32) * SHADOW_REG_NR,
					        &(jme->shadow_dma));
	if (!(jme->shadow_regs)) {
		jeprintk(pdev, "Allocating shadow register mapping error.\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	if (no_pseudohp) {
		apmc = jread32(jme, JME_APMC) & ~JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	} else if (force_pseudohp) {
		apmc = jread32(jme, JME_APMC) | JME_APMC_PSEUDO_HP_EN;
		jwrite32(jme, JME_APMC, apmc);
	}

	/* No trailing ';' — the NETIF_NAPI_SET macro embeds its own. */
	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);
	spin_lock_init(&jme->rxmcs_lock);

	atomic_set(&jme->link_changing, 1);
	atomic_set(&jme->rx_cleaning, 1);
	atomic_set(&jme->tx_cleaning, 1);
	atomic_set(&jme->rx_empty, 1);

	tasklet_init(&jme->pcc_task,
		     &jme_pcc_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->linkch_task,
		     &jme_link_change_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->txclean_task,
		     &jme_tx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxclean_task,
		     &jme_rx_clean_tasklet,
		     (unsigned long) jme);
	tasklet_init(&jme->rxempty_task,
		     &jme_rx_empty_tasklet,
		     (unsigned long) jme);
	/* Clean/empty tasklets stay disabled until the device is opened. */
	tasklet_disable_nosync(&jme->txclean_task);
	tasklet_disable_nosync(&jme->rxclean_task);
	tasklet_disable_nosync(&jme->rxempty_task);
	jme->dpi.cur = PCC_P1;

	jme->reg_ghc = 0;
	jme->reg_rxcs = RXCS_DEFAULT;
	jme->reg_rxmcs = RXMCS_DEFAULT;
	jme->reg_txpfc = 0;
	jme->reg_pmcs = PMCS_MFEN;
	set_bit(JME_FLAG_TXCSUM, &jme->flags);
	set_bit(JME_FLAG_TSO, &jme->flags);

	/*
	 * Get Max Read Req Size from PCI Config Space
	 */
	pci_read_config_byte(pdev, PCI_DCSR_MRRS, &jme->mrrs);
	jme->mrrs &= PCI_DCSR_MRRS_MASK;
	switch (jme->mrrs) {
	case MRRS_128B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
		break;
	case MRRS_256B:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
		break;
	default:
		jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
		break;
	};

	/*
	 * Must check before reset_mac_processor
	 */
	jme_check_hw_ver(jme);
	jme->mii_if.dev = netdev;
	if (jme->fpgaver) {
		/* FPGA builds: scan MII addresses 1..31 for a live PHY. */
		jme->mii_if.phy_id = 0;
		for (i = 1 ; i < 32 ; ++i) {
			bmcr = jme_mdio_read(netdev, i, MII_BMCR);
			bmsr = jme_mdio_read(netdev, i, MII_BMSR);
			if (bmcr != 0xFFFFU && (bmcr != 0 || bmsr != 0)) {
				jme->mii_if.phy_id = i;
				break;
			}
		}

		if (!jme->mii_if.phy_id) {
			rc = -EIO;
			jeprintk(pdev, "Can not find phy_id.\n");
			 goto err_out_free_shadow;
		}

		jme->reg_ghc |= GHC_LINK_POLL;
	} else {
		jme->mii_if.phy_id = 1;
	}
	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
		jme->mii_if.supports_gmii = true;
	else
		jme->mii_if.supports_gmii = false;
	jme->mii_if.mdio_read = jme_mdio_read;
	jme->mii_if.mdio_write = jme_mdio_write;

	jme_clear_pm(jme);
	jme_set_phyfifoa(jme);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
	if (!jme->fpgaver)
		jme_phy_init(jme);
	jme_phy_off(jme);

	/*
	 * Reset MAC processor and reload EEPROM for MAC Address
	 */
	jme_reset_mac_processor(jme);
	rc = jme_reload_eeprom(jme);
	if (rc) {
		jeprintk(pdev,
			"Reload eeprom for reading MAC Address error.\n");
		goto err_out_free_shadow;
	}
	jme_load_macaddr(netdev);

	/*
	 * Tell stack that we are not ready to work until open()
	 */
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	/*
	 * Register netdev
	 */
	rc = register_netdev(netdev);
	if (rc) {
		jeprintk(pdev, "Cannot register net device.\n");
		goto err_out_free_shadow;
	}

	msg_probe(jme,
		"JMC250 gigabit%s ver:%x rev:%x "
		"macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
		(jme->fpgaver != 0) ? " (FPGA)" : "",
		(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
		jme->rev,
		netdev->dev_addr[0],
		netdev->dev_addr[1],
		netdev->dev_addr[2],
		netdev->dev_addr[3],
		netdev->dev_addr[4],
		netdev->dev_addr[5]);

	return 0;

err_out_free_shadow:
	pci_free_consistent(pdev,
			    sizeof(u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
err_out_unmap:
	iounmap(jme->regs);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
err_out:
	return rc;
}
2880
/*
 * PCI remove: tear down in the reverse order of jme_init_one() —
 * unregister first so no new I/O arrives, then release DMA memory,
 * mappings and PCI resources.
 */
static void __devexit
jme_remove_one(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	unregister_netdev(netdev);
	pci_free_consistent(pdev,
			    sizeof(u32) * SHADOW_REG_NR,
			    jme->shadow_regs,
			    jme->shadow_dma);
	iounmap(jme->regs);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

}
2899
/*
 * PM suspend: quiesce the interface, free the rings, and either arm
 * the configured wake-up sources (forcing 100M half-duplex so WoL
 * works at low power) or power the PHY off entirely, before entering
 * D3cold.
 */
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	/* Block the link-change tasklet from doing further work. */
	atomic_dec(&jme->link_changing);

	netif_device_detach(netdev);
	netif_stop_queue(netdev);
	jme_stop_irq(jme);

	tasklet_disable(&jme->txclean_task);
	tasklet_disable(&jme->rxclean_task);
	tasklet_disable(&jme->rxempty_task);

	jme_disable_shadow(jme);

	if (netif_carrier_ok(netdev)) {
		if (test_bit(JME_FLAG_POLL, &jme->flags))
			jme_polling_mode(jme);

		jme_stop_pcc_timer(jme);
		jme_reset_ghc_speed(jme);
		jme_disable_rx_engine(jme);
		jme_disable_tx_engine(jme);
		jme_reset_mac_processor(jme);
		jme_free_rx_resources(jme);
		jme_free_tx_resources(jme);
		netif_carrier_off(netdev);
		jme->phylink = 0;
	}

	tasklet_enable(&jme->txclean_task);
	tasklet_hi_enable(&jme->rxclean_task);
	tasklet_hi_enable(&jme->rxempty_task);

	pci_save_state(pdev);
	if (jme->reg_pmcs) {
		/* Wake-up configured: keep the link alive at 100M half. */
		jme_set_100m_half(jme);

		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
			jme_wait_link(jme);

		jwrite32(jme, JME_PMCS, jme->reg_pmcs);

		pci_enable_wake(pdev, PCI_D3cold, true);
	} else {
		jme_phy_off(jme);
	}
	pci_set_power_state(pdev, PCI_D3cold);

	return 0;
}
2954
/*
 * PM resume: restore PCI state, re-apply the user's remembered link
 * settings (or reset the PHY to defaults), re-enable shadow registers
 * and interrupts, and kick off link re-establishment.
 */
static int
jme_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct jme_adapter *jme = netdev_priv(netdev);

	jme_clear_pm(jme);
	pci_restore_state(pdev);

	if (test_bit(JME_FLAG_SSET, &jme->flags))
		jme_set_settings(netdev, &jme->old_ecmd);
	else
		jme_reset_phy_processor(jme);

	jme_enable_shadow(jme);
	jme_start_irq(jme);
	netif_device_attach(netdev);

	/* Balance the atomic_dec done in jme_suspend(). */
	atomic_inc(&jme->link_changing);

	jme_reset_link(jme);

	return 0;
}
2979
/* PCI IDs this driver binds to (JMC250 gigabit, JMC260 fast ethernet). */
static struct pci_device_id jme_pci_tbl[] = {
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
	{ }
};
2985
/* PCI driver glue; suspend/resume only when power management is built. */
static struct pci_driver jme_driver = {
	.name           = DRV_NAME,
	.id_table       = jme_pci_tbl,
	.probe          = jme_init_one,
	.remove         = __devexit_p(jme_remove_one),
#ifdef CONFIG_PM
	.suspend        = jme_suspend,
	.resume         = jme_resume,
#endif /* CONFIG_PM */
};
2996
/* Module entry point: announce the driver and register with PCI. */
static int __init
jme_init_module(void)
{
	printk(KERN_INFO PFX "JMicron JMC250 gigabit ethernet "
	       "driver version %s\n", DRV_VERSION);
	return pci_register_driver(&jme_driver);
}
3004
/* Module exit point: unregister from the PCI core. */
static void __exit
jme_cleanup_module(void)
{
	pci_unregister_driver(&jme_driver);
}
3010
module_init(jme_init_module);
module_exit(jme_cleanup_module);

/* Module metadata. */
MODULE_AUTHOR("Guo-Fu Tseng <cooldavid@cooldavid.org>");
MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
3019
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
new file mode 100644
index 000000000000..b29688431a6d
--- /dev/null
+++ b/drivers/net/jme.h
@@ -0,0 +1,1199 @@
1/*
2 * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
3 *
4 * Copyright 2008 JMicron Technology Corporation
5 * http://www.jmicron.com/
6 *
7 * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#ifndef __JME_H_INCLUDED__
25#define __JME_H_INCLUDEE__
26
/* Driver identification. */
#define DRV_NAME	"jme"
#define DRV_VERSION	"1.0.2"
#define PFX		DRV_NAME ": "

/* PCI device IDs handled by this driver. */
#define PCI_DEVICE_ID_JMICRON_JMC250	0x0250
#define PCI_DEVICE_ID_JMICRON_JMC260	0x0260
33
/*
 * Message related definitions
 */
/* Default NETIF_MSG_* categories enabled at probe time. */
#define JME_DEF_MSG_ENABLE \
	(NETIF_MSG_PROBE | \
	NETIF_MSG_LINK | \
	NETIF_MSG_RX_ERR | \
	NETIF_MSG_TX_ERR | \
	NETIF_MSG_HW)

/* Unconditional error print; note the pdev argument is ignored. */
#define jeprintk(pdev, fmt, args...) \
	printk(KERN_ERR PFX fmt, ## args)

/* Verbose TX path tracing, compiled in only with TX_DEBUG. */
#ifdef TX_DEBUG
#define tx_dbg(priv, fmt, args...) \
	printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
#else
#define tx_dbg(priv, fmt, args...)
#endif
53
/*
 * Print a message of the given level when the corresponding
 * netif_msg_* category bit is enabled for this adapter.
 * Wrapped in do { } while (0) so the embedded `if` cannot capture an
 * unrelated `else` at the call site (dangling-else hazard of the
 * original bare-if form).
 */
#define jme_msg(msglvl, type, priv, fmt, args...) \
	do { \
		if (netif_msg_##type(priv)) \
			printk(msglvl "%s: " fmt, (priv)->dev->name, ## args); \
	} while (0)
57
/* Per-category convenience wrappers around jme_msg(). */
#define msg_probe(priv, fmt, args...) \
	jme_msg(KERN_INFO, probe, priv, fmt, ## args)

#define msg_link(priv, fmt, args...) \
	jme_msg(KERN_INFO, link, priv, fmt, ## args)

#define msg_intr(priv, fmt, args...) \
	jme_msg(KERN_INFO, intr, priv, fmt, ## args)

#define msg_rx_err(priv, fmt, args...) \
	jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)

#define msg_rx_status(priv, fmt, args...) \
	jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)

#define msg_tx_err(priv, fmt, args...) \
	jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)

#define msg_tx_done(priv, fmt, args...) \
	jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)

#define msg_tx_queued(priv, fmt, args...) \
	jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)

#define msg_hw(priv, fmt, args...) \
	jme_msg(KERN_ERR, hw, priv, fmt, ## args)
84
/*
 * Extra PCI Configuration space interface
 */
/* Device Control register offset holding the Max Read Request Size. */
#define PCI_DCSR_MRRS		0x59
#define PCI_DCSR_MRRS_MASK	0x70

enum pci_dcsr_mrrs_vals {
	MRRS_128B	= 0x00,
	MRRS_256B	= 0x10,
	MRRS_512B	= 0x20,
	MRRS_1024B	= 0x30,
	MRRS_2048B	= 0x40,
	MRRS_4096B	= 0x50,
};

/* Bit-banged SPI interface exposed through PCI config space. */
#define PCI_SPI			0xB0

enum pci_spi_bits {
	SPI_EN		= 0x10,
	SPI_MISO	= 0x08,
	SPI_MOSI	= 0x04,
	SPI_SCLK	= 0x02,
	SPI_CS		= 0x01,
};

/* User-space SPI transaction descriptor passed via the private ioctl. */
struct jme_spi_op {
	void __user *uwbuf;
	void __user *urbuf;
	__u8	wn;	/* Number of write actions */
	__u8	rn;	/* Number of read actions */
	__u8	bitn;	/* Number of bits per action */
	__u8	spd;	/* The maximum acceptable speed of controller, in MHz.*/
	__u8	mode;	/* CPOL, CPHA, and Duplex mode of SPI */

	/* Internal use only */
	u8	*kwbuf;
	u8	*krbuf;
	u8	sr;
	u16	halfclk; /* Half of clock cycle calculated from spd, in ns */
};

enum jme_spi_op_bits {
	SPI_MODE_CPHA	= 0x01,
	SPI_MODE_CPOL	= 0x02,
	SPI_MODE_DUP	= 0x80,
};

#define HALF_US 500	/* 500 ns */
#define JMESPIIOCTL	SIOCDEVPRIVATE
134
/*
 * Dynamic(adaptive)/Static PCC values
 */
/* PCC profile indices plus per-profile timeout (_TO) and frame-count
 * (_CNT) coalescing parameters. */
enum dynamic_pcc_values {
	PCC_OFF		= 0,
	PCC_P1		= 1,
	PCC_P2		= 2,
	PCC_P3		= 3,

	PCC_OFF_TO	= 0,
	PCC_P1_TO	= 1,
	PCC_P2_TO	= 64,
	PCC_P3_TO	= 128,

	PCC_OFF_CNT	= 0,
	PCC_P1_CNT	= 1,
	PCC_P2_CNT	= 16,
	PCC_P3_CNT	= 32,
};
/* Running state of the adaptive coalescing algorithm. */
struct dynpcc_info {
	unsigned long	last_bytes;	/* byte count at last sample */
	unsigned long	last_pkts;	/* packet count at last sample */
	unsigned long	intr_cnt;	/* interrupts since last sample */
	unsigned char	cur;		/* currently programmed profile */
	unsigned char	attempt;	/* candidate profile */
	unsigned char	cnt;		/* consecutive votes for attempt */
};
#define PCC_INTERVAL_US	100000
#define PCC_INTERVAL		(HZ / (1000000 / PCC_INTERVAL_US))
#define PCC_P3_THRESHOLD	(2 * 1024 * 1024)
#define PCC_P2_THRESHOLD	800
#define PCC_INTR_THRESHOLD	800
#define PCC_TX_TO		1000
#define PCC_TX_CNT		8
169
/*
 * TX/RX Descriptors
 *
 * TX/RX Ring DESC Count Must be multiple of 16 and <= 1024
 */
#define RING_DESC_ALIGN		16	/* Descriptor alignment */
#define TX_DESC_SIZE		16
#define TX_RING_NR		8
/*
 * Bytes to allocate for a TX ring of s descriptors, including the
 * alignment slack.  The argument is parenthesized so expression
 * arguments (e.g. n + 1) multiply correctly — the original expanded
 * `s * TX_DESC_SIZE` with s unparenthesized.
 */
#define TX_RING_ALLOC_SIZE(s)	(((s) * TX_DESC_SIZE) + RING_DESC_ALIGN)
179
/*
 * 16-byte TX descriptor, viewed three ways: desc1 (first segment),
 * desc2 (64-bit buffer address form) and descwb (write-back status
 * returned by hardware).
 */
struct txdesc {
	union {
		__u8	all[16];
		__le32	dw[4];
		struct {
			/* DW0 */
			__le16	vlan;
			__u8	rsv1;
			__u8	flags;

			/* DW1 */
			__le16	datalen;
			__le16	mss;

			/* DW2 */
			__le16	pktsize;
			__le16	rsv2;

			/* DW3 */
			__le32	bufaddr;
		} desc1;
		struct {
			/* DW0 */
			__le16	rsv1;
			__u8	rsv2;
			__u8	flags;

			/* DW1 */
			__le16	datalen;
			__le16	rsv3;

			/* DW2 */
			__le32	bufaddrh;

			/* DW3 */
			__le32	bufaddrl;
		} desc2;
		struct {
			/* DW0 */
			__u8	ehdrsz;
			__u8	rsv1;
			__u8	rsv2;
			__u8	flags;

			/* DW1 */
			__le16	trycnt;
			__le16	segcnt;

			/* DW2 */
			__le16	pktsz;
			__le16	rsv3;

			/* DW3 */
			__le32	bufaddrl;
		} descwb;
	};
};

/* Bits of txdesc.desc1/desc2.flags. */
enum jme_txdesc_flags_bits {
	TXFLAG_OWN	= 0x80,
	TXFLAG_INT	= 0x40,
	TXFLAG_64BIT	= 0x20,
	TXFLAG_TCPCS	= 0x10,
	TXFLAG_UDPCS	= 0x08,
	TXFLAG_IPCS	= 0x04,
	TXFLAG_LSEN	= 0x02,
	TXFLAG_TAGON	= 0x01,
};
248
#define TXDESC_MSS_SHIFT	2
/*
 * Bits of txdesc.descwb.flags (TX write-back status).
 * NOTE(review): the enum tag says "rxdescwb" although the constants
 * are TXWB_* — presumably a naming slip; confirm nothing references
 * the tag before renaming it.
 */
enum jme_rxdescwb_flags_bits {
	TXWBFLAG_OWN	= 0x80,
	TXWBFLAG_INT	= 0x40,
	TXWBFLAG_TMOUT	= 0x20,
	TXWBFLAG_TRYOUT	= 0x10,
	TXWBFLAG_COL	= 0x08,

	TXWBFLAG_ALLERR	= TXWBFLAG_TMOUT |
			  TXWBFLAG_TRYOUT |
			  TXWBFLAG_COL,
};
261
#define RX_DESC_SIZE		16
#define RX_RING_NR		4
/*
 * Bytes to allocate for an RX ring of s descriptors, including the
 * alignment slack.  The argument is parenthesized so expression
 * arguments (e.g. n + 1) multiply correctly — the original expanded
 * `s * RX_DESC_SIZE` with s unparenthesized.
 */
#define RX_RING_ALLOC_SIZE(s)	(((s) * RX_DESC_SIZE) + RING_DESC_ALIGN)
#define RX_BUF_DMA_ALIGN	8
#define RX_PREPAD_SIZE		10
/* NOTE(review): the Ethernet FCS is 4 bytes; confirm 2 is intended. */
#define ETH_CRC_LEN		2
#define RX_VLANHDR_LEN		2
/* Headroom reserved beyond the MTU in each RX buffer. */
#define RX_EXTRA_LEN		(RX_PREPAD_SIZE + \
				ETH_HLEN + \
				ETH_CRC_LEN + \
				RX_VLANHDR_LEN + \
				RX_BUF_DMA_ALIGN)
274
/*
 * 16-byte RX descriptor: desc1 is the form the driver posts (buffer
 * address), descwb is the write-back status the hardware returns.
 */
struct rxdesc {
	union {
		__u8	all[16];
		__le32	dw[4];
		struct {
			/* DW0 */
			__le16	rsv2;
			__u8	rsv1;
			__u8	flags;

			/* DW1 */
			__le16	datalen;
			__le16	wbcpl;

			/* DW2 */
			__le32	bufaddrh;

			/* DW3 */
			__le32	bufaddrl;
		} desc1;
		struct {
			/* DW0 */
			__le16	vlan;
			__le16	flags;

			/* DW1 */
			__le16	framesize;
			__u8	errstat;
			__u8	desccnt;

			/* DW2 */
			__le32	rsshash;

			/* DW3 */
			__u8	hashfun;
			__u8	hashtype;
			__le16	resrv;
		} descwb;
	};
};

/* Bits of rxdesc.desc1.flags (driver-posted descriptor). */
enum jme_rxdesc_flags_bits {
	RXFLAG_OWN	= 0x80,
	RXFLAG_INT	= 0x40,
	RXFLAG_64BIT	= 0x20,
};

/*
 * Bits of rxdesc.descwb.flags (write-back status).
 * NOTE(review): RXWBFLAG_MF and RXWBFLAG_64BIT share value 0x2000 —
 * confirm against the datasheet that this overlap is intended.
 */
enum jme_rxwbdesc_flags_bits {
	RXWBFLAG_OWN		= 0x8000,
	RXWBFLAG_INT		= 0x4000,
	RXWBFLAG_MF		= 0x2000,
	RXWBFLAG_64BIT		= 0x2000,
	RXWBFLAG_TCPON		= 0x1000,
	RXWBFLAG_UDPON		= 0x0800,
	RXWBFLAG_IPCS		= 0x0400,
	RXWBFLAG_TCPCS		= 0x0200,
	RXWBFLAG_UDPCS		= 0x0100,
	RXWBFLAG_TAGON		= 0x0080,
	RXWBFLAG_IPV4		= 0x0040,
	RXWBFLAG_IPV6		= 0x0020,
	RXWBFLAG_PAUSE		= 0x0010,
	RXWBFLAG_MAGIC		= 0x0008,
	RXWBFLAG_WAKEUP		= 0x0004,
	RXWBFLAG_DEST		= 0x0003,
	RXWBFLAG_DEST_UNI	= 0x0001,
	RXWBFLAG_DEST_MUL	= 0x0002,
	RXWBFLAG_DEST_BRO	= 0x0003,
};

/* Fields of rxdesc.descwb.desccnt. */
enum jme_rxwbdesc_desccnt_mask {
	RXWBDCNT_WBCPL	= 0x80,
	RXWBDCNT_DCNT	= 0x7F,
};

/* Bits of rxdesc.descwb.errstat. */
enum jme_rxwbdesc_errstat_bits {
	RXWBERR_LIMIT	= 0x80,
	RXWBERR_MIIER	= 0x40,
	RXWBERR_NIBON	= 0x20,
	RXWBERR_COLON	= 0x10,
	RXWBERR_ABORT	= 0x08,
	RXWBERR_SHORT	= 0x04,
	RXWBERR_OVERUN	= 0x02,
	RXWBERR_CRCERR	= 0x01,
	RXWBERR_ALLERR	= 0xFF,
};
360
/*
 * Buffer information corresponding to ring descriptors.
 */
struct jme_buffer_info {
	struct sk_buff *skb;		/* mapped packet, NULL when free */
	dma_addr_t mapping;		/* DMA address of the data */
	int len;			/* mapped length */
	int nr_desc;			/* descriptors used by this packet */
	unsigned long start_xmit;	/* jiffies at queue time (TX) */
};

/*
 * The structure holding buffer information and ring descriptors all together.
 */
#define MAX_RING_DESC_NR	1024
struct jme_ring {
	void *alloc;		/* pointer to allocated memory */
	void *desc;		/* pointer to ring memory  */
	dma_addr_t dmaalloc;	/* phys address of ring alloc */
	dma_addr_t dma;		/* phys address for ring dma */

	/* Buffer information corresponding to each descriptor */
	struct jme_buffer_info bufinf[MAX_RING_DESC_NR];

	int next_to_use;	/* producer index (driver) */
	atomic_t next_to_clean;	/* consumer index (cleanup tasklets) */
	atomic_t nr_free;	/* free descriptors remaining */
};
389
/*
 * Access the statistics embedded in struct net_device.  The argument
 * is parenthesized so expression arguments bind correctly — the
 * original expanded `priv->dev->stats` with priv unparenthesized.
 */
#define NET_STAT(priv) ((priv)->dev->stats)
/* Compatibility hooks: expand to nothing in this configuration. */
#define NETDEV_GET_STATS(netdev, fun_ptr)
#define DECLARE_NET_DEVICE_STATS
393
/*
 * NAPI compatibility wrappers.
 * NOTE(review): several of these embed a trailing ';' or a bare `if`
 * in their expansion, so call sites depend on the exact current form —
 * change the expansions and call sites together, or not at all.
 */
#define DECLARE_NAPI_STRUCT struct napi_struct napi;
#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
	netif_napi_add(dev, napis, pollfn, q);
#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
#define JME_NAPI_WEIGHT(w) int w
#define JME_NAPI_WEIGHT_VAL(w) w
#define JME_NAPI_WEIGHT_SET(w, r)
#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis)
#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
#define JME_NAPI_DISABLE(priv) \
	if (!napi_disable_pending(&priv->napi)) \
		napi_disable(&priv->napi);
#define JME_RX_SCHEDULE_PREP(priv) \
	netif_rx_schedule_prep(priv->dev, &priv->napi)
#define JME_RX_SCHEDULE(priv) \
	__netif_rx_schedule(priv->dev, &priv->napi);
410
/*
 * Jmac Adapter Private data
 */
#define SHADOW_REG_NR 8
struct jme_adapter {
	struct pci_dev          *pdev;
	struct net_device       *dev;
	void __iomem            *regs;		/* mapped BAR 0 */
	dma_addr_t		shadow_dma;	/* DMA handle of shadow regs */
	u32			*shadow_regs;	/* device-written reg shadow */
	struct mii_if_info	mii_if;
	struct jme_ring		rxring[RX_RING_NR];
	struct jme_ring		txring[TX_RING_NR];
	/* Locks: PHY/MDIO access, MAC address update, RXMCS register. */
	spinlock_t		phy_lock;
	spinlock_t		macaddr_lock;
	spinlock_t		rxmcs_lock;
	struct tasklet_struct	rxempty_task;
	struct tasklet_struct	rxclean_task;
	struct tasklet_struct	txclean_task;
	struct tasklet_struct	linkch_task;
	struct tasklet_struct	pcc_task;
	unsigned long		flags;		/* JME_FLAG_* bits */
	/* Software copies of hardware registers. */
	u32			reg_txcs;
	u32			reg_txpfc;
	u32			reg_rxcs;
	u32			reg_rxmcs;
	u32			reg_ghc;
	u32			reg_pmcs;
	u32			phylink;
	u32			tx_ring_size;
	u32			tx_ring_mask;
	u32			tx_wake_threshold;
	u32			rx_ring_size;
	u32			rx_ring_mask;
	u8			mrrs;		/* PCI max read request size */
	unsigned int		fpgaver;
	unsigned int		chiprev;
	u8			rev;		/* PCI revision ID */
	u32			msg_enable;
	struct ethtool_cmd	old_ecmd;	/* user settings for resume */
	unsigned int		old_mtu;
	struct vlan_group	*vlgrp;
	struct dynpcc_info	dpi;		/* adaptive coalescing state */
	atomic_t		intr_sem;
	atomic_t		link_changing;
	atomic_t		tx_cleaning;
	atomic_t		rx_cleaning;
	atomic_t		rx_empty;
	/* RX delivery hooks: netif_rx vs. netif_receive_skb (NAPI). */
	int			(*jme_rx)(struct sk_buff *skb);
	int			(*jme_vlan_rx)(struct sk_buff *skb,
					  struct vlan_group *grp,
					  unsigned short vlan_tag);
	DECLARE_NAPI_STRUCT
	DECLARE_NET_DEVICE_STATS
};
466
/* Word indices within the shadow_regs area. */
467enum shadow_reg_val {
468 SHADOW_IEVE = 0, /* interrupt event status mirror - see JME_IEVE */
469};
470
/*
 * Bit numbers for jme_adapter.flags.
 * NOTE(review): presumably used with set_bit()/test_bit() - confirm.
 */
471enum jme_flags_bits {
472 JME_FLAG_MSI = 1, /* MSI interrupt mode in use */
473 JME_FLAG_SSET = 2, /* ethtool settings stored (cf. old_ecmd) */
474 JME_FLAG_TXCSUM = 3, /* TX checksum offload enabled */
475 JME_FLAG_TSO = 4, /* TCP segmentation offload enabled */
476 JME_FLAG_POLL = 5, /* NAPI polling mode enabled */
477 JME_FLAG_SHUTDOWN = 6, /* device shutting down */
478};
479
480#define TX_TIMEOUT (5 * HZ)
481#define JME_REG_LEN 0x500
482#define MAX_ETHERNET_JUMBO_PACKET_SIZE 9216
483
484static inline struct jme_adapter*
485jme_napi_priv(struct napi_struct *napi)
486{
487 struct jme_adapter *jme;
488 jme = container_of(napi, struct jme_adapter, napi);
489 return jme;
490}
491
492/*
493 * Memory-mapped I/O registers
494 */
495enum jme_iomap_offsets {
496 JME_MAC = 0x0000,
497 JME_PHY = 0x0400,
498 JME_MISC = 0x0800,
499 JME_RSS = 0x0C00,
500};
501
502enum jme_iomap_lens {
503 JME_MAC_LEN = 0x80,
504 JME_PHY_LEN = 0x58,
505 JME_MISC_LEN = 0x98,
506 JME_RSS_LEN = 0xFF,
507};
508
509enum jme_iomap_regs {
510 JME_TXCS = JME_MAC | 0x00, /* Transmit Control and Status */
511 JME_TXDBA_LO = JME_MAC | 0x04, /* Transmit Queue Desc Base Addr */
512 JME_TXDBA_HI = JME_MAC | 0x08, /* Transmit Queue Desc Base Addr */
513 JME_TXQDC = JME_MAC | 0x0C, /* Transmit Queue Desc Count */
514 JME_TXNDA = JME_MAC | 0x10, /* Transmit Queue Next Desc Addr */
515 JME_TXMCS = JME_MAC | 0x14, /* Transmit MAC Control Status */
516 JME_TXPFC = JME_MAC | 0x18, /* Transmit Pause Frame Control */
517 JME_TXTRHD = JME_MAC | 0x1C, /* Transmit Timer/Retry@Half-Dup */
518
519 JME_RXCS = JME_MAC | 0x20, /* Receive Control and Status */
520 JME_RXDBA_LO = JME_MAC | 0x24, /* Receive Queue Desc Base Addr */
521 JME_RXDBA_HI = JME_MAC | 0x28, /* Receive Queue Desc Base Addr */
522 JME_RXQDC = JME_MAC | 0x2C, /* Receive Queue Desc Count */
523 JME_RXNDA = JME_MAC | 0x30, /* Receive Queue Next Desc Addr */
524 JME_RXMCS = JME_MAC | 0x34, /* Receive MAC Control Status */
525 JME_RXUMA_LO = JME_MAC | 0x38, /* Receive Unicast MAC Address */
526 JME_RXUMA_HI = JME_MAC | 0x3C, /* Receive Unicast MAC Address */
527 JME_RXMCHT_LO = JME_MAC | 0x40, /* Recv Multicast Addr HashTable */
528 JME_RXMCHT_HI = JME_MAC | 0x44, /* Recv Multicast Addr HashTable */
529 JME_WFODP = JME_MAC | 0x48, /* Wakeup Frame Output Data Port */
530 JME_WFOI = JME_MAC | 0x4C, /* Wakeup Frame Output Interface */
531
532 JME_SMI = JME_MAC | 0x50, /* Station Management Interface */
533 JME_GHC = JME_MAC | 0x54, /* Global Host Control */
534 JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
535
536
537 JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
538 JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
539 JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
540 JME_SMBINTF = JME_PHY | 0x44, /* SMB Interface */
541
542
543 JME_TMCSR = JME_MISC | 0x00, /* Timer Control/Status Register */
544 JME_GPREG0 = JME_MISC | 0x08, /* General purpose REG-0 */
545 JME_GPREG1 = JME_MISC | 0x0C, /* General purpose REG-1 */
546 JME_IEVE = JME_MISC | 0x20, /* Interrupt Event Status */
547 JME_IREQ = JME_MISC | 0x24, /* Intr Req Status(For Debug) */
548 JME_IENS = JME_MISC | 0x28, /* Intr Enable - Setting Port */
549 JME_IENC = JME_MISC | 0x2C, /* Interrupt Enable - Clear Port */
550 JME_PCCRX0 = JME_MISC | 0x30, /* PCC Control for RX Queue 0 */
551 JME_PCCTX = JME_MISC | 0x40, /* PCC Control for TX Queues */
552 JME_CHIPMODE = JME_MISC | 0x44, /* Identify FPGA Version */
553 JME_SHBA_HI = JME_MISC | 0x48, /* Shadow Register Base HI */
554 JME_SHBA_LO = JME_MISC | 0x4C, /* Shadow Register Base LO */
555 JME_TIMER1 = JME_MISC | 0x70, /* Timer1 */
556 JME_TIMER2 = JME_MISC | 0x74, /* Timer2 */
557 JME_APMC = JME_MISC | 0x7C, /* Aggressive Power Mode Control */
558 JME_PCCSRX0 = JME_MISC | 0x80, /* PCC Status of RX0 */
559};
560
561/*
562 * TX Control/Status Bits
563 */
564enum jme_txcs_bits {
565 TXCS_QUEUE7S = 0x00008000,
566 TXCS_QUEUE6S = 0x00004000,
567 TXCS_QUEUE5S = 0x00002000,
568 TXCS_QUEUE4S = 0x00001000,
569 TXCS_QUEUE3S = 0x00000800,
570 TXCS_QUEUE2S = 0x00000400,
571 TXCS_QUEUE1S = 0x00000200,
572 TXCS_QUEUE0S = 0x00000100,
573 TXCS_FIFOTH = 0x000000C0,
574 TXCS_DMASIZE = 0x00000030,
575 TXCS_BURST = 0x00000004,
576 TXCS_ENABLE = 0x00000001,
577};
578
579enum jme_txcs_value {
580 TXCS_FIFOTH_16QW = 0x000000C0,
581 TXCS_FIFOTH_12QW = 0x00000080,
582 TXCS_FIFOTH_8QW = 0x00000040,
583 TXCS_FIFOTH_4QW = 0x00000000,
584
585 TXCS_DMASIZE_64B = 0x00000000,
586 TXCS_DMASIZE_128B = 0x00000010,
587 TXCS_DMASIZE_256B = 0x00000020,
588 TXCS_DMASIZE_512B = 0x00000030,
589
590 TXCS_SELECT_QUEUE0 = 0x00000000,
591 TXCS_SELECT_QUEUE1 = 0x00010000,
592 TXCS_SELECT_QUEUE2 = 0x00020000,
593 TXCS_SELECT_QUEUE3 = 0x00030000,
594 TXCS_SELECT_QUEUE4 = 0x00040000,
595 TXCS_SELECT_QUEUE5 = 0x00050000,
596 TXCS_SELECT_QUEUE6 = 0x00060000,
597 TXCS_SELECT_QUEUE7 = 0x00070000,
598
599 TXCS_DEFAULT = TXCS_FIFOTH_4QW |
600 TXCS_BURST,
601};
602
603#define JME_TX_DISABLE_TIMEOUT 10 /* 10 msec */
604
605/*
606 * TX MAC Control/Status Bits
607 */
608enum jme_txmcs_bit_masks {
609 TXMCS_IFG2 = 0xC0000000,
610 TXMCS_IFG1 = 0x30000000,
611 TXMCS_TTHOLD = 0x00000300,
612 TXMCS_FBURST = 0x00000080,
613 TXMCS_CARRIEREXT = 0x00000040,
614 TXMCS_DEFER = 0x00000020,
615 TXMCS_BACKOFF = 0x00000010,
616 TXMCS_CARRIERSENSE = 0x00000008,
617 TXMCS_COLLISION = 0x00000004,
618 TXMCS_CRC = 0x00000002,
619 TXMCS_PADDING = 0x00000001,
620};
621
622enum jme_txmcs_values {
623 TXMCS_IFG2_6_4 = 0x00000000,
624 TXMCS_IFG2_8_5 = 0x40000000,
625 TXMCS_IFG2_10_6 = 0x80000000,
626 TXMCS_IFG2_12_7 = 0xC0000000,
627
628 TXMCS_IFG1_8_4 = 0x00000000,
629 TXMCS_IFG1_12_6 = 0x10000000,
630 TXMCS_IFG1_16_8 = 0x20000000,
631 TXMCS_IFG1_20_10 = 0x30000000,
632
633 TXMCS_TTHOLD_1_8 = 0x00000000,
634 TXMCS_TTHOLD_1_4 = 0x00000100,
635 TXMCS_TTHOLD_1_2 = 0x00000200,
636 TXMCS_TTHOLD_FULL = 0x00000300,
637
638 TXMCS_DEFAULT = TXMCS_IFG2_8_5 |
639 TXMCS_IFG1_16_8 |
640 TXMCS_TTHOLD_FULL |
641 TXMCS_DEFER |
642 TXMCS_CRC |
643 TXMCS_PADDING,
644};
645
646enum jme_txpfc_bits_masks {
647 TXPFC_VLAN_TAG = 0xFFFF0000,
648 TXPFC_VLAN_EN = 0x00008000,
649 TXPFC_PF_EN = 0x00000001,
650};
651
652enum jme_txtrhd_bits_masks {
653 TXTRHD_TXPEN = 0x80000000,
654 TXTRHD_TXP = 0x7FFFFF00,
655 TXTRHD_TXREN = 0x00000080,
656 TXTRHD_TXRL = 0x0000007F,
657};
658
659enum jme_txtrhd_shifts {
660 TXTRHD_TXP_SHIFT = 8,
661 TXTRHD_TXRL_SHIFT = 0,
662};
663
664/*
665 * RX Control/Status Bits
666 */
/* Bit masks for the JME_RXCS (receive control/status) register. */
667enum jme_rxcs_bit_masks {
668 /* FIFO full threshold for transmitting Tx Pause Packet */
669 RXCS_FIFOTHTP = 0x30000000,
670 /* FIFO threshold for processing next packet */
671 RXCS_FIFOTHNP = 0x0C000000,
672 RXCS_DMAREQSZ = 0x03000000, /* DMA Request Size */
673 RXCS_QUEUESEL = 0x00030000, /* Queue selection */
674 RXCS_RETRYGAP = 0x0000F000, /* RX Desc full retry gap */
675 RXCS_RETRYCNT = 0x00000F00, /* RX Desc full retry counter */
676 RXCS_WAKEUP = 0x00000040, /* Enable receive wakeup packet */
677 RXCS_MAGIC = 0x00000020, /* Enable receive magic packet */
678 RXCS_SHORT = 0x00000010, /* Enable receive short packet */
679 RXCS_ABORT = 0x00000008, /* Enable receive error packet */
680 RXCS_QST = 0x00000004, /* Receive queue start */
681 RXCS_SUSPEND = 0x00000002, /* Receive suspend */
682 RXCS_ENABLE = 0x00000001, /* Receive enable */
683};
684
685enum jme_rxcs_values {
686 RXCS_FIFOTHTP_16T = 0x00000000,
687 RXCS_FIFOTHTP_32T = 0x10000000,
688 RXCS_FIFOTHTP_64T = 0x20000000,
689 RXCS_FIFOTHTP_128T = 0x30000000,
690
691 RXCS_FIFOTHNP_16QW = 0x00000000,
692 RXCS_FIFOTHNP_32QW = 0x04000000,
693 RXCS_FIFOTHNP_64QW = 0x08000000,
694 RXCS_FIFOTHNP_128QW = 0x0C000000,
695
696 RXCS_DMAREQSZ_16B = 0x00000000,
697 RXCS_DMAREQSZ_32B = 0x01000000,
698 RXCS_DMAREQSZ_64B = 0x02000000,
699 RXCS_DMAREQSZ_128B = 0x03000000,
700
701 RXCS_QUEUESEL_Q0 = 0x00000000,
702 RXCS_QUEUESEL_Q1 = 0x00010000,
703 RXCS_QUEUESEL_Q2 = 0x00020000,
704 RXCS_QUEUESEL_Q3 = 0x00030000,
705
706 RXCS_RETRYGAP_256ns = 0x00000000,
707 RXCS_RETRYGAP_512ns = 0x00001000,
708 RXCS_RETRYGAP_1024ns = 0x00002000,
709 RXCS_RETRYGAP_2048ns = 0x00003000,
710 RXCS_RETRYGAP_4096ns = 0x00004000,
711 RXCS_RETRYGAP_8192ns = 0x00005000,
712 RXCS_RETRYGAP_16384ns = 0x00006000,
713 RXCS_RETRYGAP_32768ns = 0x00007000,
714
715 RXCS_RETRYCNT_0 = 0x00000000,
716 RXCS_RETRYCNT_4 = 0x00000100,
717 RXCS_RETRYCNT_8 = 0x00000200,
718 RXCS_RETRYCNT_12 = 0x00000300,
719 RXCS_RETRYCNT_16 = 0x00000400,
720 RXCS_RETRYCNT_20 = 0x00000500,
721 RXCS_RETRYCNT_24 = 0x00000600,
722 RXCS_RETRYCNT_28 = 0x00000700,
723 RXCS_RETRYCNT_32 = 0x00000800,
724 RXCS_RETRYCNT_36 = 0x00000900,
725 RXCS_RETRYCNT_40 = 0x00000A00,
726 RXCS_RETRYCNT_44 = 0x00000B00,
727 RXCS_RETRYCNT_48 = 0x00000C00,
728 RXCS_RETRYCNT_52 = 0x00000D00,
729 RXCS_RETRYCNT_56 = 0x00000E00,
730 RXCS_RETRYCNT_60 = 0x00000F00,
731
732 RXCS_DEFAULT = RXCS_FIFOTHTP_128T |
733 RXCS_FIFOTHNP_128QW |
734 RXCS_DMAREQSZ_128B |
735 RXCS_RETRYGAP_256ns |
736 RXCS_RETRYCNT_32,
737};
738
739#define JME_RX_DISABLE_TIMEOUT 10 /* 10 msec */
740
741/*
742 * RX MAC Control/Status Bits
743 */
744enum jme_rxmcs_bits {
745 RXMCS_ALLFRAME = 0x00000800,
746 RXMCS_BRDFRAME = 0x00000400,
747 RXMCS_MULFRAME = 0x00000200,
748 RXMCS_UNIFRAME = 0x00000100,
749 RXMCS_ALLMULFRAME = 0x00000080,
750 RXMCS_MULFILTERED = 0x00000040,
751 RXMCS_RXCOLLDEC = 0x00000020,
752 RXMCS_FLOWCTRL = 0x00000008,
753 RXMCS_VTAGRM = 0x00000004,
754 RXMCS_PREPAD = 0x00000002,
755 RXMCS_CHECKSUM = 0x00000001,
756
757 RXMCS_DEFAULT = RXMCS_VTAGRM |
758 RXMCS_PREPAD |
759 RXMCS_FLOWCTRL |
760 RXMCS_CHECKSUM,
761};
762
763/*
764 * Wakeup Frame setup interface registers
765 */
766#define WAKEUP_FRAME_NR 8
767#define WAKEUP_FRAME_MASK_DWNR 4
768
769enum jme_wfoi_bit_masks {
770 WFOI_MASK_SEL = 0x00000070,
771 WFOI_CRC_SEL = 0x00000008,
772 WFOI_FRAME_SEL = 0x00000007,
773};
774
775enum jme_wfoi_shifts {
776 WFOI_MASK_SHIFT = 4,
777};
778
779/*
780 * SMI Related definitions
781 */
/* JME_SMI register bits - MDIO management interface to the PHY. */
782enum jme_smi_bit_mask {
783 SMI_DATA_MASK = 0xFFFF0000,
784 SMI_REG_ADDR_MASK = 0x0000F800,
785 SMI_PHY_ADDR_MASK = 0x000007C0,
786 SMI_OP_WRITE = 0x00000020,
787 /* Set to 1, after req done it'll be cleared to 0 */
788 SMI_OP_REQ = 0x00000010,
789 SMI_OP_MDIO = 0x00000008, /* Software access In/Out */
790 SMI_OP_MDOE = 0x00000004, /* Software Output Enable */
791 SMI_OP_MDC = 0x00000002, /* Software CLK Control */
792 SMI_OP_MDEN = 0x00000001, /* Software access Enable */
793};
794
795enum jme_smi_bit_shift {
796 SMI_DATA_SHIFT = 16,
797 SMI_REG_ADDR_SHIFT = 11,
798 SMI_PHY_ADDR_SHIFT = 6,
799};
800
801static inline u32 smi_reg_addr(int x)
802{
803 return (x << SMI_REG_ADDR_SHIFT) & SMI_REG_ADDR_MASK;
804}
805
806static inline u32 smi_phy_addr(int x)
807{
808 return (x << SMI_PHY_ADDR_SHIFT) & SMI_PHY_ADDR_MASK;
809}
810
811#define JME_PHY_TIMEOUT 100 /* 100 msec */
812#define JME_PHY_REG_NR 32
813
814/*
815 * Global Host Control
816 */
817enum jme_ghc_bit_mask {
818 GHC_SWRST = 0x40000000,
819 GHC_DPX = 0x00000040,
820 GHC_SPEED = 0x00000030,
821 GHC_LINK_POLL = 0x00000001,
822};
823
824enum jme_ghc_speed_val {
825 GHC_SPEED_10M = 0x00000010,
826 GHC_SPEED_100M = 0x00000020,
827 GHC_SPEED_1000M = 0x00000030,
828};
829
830/*
831 * Power management control and status register
832 */
833enum jme_pmcs_bit_masks {
834 PMCS_WF7DET = 0x80000000,
835 PMCS_WF6DET = 0x40000000,
836 PMCS_WF5DET = 0x20000000,
837 PMCS_WF4DET = 0x10000000,
838 PMCS_WF3DET = 0x08000000,
839 PMCS_WF2DET = 0x04000000,
840 PMCS_WF1DET = 0x02000000,
841 PMCS_WF0DET = 0x01000000,
842 PMCS_LFDET = 0x00040000,
843 PMCS_LRDET = 0x00020000,
844 PMCS_MFDET = 0x00010000,
845 PMCS_WF7EN = 0x00008000,
846 PMCS_WF6EN = 0x00004000,
847 PMCS_WF5EN = 0x00002000,
848 PMCS_WF4EN = 0x00001000,
849 PMCS_WF3EN = 0x00000800,
850 PMCS_WF2EN = 0x00000400,
851 PMCS_WF1EN = 0x00000200,
852 PMCS_WF0EN = 0x00000100,
853 PMCS_LFEN = 0x00000004,
854 PMCS_LREN = 0x00000002,
855 PMCS_MFEN = 0x00000001,
856};
857
858/*
859 * Giga PHY Status Registers
860 */
861enum jme_phy_link_bit_mask {
862 PHY_LINK_SPEED_MASK = 0x0000C000,
863 PHY_LINK_DUPLEX = 0x00002000,
864 PHY_LINK_SPEEDDPU_RESOLVED = 0x00000800,
865 PHY_LINK_UP = 0x00000400,
866 PHY_LINK_AUTONEG_COMPLETE = 0x00000200,
867 PHY_LINK_MDI_STAT = 0x00000040,
868};
869
870enum jme_phy_link_speed_val {
871 PHY_LINK_SPEED_10M = 0x00000000,
872 PHY_LINK_SPEED_100M = 0x00004000,
873 PHY_LINK_SPEED_1000M = 0x00008000,
874};
875
876#define JME_SPDRSV_TIMEOUT 500 /* 500 us */
877
878/*
879 * SMB Control and Status
880 */
881enum jme_smbcsr_bit_mask {
882 SMBCSR_CNACK = 0x00020000,
883 SMBCSR_RELOAD = 0x00010000,
884 SMBCSR_EEPROMD = 0x00000020,
885 SMBCSR_INITDONE = 0x00000010,
886 SMBCSR_BUSY = 0x0000000F,
887};
888
889enum jme_smbintf_bit_mask {
890 SMBINTF_HWDATR = 0xFF000000,
891 SMBINTF_HWDATW = 0x00FF0000,
892 SMBINTF_HWADDR = 0x0000FF00,
893 SMBINTF_HWRWN = 0x00000020,
894 SMBINTF_HWCMD = 0x00000010,
895 SMBINTF_FASTM = 0x00000008,
896 SMBINTF_GPIOSCL = 0x00000004,
897 SMBINTF_GPIOSDA = 0x00000002,
898 SMBINTF_GPIOEN = 0x00000001,
899};
900
901enum jme_smbintf_vals {
902 SMBINTF_HWRWN_READ = 0x00000020,
903 SMBINTF_HWRWN_WRITE = 0x00000000,
904};
905
906enum jme_smbintf_shifts {
907 SMBINTF_HWDATR_SHIFT = 24,
908 SMBINTF_HWDATW_SHIFT = 16,
909 SMBINTF_HWADDR_SHIFT = 8,
910};
911
912#define JME_EEPROM_RELOAD_TIMEOUT 2000 /* 2000 msec */
913#define JME_SMB_BUSY_TIMEOUT 20 /* 20 msec */
914#define JME_SMB_LEN 256
915#define JME_EEPROM_MAGIC 0x250
916
917/*
918 * Timer Control/Status Register
919 */
920enum jme_tmcsr_bit_masks {
921 TMCSR_SWIT = 0x80000000,
922 TMCSR_EN = 0x01000000,
923 TMCSR_CNT = 0x00FFFFFF,
924};
925
926/*
927 * General Purpose REG-0
928 */
929enum jme_gpreg0_masks {
930 GPREG0_DISSH = 0xFF000000,
931 GPREG0_PCIRLMT = 0x00300000,
932 GPREG0_PCCNOMUTCLR = 0x00040000,
933 GPREG0_LNKINTPOLL = 0x00001000,
934 GPREG0_PCCTMR = 0x00000300,
935 GPREG0_PHYADDR = 0x0000001F,
936};
937
938enum jme_gpreg0_vals {
939 GPREG0_DISSH_DW7 = 0x80000000,
940 GPREG0_DISSH_DW6 = 0x40000000,
941 GPREG0_DISSH_DW5 = 0x20000000,
942 GPREG0_DISSH_DW4 = 0x10000000,
943 GPREG0_DISSH_DW3 = 0x08000000,
944 GPREG0_DISSH_DW2 = 0x04000000,
945 GPREG0_DISSH_DW1 = 0x02000000,
946 GPREG0_DISSH_DW0 = 0x01000000,
947 GPREG0_DISSH_ALL = 0xFF000000,
948
949 GPREG0_PCIRLMT_8 = 0x00000000,
950 GPREG0_PCIRLMT_6 = 0x00100000,
951 GPREG0_PCIRLMT_5 = 0x00200000,
952 GPREG0_PCIRLMT_4 = 0x00300000,
953
954 GPREG0_PCCTMR_16ns = 0x00000000,
955 GPREG0_PCCTMR_256ns = 0x00000100,
956 GPREG0_PCCTMR_1us = 0x00000200,
957 GPREG0_PCCTMR_1ms = 0x00000300,
958
959 GPREG0_PHYADDR_1 = 0x00000001,
960
961 GPREG0_DEFAULT = GPREG0_PCIRLMT_4 |
962 GPREG0_PCCTMR_1us |
963 GPREG0_PHYADDR_1,
964};
965
966/*
967 * Interrupt Status Bits
968 */
969enum jme_interrupt_bits {
970 INTR_SWINTR = 0x80000000,
971 INTR_TMINTR = 0x40000000,
972 INTR_LINKCH = 0x20000000,
973 INTR_PAUSERCV = 0x10000000,
974 INTR_MAGICRCV = 0x08000000,
975 INTR_WAKERCV = 0x04000000,
976 INTR_PCCRX0TO = 0x02000000,
977 INTR_PCCRX1TO = 0x01000000,
978 INTR_PCCRX2TO = 0x00800000,
979 INTR_PCCRX3TO = 0x00400000,
980 INTR_PCCTXTO = 0x00200000,
981 INTR_PCCRX0 = 0x00100000,
982 INTR_PCCRX1 = 0x00080000,
983 INTR_PCCRX2 = 0x00040000,
984 INTR_PCCRX3 = 0x00020000,
985 INTR_PCCTX = 0x00010000,
986 INTR_RX3EMP = 0x00008000,
987 INTR_RX2EMP = 0x00004000,
988 INTR_RX1EMP = 0x00002000,
989 INTR_RX0EMP = 0x00001000,
990 INTR_RX3 = 0x00000800,
991 INTR_RX2 = 0x00000400,
992 INTR_RX1 = 0x00000200,
993 INTR_RX0 = 0x00000100,
994 INTR_TX7 = 0x00000080,
995 INTR_TX6 = 0x00000040,
996 INTR_TX5 = 0x00000020,
997 INTR_TX4 = 0x00000010,
998 INTR_TX3 = 0x00000008,
999 INTR_TX2 = 0x00000004,
1000 INTR_TX1 = 0x00000002,
1001 INTR_TX0 = 0x00000001,
1002};
1003
1004static const u32 INTR_ENABLE = INTR_SWINTR |
1005 INTR_TMINTR |
1006 INTR_LINKCH |
1007 INTR_PCCRX0TO |
1008 INTR_PCCRX0 |
1009 INTR_PCCTXTO |
1010 INTR_PCCTX |
1011 INTR_RX0EMP;
1012
1013/*
1014 * PCC Control Registers
1015 */
1016enum jme_pccrx_masks {
1017 PCCRXTO_MASK = 0xFFFF0000,
1018 PCCRX_MASK = 0x0000FF00,
1019};
1020
1021enum jme_pcctx_masks {
1022 PCCTXTO_MASK = 0xFFFF0000,
1023 PCCTX_MASK = 0x0000FF00,
1024 PCCTX_QS_MASK = 0x000000FF,
1025};
1026
1027enum jme_pccrx_shifts {
1028 PCCRXTO_SHIFT = 16,
1029 PCCRX_SHIFT = 8,
1030};
1031
1032enum jme_pcctx_shifts {
1033 PCCTXTO_SHIFT = 16,
1034 PCCTX_SHIFT = 8,
1035};
1036
1037enum jme_pcctx_bits {
1038 PCCTXQ0_EN = 0x00000001,
1039 PCCTXQ1_EN = 0x00000002,
1040 PCCTXQ2_EN = 0x00000004,
1041 PCCTXQ3_EN = 0x00000008,
1042 PCCTXQ4_EN = 0x00000010,
1043 PCCTXQ5_EN = 0x00000020,
1044 PCCTXQ6_EN = 0x00000040,
1045 PCCTXQ7_EN = 0x00000080,
1046};
1047
1048/*
1049 * Chip Mode Register
1050 */
1051enum jme_chipmode_bit_masks {
1052 CM_FPGAVER_MASK = 0xFFFF0000,
1053 CM_CHIPREV_MASK = 0x0000FF00,
1054 CM_CHIPMODE_MASK = 0x0000000F,
1055};
1056
1057enum jme_chipmode_shifts {
1058 CM_FPGAVER_SHIFT = 16,
1059 CM_CHIPREV_SHIFT = 8,
1060};
1061
1062/*
1063 * Shadow base address register bits
1064 */
1065enum jme_shadow_base_address_bits {
1066 SHBA_POSTEN = 0x1,
1067};
1068
1069/*
1070 * Aggressive Power Mode Control
1071 */
1072enum jme_apmc_bits {
1073 JME_APMC_PCIE_SD_EN = 0x40000000,
1074 JME_APMC_PSEUDO_HP_EN = 0x20000000,
1075 JME_APMC_EPIEN = 0x04000000,
1076 JME_APMC_EPIEN_CTRL = 0x03000000,
1077};
1078
1079enum jme_apmc_values {
1080 JME_APMC_EPIEN_CTRL_EN = 0x02000000,
1081 JME_APMC_EPIEN_CTRL_DIS = 0x01000000,
1082};
1083
1084#define APMC_PHP_SHUTDOWN_DELAY (10 * 1000 * 1000)
1085
1086#ifdef REG_DEBUG
1087static char *MAC_REG_NAME[] = {
1088 "JME_TXCS", "JME_TXDBA_LO", "JME_TXDBA_HI", "JME_TXQDC",
1089 "JME_TXNDA", "JME_TXMCS", "JME_TXPFC", "JME_TXTRHD",
1090 "JME_RXCS", "JME_RXDBA_LO", "JME_RXDBA_HI", "JME_RXQDC",
1091 "JME_RXNDA", "JME_RXMCS", "JME_RXUMA_LO", "JME_RXUMA_HI",
1092 "JME_RXMCHT_LO", "JME_RXMCHT_HI", "JME_WFODP", "JME_WFOI",
1093 "JME_SMI", "JME_GHC", "UNKNOWN", "UNKNOWN",
1094 "JME_PMCS"};
1095
1096static char *PE_REG_NAME[] = {
1097 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
1098 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
1099 "UNKNOWN", "UNKNOWN", "JME_PHY_CS", "UNKNOWN",
1100 "JME_PHY_LINK", "UNKNOWN", "UNKNOWN", "UNKNOWN",
1101 "JME_SMBCSR", "JME_SMBINTF"};
1102
1103static char *MISC_REG_NAME[] = {
1104 "JME_TMCSR", "JME_GPIO", "JME_GPREG0", "JME_GPREG1",
1105 "JME_IEVE", "JME_IREQ", "JME_IENS", "JME_IENC",
1106 "JME_PCCRX0", "JME_PCCRX1", "JME_PCCRX2", "JME_PCCRX3",
1107 "JME_PCCTX0", "JME_CHIPMODE", "JME_SHBA_HI", "JME_SHBA_LO",
1108 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
1109 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
1110 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN",
1111 "JME_TIMER1", "JME_TIMER2", "UNKNOWN", "JME_APMC",
1112 "JME_PCCSRX0"};
1113
/*
 * Print a register access (REG_DEBUG builds only): resolves the register
 * offset to a symbolic name from the MAC/PE/MISC tables above, falling
 * back to "UNKNOWN" (PE_REG_NAME[0]) for unrecognized banks.
 *
 * NOTE(review): (reg & 0xFF) >> 2 can index past the end of the short
 * name tables for offsets with no listed name (e.g. MAC offsets above
 * 0x60) - confirm all call sites pass only listed registers.
 */
1114static inline void reg_dbg(const struct jme_adapter *jme,
1115 const char *msg, u32 val, u32 reg)
1116{
1117 const char *regname;
 /* bank select: 0x000 = MAC, 0x400 = PHY/PE, 0x800 = MISC */
1118 switch (reg & 0xF00) {
1119 case 0x000:
1120 regname = MAC_REG_NAME[(reg & 0xFF) >> 2];
1121 break;
1122 case 0x400:
1123 regname = PE_REG_NAME[(reg & 0xFF) >> 2];
1124 break;
1125 case 0x800:
1126 regname = MISC_REG_NAME[(reg & 0xFF) >> 2];
1127 break;
1128 default:
1129 regname = PE_REG_NAME[0];
1130 }
1131 printk(KERN_DEBUG "%s: %-20s %08x@%s\n", jme->dev->name,
1132 msg, val, regname);
1133}
1134#else
/* Non-debug builds: compiles to nothing. */
1135static inline void reg_dbg(const struct jme_adapter *jme,
1136 const char *msg, u32 val, u32 reg) {}
1137#endif
1138
1139/*
1140 * Read/Write MMaped I/O Registers
1141 */
1142static inline u32 jread32(struct jme_adapter *jme, u32 reg)
1143{
1144 return readl(jme->regs + reg);
1145}
1146
1147static inline void jwrite32(struct jme_adapter *jme, u32 reg, u32 val)
1148{
1149 reg_dbg(jme, "REG WRITE", val, reg);
1150 writel(val, jme->regs + reg);
1151 reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
1152}
1153
1154static inline void jwrite32f(struct jme_adapter *jme, u32 reg, u32 val)
1155{
1156 /*
1157 * Read after write should cause flush
1158 */
1159 reg_dbg(jme, "REG WRITE FLUSH", val, reg);
1160 writel(val, jme->regs + reg);
1161 readl(jme->regs + reg);
1162 reg_dbg(jme, "VAL AFTER WRITE", readl(jme->regs + reg), reg);
1163}
1164
1165/*
1166 * PHY Regs
1167 */
1168enum jme_phy_reg17_bit_masks {
1169 PREG17_SPEED = 0xC000,
1170 PREG17_DUPLEX = 0x2000,
1171 PREG17_SPDRSV = 0x0800,
1172 PREG17_LNKUP = 0x0400,
1173 PREG17_MDI = 0x0040,
1174};
1175
1176enum jme_phy_reg17_vals {
1177 PREG17_SPEED_10M = 0x0000,
1178 PREG17_SPEED_100M = 0x4000,
1179 PREG17_SPEED_1000M = 0x8000,
1180};
1181
1182#define BMSR_ANCOMP 0x0020
1183
1184/*
1185 * Workaround
1186 */
1187static inline int is_buggy250(unsigned short device, unsigned int chiprev)
1188{
1189 return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
1190}
1191
1192/*
1193 * Function prototypes
1194 */
1195static int jme_set_settings(struct net_device *netdev,
1196 struct ethtool_cmd *ecmd);
1197static void jme_set_multi(struct net_device *netdev);
1198
1199#endif
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 0a97c26df6ab..a1e22ed1f6ee 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -41,7 +41,7 @@
41#endif 41#endif
42 42
43#if MFE_DEBUG>=1 43#if MFE_DEBUG>=1
44#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__ , ## args) 44#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
45#define MFE_RX_DEBUG 2 45#define MFE_RX_DEBUG 2
46#else 46#else
47#define DPRINTK(str,args...) 47#define DPRINTK(str,args...)
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 6d343efb2717..4e7a5faf0351 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -203,7 +203,7 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
203 203
204out_badirq: 204out_badirq:
205 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", 205 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
206 dev->name, __FUNCTION__, irq); 206 dev->name, __func__, irq);
207 return ret; 207 return ret;
208} 208}
209 209
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 096bca54bcf7..b411b79d72ad 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -33,6 +33,7 @@
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/mm.h>
36#include <linux/bitmap.h> 37#include <linux/bitmap.h>
37#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
38#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 0a18b9e96da1..372811ade9f5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -48,30 +48,28 @@
48#include <linux/kernel.h> 48#include <linux/kernel.h>
49#include <linux/spinlock.h> 49#include <linux/spinlock.h>
50#include <linux/workqueue.h> 50#include <linux/workqueue.h>
51#include <linux/mii.h> 51#include <linux/phy.h>
52#include <linux/mv643xx_eth.h> 52#include <linux/mv643xx_eth.h>
53#include <asm/io.h> 53#include <asm/io.h>
54#include <asm/types.h> 54#include <asm/types.h>
55#include <asm/system.h> 55#include <asm/system.h>
56 56
57static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 57static char mv643xx_eth_driver_name[] = "mv643xx_eth";
58static char mv643xx_eth_driver_version[] = "1.3"; 58static char mv643xx_eth_driver_version[] = "1.4";
59 59
60#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
61#define MV643XX_ETH_NAPI
62#define MV643XX_ETH_TX_FAST_REFILL
63
64#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
65#define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1)
66#else
67#define MAX_DESCS_PER_SKB 1
68#endif
69 60
70/* 61/*
71 * Registers shared between all ports. 62 * Registers shared between all ports.
72 */ 63 */
73#define PHY_ADDR 0x0000 64#define PHY_ADDR 0x0000
74#define SMI_REG 0x0004 65#define SMI_REG 0x0004
66#define SMI_BUSY 0x10000000
67#define SMI_READ_VALID 0x08000000
68#define SMI_OPCODE_READ 0x04000000
69#define SMI_OPCODE_WRITE 0x00000000
70#define ERR_INT_CAUSE 0x0080
71#define ERR_INT_SMI_DONE 0x00000010
72#define ERR_INT_MASK 0x0084
75#define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 73#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
76#define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 74#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
77#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) 75#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
@@ -104,16 +102,12 @@ static char mv643xx_eth_driver_version[] = "1.3";
104#define TX_BW_MTU(p) (0x0458 + ((p) << 10)) 102#define TX_BW_MTU(p) (0x0458 + ((p) << 10))
105#define TX_BW_BURST(p) (0x045c + ((p) << 10)) 103#define TX_BW_BURST(p) (0x045c + ((p) << 10))
106#define INT_CAUSE(p) (0x0460 + ((p) << 10)) 104#define INT_CAUSE(p) (0x0460 + ((p) << 10))
107#define INT_TX_END_0 0x00080000
108#define INT_TX_END 0x07f80000 105#define INT_TX_END 0x07f80000
109#define INT_RX 0x0007fbfc 106#define INT_RX 0x000003fc
110#define INT_EXT 0x00000002 107#define INT_EXT 0x00000002
111#define INT_CAUSE_EXT(p) (0x0464 + ((p) << 10)) 108#define INT_CAUSE_EXT(p) (0x0464 + ((p) << 10))
112#define INT_EXT_LINK 0x00100000 109#define INT_EXT_LINK_PHY 0x00110000
113#define INT_EXT_PHY 0x00010000 110#define INT_EXT_TX 0x000000ff
114#define INT_EXT_TX_ERROR_0 0x00000100
115#define INT_EXT_TX_0 0x00000001
116#define INT_EXT_TX 0x0000ffff
117#define INT_MASK(p) (0x0468 + ((p) << 10)) 111#define INT_MASK(p) (0x0468 + ((p) << 10))
118#define INT_MASK_EXT(p) (0x046c + ((p) << 10)) 112#define INT_MASK_EXT(p) (0x046c + ((p) << 10))
119#define TX_FIFO_URGENT_THRESHOLD(p) (0x0474 + ((p) << 10)) 113#define TX_FIFO_URGENT_THRESHOLD(p) (0x0474 + ((p) << 10))
@@ -171,8 +165,8 @@ static char mv643xx_eth_driver_version[] = "1.3";
171#define FORCE_LINK_PASS (1 << 1) 165#define FORCE_LINK_PASS (1 << 1)
172#define SERIAL_PORT_ENABLE (1 << 0) 166#define SERIAL_PORT_ENABLE (1 << 0)
173 167
174#define DEFAULT_RX_QUEUE_SIZE 400 168#define DEFAULT_RX_QUEUE_SIZE 128
175#define DEFAULT_TX_QUEUE_SIZE 800 169#define DEFAULT_TX_QUEUE_SIZE 256
176 170
177 171
178/* 172/*
@@ -249,9 +243,23 @@ struct mv643xx_eth_shared_private {
249 void __iomem *base; 243 void __iomem *base;
250 244
251 /* 245 /*
252 * Protects access to SMI_REG, which is shared between ports. 246 * Points at the right SMI instance to use.
247 */
248 struct mv643xx_eth_shared_private *smi;
249
250 /*
251 * Provides access to local SMI interface.
252 */
253 struct mii_bus smi_bus;
254
255 /*
256 * If we have access to the error interrupt pin (which is
257 * somewhat misnamed as it not only reflects internal errors
258 * but also reflects SMI completion), use that to wait for
259 * SMI access completion instead of polling the SMI busy bit.
253 */ 260 */
254 spinlock_t phy_lock; 261 int err_interrupt;
262 wait_queue_head_t smi_busy_wait;
255 263
256 /* 264 /*
257 * Per-port MBUS window access register value. 265 * Per-port MBUS window access register value.
@@ -263,9 +271,13 @@ struct mv643xx_eth_shared_private {
263 */ 271 */
264 unsigned int t_clk; 272 unsigned int t_clk;
265 int extended_rx_coal_limit; 273 int extended_rx_coal_limit;
266 int tx_bw_control_moved; 274 int tx_bw_control;
267}; 275};
268 276
277#define TX_BW_CONTROL_ABSENT 0
278#define TX_BW_CONTROL_OLD_LAYOUT 1
279#define TX_BW_CONTROL_NEW_LAYOUT 2
280
269 281
270/* per-port *****************************************************************/ 282/* per-port *****************************************************************/
271struct mib_counters { 283struct mib_counters {
@@ -314,8 +326,6 @@ struct rx_queue {
314 dma_addr_t rx_desc_dma; 326 dma_addr_t rx_desc_dma;
315 int rx_desc_area_size; 327 int rx_desc_area_size;
316 struct sk_buff **rx_skb; 328 struct sk_buff **rx_skb;
317
318 struct timer_list rx_oom;
319}; 329};
320 330
321struct tx_queue { 331struct tx_queue {
@@ -330,7 +340,12 @@ struct tx_queue {
330 struct tx_desc *tx_desc_area; 340 struct tx_desc *tx_desc_area;
331 dma_addr_t tx_desc_dma; 341 dma_addr_t tx_desc_dma;
332 int tx_desc_area_size; 342 int tx_desc_area_size;
333 struct sk_buff **tx_skb; 343
344 struct sk_buff_head tx_skb;
345
346 unsigned long tx_packets;
347 unsigned long tx_bytes;
348 unsigned long tx_dropped;
334}; 349};
335 350
336struct mv643xx_eth_private { 351struct mv643xx_eth_private {
@@ -339,14 +354,24 @@ struct mv643xx_eth_private {
339 354
340 struct net_device *dev; 355 struct net_device *dev;
341 356
342 struct mv643xx_eth_shared_private *shared_smi; 357 struct phy_device *phy;
343 int phy_addr;
344
345 spinlock_t lock;
346 358
359 struct timer_list mib_counters_timer;
360 spinlock_t mib_counters_lock;
347 struct mib_counters mib_counters; 361 struct mib_counters mib_counters;
362
348 struct work_struct tx_timeout_task; 363 struct work_struct tx_timeout_task;
349 struct mii_if_info mii; 364
365 struct napi_struct napi;
366 u8 work_link;
367 u8 work_tx;
368 u8 work_tx_end;
369 u8 work_rx;
370 u8 work_rx_refill;
371 u8 work_rx_oom;
372
373 int skb_size;
374 struct sk_buff_head rx_recycle;
350 375
351 /* 376 /*
352 * RX state. 377 * RX state.
@@ -354,9 +379,8 @@ struct mv643xx_eth_private {
354 int default_rx_ring_size; 379 int default_rx_ring_size;
355 unsigned long rx_desc_sram_addr; 380 unsigned long rx_desc_sram_addr;
356 int rx_desc_sram_size; 381 int rx_desc_sram_size;
357 u8 rxq_mask; 382 int rxq_count;
358 int rxq_primary; 383 struct timer_list rx_oom;
359 struct napi_struct napi;
360 struct rx_queue rxq[8]; 384 struct rx_queue rxq[8];
361 385
362 /* 386 /*
@@ -365,12 +389,8 @@ struct mv643xx_eth_private {
365 int default_tx_ring_size; 389 int default_tx_ring_size;
366 unsigned long tx_desc_sram_addr; 390 unsigned long tx_desc_sram_addr;
367 int tx_desc_sram_size; 391 int tx_desc_sram_size;
368 u8 txq_mask; 392 int txq_count;
369 int txq_primary;
370 struct tx_queue txq[8]; 393 struct tx_queue txq[8];
371#ifdef MV643XX_ETH_TX_FAST_REFILL
372 int tx_clean_threshold;
373#endif
374}; 394};
375 395
376 396
@@ -440,94 +460,21 @@ static void txq_disable(struct tx_queue *txq)
440 udelay(10); 460 udelay(10);
441} 461}
442 462
443static void __txq_maybe_wake(struct tx_queue *txq) 463static void txq_maybe_wake(struct tx_queue *txq)
444{ 464{
445 struct mv643xx_eth_private *mp = txq_to_mp(txq); 465 struct mv643xx_eth_private *mp = txq_to_mp(txq);
466 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
446 467
447 /* 468 if (netif_tx_queue_stopped(nq)) {
448 * netif_{stop,wake}_queue() flow control only applies to 469 __netif_tx_lock(nq, smp_processor_id());
449 * the primary queue. 470 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
450 */ 471 netif_tx_wake_queue(nq);
451 BUG_ON(txq->index != mp->txq_primary); 472 __netif_tx_unlock(nq);
452
453 if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
454 netif_wake_queue(mp->dev);
455}
456
457
458/* rx ***********************************************************************/
459static void txq_reclaim(struct tx_queue *txq, int force);
460
461static void rxq_refill(struct rx_queue *rxq)
462{
463 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
464 unsigned long flags;
465
466 spin_lock_irqsave(&mp->lock, flags);
467
468 while (rxq->rx_desc_count < rxq->rx_ring_size) {
469 int skb_size;
470 struct sk_buff *skb;
471 int unaligned;
472 int rx;
473
474 /*
475 * Reserve 2+14 bytes for an ethernet header (the
476 * hardware automatically prepends 2 bytes of dummy
477 * data to each received packet), 16 bytes for up to
478 * four VLAN tags, and 4 bytes for the trailing FCS
479 * -- 36 bytes total.
480 */
481 skb_size = mp->dev->mtu + 36;
482
483 /*
484 * Make sure that the skb size is a multiple of 8
485 * bytes, as the lower three bits of the receive
486 * descriptor's buffer size field are ignored by
487 * the hardware.
488 */
489 skb_size = (skb_size + 7) & ~7;
490
491 skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
492 if (skb == NULL)
493 break;
494
495 unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
496 if (unaligned)
497 skb_reserve(skb, dma_get_cache_alignment() - unaligned);
498
499 rxq->rx_desc_count++;
500 rx = rxq->rx_used_desc;
501 rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
502
503 rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
504 skb_size, DMA_FROM_DEVICE);
505 rxq->rx_desc_area[rx].buf_size = skb_size;
506 rxq->rx_skb[rx] = skb;
507 wmb();
508 rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
509 RX_ENABLE_INTERRUPT;
510 wmb();
511
512 /*
513 * The hardware automatically prepends 2 bytes of
514 * dummy data to each received packet, so that the
515 * IP header ends up 16-byte aligned.
516 */
517 skb_reserve(skb, 2);
518 } 473 }
519
520 if (rxq->rx_desc_count != rxq->rx_ring_size)
521 mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
522
523 spin_unlock_irqrestore(&mp->lock, flags);
524} 474}
525 475
526static inline void rxq_refill_timer_wrapper(unsigned long data)
527{
528 rxq_refill((struct rx_queue *)data);
529}
530 476
477/* rx napi ******************************************************************/
531static int rxq_process(struct rx_queue *rxq, int budget) 478static int rxq_process(struct rx_queue *rxq, int budget)
532{ 479{
533 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 480 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
@@ -539,31 +486,31 @@ static int rxq_process(struct rx_queue *rxq, int budget)
539 struct rx_desc *rx_desc; 486 struct rx_desc *rx_desc;
540 unsigned int cmd_sts; 487 unsigned int cmd_sts;
541 struct sk_buff *skb; 488 struct sk_buff *skb;
542 unsigned long flags; 489 u16 byte_cnt;
543
544 spin_lock_irqsave(&mp->lock, flags);
545 490
546 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; 491 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
547 492
548 cmd_sts = rx_desc->cmd_sts; 493 cmd_sts = rx_desc->cmd_sts;
549 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 494 if (cmd_sts & BUFFER_OWNED_BY_DMA)
550 spin_unlock_irqrestore(&mp->lock, flags);
551 break; 495 break;
552 }
553 rmb(); 496 rmb();
554 497
555 skb = rxq->rx_skb[rxq->rx_curr_desc]; 498 skb = rxq->rx_skb[rxq->rx_curr_desc];
556 rxq->rx_skb[rxq->rx_curr_desc] = NULL; 499 rxq->rx_skb[rxq->rx_curr_desc] = NULL;
557 500
558 rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size; 501 rxq->rx_curr_desc++;
559 502 if (rxq->rx_curr_desc == rxq->rx_ring_size)
560 spin_unlock_irqrestore(&mp->lock, flags); 503 rxq->rx_curr_desc = 0;
561 504
562 dma_unmap_single(NULL, rx_desc->buf_ptr + 2, 505 dma_unmap_single(NULL, rx_desc->buf_ptr,
563 rx_desc->buf_size, DMA_FROM_DEVICE); 506 rx_desc->buf_size, DMA_FROM_DEVICE);
564 rxq->rx_desc_count--; 507 rxq->rx_desc_count--;
565 rx++; 508 rx++;
566 509
510 mp->work_rx_refill |= 1 << rxq->index;
511
512 byte_cnt = rx_desc->byte_cnt;
513
567 /* 514 /*
568 * Update statistics. 515 * Update statistics.
569 * 516 *
@@ -573,7 +520,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
573 * byte CRC at the end of the packet (which we do count). 520 * byte CRC at the end of the packet (which we do count).
574 */ 521 */
575 stats->rx_packets++; 522 stats->rx_packets++;
576 stats->rx_bytes += rx_desc->byte_cnt - 2; 523 stats->rx_bytes += byte_cnt - 2;
577 524
578 /* 525 /*
579 * In case we received a packet without first / last bits 526 * In case we received a packet without first / last bits
@@ -596,72 +543,84 @@ static int rxq_process(struct rx_queue *rxq, int budget)
596 if (cmd_sts & ERROR_SUMMARY) 543 if (cmd_sts & ERROR_SUMMARY)
597 stats->rx_errors++; 544 stats->rx_errors++;
598 545
599 dev_kfree_skb_irq(skb); 546 dev_kfree_skb(skb);
600 } else { 547 } else {
601 /* 548 /*
602 * The -4 is for the CRC in the trailer of the 549 * The -4 is for the CRC in the trailer of the
603 * received packet 550 * received packet
604 */ 551 */
605 skb_put(skb, rx_desc->byte_cnt - 2 - 4); 552 skb_put(skb, byte_cnt - 2 - 4);
606 553
607 if (cmd_sts & LAYER_4_CHECKSUM_OK) { 554 if (cmd_sts & LAYER_4_CHECKSUM_OK)
608 skb->ip_summed = CHECKSUM_UNNECESSARY; 555 skb->ip_summed = CHECKSUM_UNNECESSARY;
609 skb->csum = htons(
610 (cmd_sts & 0x0007fff8) >> 3);
611 }
612 skb->protocol = eth_type_trans(skb, mp->dev); 556 skb->protocol = eth_type_trans(skb, mp->dev);
613#ifdef MV643XX_ETH_NAPI
614 netif_receive_skb(skb); 557 netif_receive_skb(skb);
615#else
616 netif_rx(skb);
617#endif
618 } 558 }
619 559
620 mp->dev->last_rx = jiffies; 560 mp->dev->last_rx = jiffies;
621 } 561 }
622 562
623 rxq_refill(rxq); 563 if (rx < budget)
564 mp->work_rx &= ~(1 << rxq->index);
624 565
625 return rx; 566 return rx;
626} 567}
627 568
628#ifdef MV643XX_ETH_NAPI 569static int rxq_refill(struct rx_queue *rxq, int budget)
629static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
630{ 570{
631 struct mv643xx_eth_private *mp; 571 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
632 int rx; 572 int refilled;
633 int i;
634 573
635 mp = container_of(napi, struct mv643xx_eth_private, napi); 574 refilled = 0;
575 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
576 struct sk_buff *skb;
577 int unaligned;
578 int rx;
579
580 skb = __skb_dequeue(&mp->rx_recycle);
581 if (skb == NULL)
582 skb = dev_alloc_skb(mp->skb_size +
583 dma_get_cache_alignment() - 1);
636 584
637#ifdef MV643XX_ETH_TX_FAST_REFILL 585 if (skb == NULL) {
638 if (++mp->tx_clean_threshold > 5) { 586 mp->work_rx_oom |= 1 << rxq->index;
639 mp->tx_clean_threshold = 0; 587 goto oom;
640 for (i = 0; i < 8; i++)
641 if (mp->txq_mask & (1 << i))
642 txq_reclaim(mp->txq + i, 0);
643
644 if (netif_carrier_ok(mp->dev)) {
645 spin_lock_irq(&mp->lock);
646 __txq_maybe_wake(mp->txq + mp->txq_primary);
647 spin_unlock_irq(&mp->lock);
648 } 588 }
649 }
650#endif
651 589
652 rx = 0; 590 unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
653 for (i = 7; rx < budget && i >= 0; i--) 591 if (unaligned)
654 if (mp->rxq_mask & (1 << i)) 592 skb_reserve(skb, dma_get_cache_alignment() - unaligned);
655 rx += rxq_process(mp->rxq + i, budget - rx);
656 593
657 if (rx < budget) { 594 refilled++;
658 netif_rx_complete(mp->dev, napi); 595 rxq->rx_desc_count++;
659 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 596
597 rx = rxq->rx_used_desc++;
598 if (rxq->rx_used_desc == rxq->rx_ring_size)
599 rxq->rx_used_desc = 0;
600
601 rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
602 mp->skb_size, DMA_FROM_DEVICE);
603 rxq->rx_desc_area[rx].buf_size = mp->skb_size;
604 rxq->rx_skb[rx] = skb;
605 wmb();
606 rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
607 RX_ENABLE_INTERRUPT;
608 wmb();
609
610 /*
611 * The hardware automatically prepends 2 bytes of
612 * dummy data to each received packet, so that the
613 * IP header ends up 16-byte aligned.
614 */
615 skb_reserve(skb, 2);
660 } 616 }
661 617
662 return rx; 618 if (refilled < budget)
619 mp->work_rx_refill &= ~(1 << rxq->index);
620
621oom:
622 return refilled;
663} 623}
664#endif
665 624
666 625
667/* tx ***********************************************************************/ 626/* tx ***********************************************************************/
@@ -684,8 +643,9 @@ static int txq_alloc_desc_index(struct tx_queue *txq)
684 643
685 BUG_ON(txq->tx_desc_count >= txq->tx_ring_size); 644 BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
686 645
687 tx_desc_curr = txq->tx_curr_desc; 646 tx_desc_curr = txq->tx_curr_desc++;
688 txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size; 647 if (txq->tx_curr_desc == txq->tx_ring_size)
648 txq->tx_curr_desc = 0;
689 649
690 BUG_ON(txq->tx_curr_desc == txq->tx_used_desc); 650 BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
691 651
@@ -714,10 +674,8 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
714 desc->cmd_sts = BUFFER_OWNED_BY_DMA | 674 desc->cmd_sts = BUFFER_OWNED_BY_DMA |
715 ZERO_PADDING | TX_LAST_DESC | 675 ZERO_PADDING | TX_LAST_DESC |
716 TX_ENABLE_INTERRUPT; 676 TX_ENABLE_INTERRUPT;
717 txq->tx_skb[tx_index] = skb;
718 } else { 677 } else {
719 desc->cmd_sts = BUFFER_OWNED_BY_DMA; 678 desc->cmd_sts = BUFFER_OWNED_BY_DMA;
720 txq->tx_skb[tx_index] = NULL;
721 } 679 }
722 680
723 desc->l4i_chk = 0; 681 desc->l4i_chk = 0;
@@ -734,144 +692,228 @@ static inline __be16 sum16_as_be(__sum16 sum)
734 return (__force __be16)sum; 692 return (__force __be16)sum;
735} 693}
736 694
737static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb) 695static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
738{ 696{
739 struct mv643xx_eth_private *mp = txq_to_mp(txq); 697 struct mv643xx_eth_private *mp = txq_to_mp(txq);
740 int nr_frags = skb_shinfo(skb)->nr_frags; 698 int nr_frags = skb_shinfo(skb)->nr_frags;
741 int tx_index; 699 int tx_index;
742 struct tx_desc *desc; 700 struct tx_desc *desc;
743 u32 cmd_sts; 701 u32 cmd_sts;
702 u16 l4i_chk;
744 int length; 703 int length;
745 704
746 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA; 705 cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
747 706 l4i_chk = 0;
748 tx_index = txq_alloc_desc_index(txq);
749 desc = &txq->tx_desc_area[tx_index];
750
751 if (nr_frags) {
752 txq_submit_frag_skb(txq, skb);
753
754 length = skb_headlen(skb);
755 txq->tx_skb[tx_index] = NULL;
756 } else {
757 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
758 length = skb->len;
759 txq->tx_skb[tx_index] = skb;
760 }
761
762 desc->byte_cnt = length;
763 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
764 707
765 if (skb->ip_summed == CHECKSUM_PARTIAL) { 708 if (skb->ip_summed == CHECKSUM_PARTIAL) {
766 int mac_hdr_len; 709 int tag_bytes;
767 710
768 BUG_ON(skb->protocol != htons(ETH_P_IP) && 711 BUG_ON(skb->protocol != htons(ETH_P_IP) &&
769 skb->protocol != htons(ETH_P_8021Q)); 712 skb->protocol != htons(ETH_P_8021Q));
770 713
771 cmd_sts |= GEN_TCP_UDP_CHECKSUM | 714 tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
772 GEN_IP_V4_CHECKSUM | 715 if (unlikely(tag_bytes & ~12)) {
773 ip_hdr(skb)->ihl << TX_IHL_SHIFT; 716 if (skb_checksum_help(skb) == 0)
717 goto no_csum;
718 kfree_skb(skb);
719 return 1;
720 }
774 721
775 mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data; 722 if (tag_bytes & 4)
776 switch (mac_hdr_len - ETH_HLEN) {
777 case 0:
778 break;
779 case 4:
780 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
781 break;
782 case 8:
783 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
784 break;
785 case 12:
786 cmd_sts |= MAC_HDR_EXTRA_4_BYTES; 723 cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
724 if (tag_bytes & 8)
787 cmd_sts |= MAC_HDR_EXTRA_8_BYTES; 725 cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
788 break; 726
789 default: 727 cmd_sts |= GEN_TCP_UDP_CHECKSUM |
790 if (net_ratelimit()) 728 GEN_IP_V4_CHECKSUM |
791 dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev, 729 ip_hdr(skb)->ihl << TX_IHL_SHIFT;
792 "mac header length is %d?!\n", mac_hdr_len);
793 break;
794 }
795 730
796 switch (ip_hdr(skb)->protocol) { 731 switch (ip_hdr(skb)->protocol) {
797 case IPPROTO_UDP: 732 case IPPROTO_UDP:
798 cmd_sts |= UDP_FRAME; 733 cmd_sts |= UDP_FRAME;
799 desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); 734 l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
800 break; 735 break;
801 case IPPROTO_TCP: 736 case IPPROTO_TCP:
802 desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); 737 l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
803 break; 738 break;
804 default: 739 default:
805 BUG(); 740 BUG();
806 } 741 }
807 } else { 742 } else {
743no_csum:
808 /* Errata BTS #50, IHL must be 5 if no HW checksum */ 744 /* Errata BTS #50, IHL must be 5 if no HW checksum */
809 cmd_sts |= 5 << TX_IHL_SHIFT; 745 cmd_sts |= 5 << TX_IHL_SHIFT;
810 desc->l4i_chk = 0;
811 } 746 }
812 747
748 tx_index = txq_alloc_desc_index(txq);
749 desc = &txq->tx_desc_area[tx_index];
750
751 if (nr_frags) {
752 txq_submit_frag_skb(txq, skb);
753 length = skb_headlen(skb);
754 } else {
755 cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
756 length = skb->len;
757 }
758
759 desc->l4i_chk = l4i_chk;
760 desc->byte_cnt = length;
761 desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
762
763 __skb_queue_tail(&txq->tx_skb, skb);
764
813 /* ensure all other descriptors are written before first cmd_sts */ 765 /* ensure all other descriptors are written before first cmd_sts */
814 wmb(); 766 wmb();
815 desc->cmd_sts = cmd_sts; 767 desc->cmd_sts = cmd_sts;
816 768
817 /* clear TX_END interrupt status */ 769 /* clear TX_END status */
818 wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index)); 770 mp->work_tx_end &= ~(1 << txq->index);
819 rdl(mp, INT_CAUSE(mp->port_num));
820 771
821 /* ensure all descriptors are written before poking hardware */ 772 /* ensure all descriptors are written before poking hardware */
822 wmb(); 773 wmb();
823 txq_enable(txq); 774 txq_enable(txq);
824 775
825 txq->tx_desc_count += nr_frags + 1; 776 txq->tx_desc_count += nr_frags + 1;
777
778 return 0;
826} 779}
827 780
828static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) 781static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
829{ 782{
830 struct mv643xx_eth_private *mp = netdev_priv(dev); 783 struct mv643xx_eth_private *mp = netdev_priv(dev);
831 struct net_device_stats *stats = &dev->stats; 784 int queue;
832 struct tx_queue *txq; 785 struct tx_queue *txq;
833 unsigned long flags; 786 struct netdev_queue *nq;
787
788 queue = skb_get_queue_mapping(skb);
789 txq = mp->txq + queue;
790 nq = netdev_get_tx_queue(dev, queue);
834 791
835 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { 792 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
836 stats->tx_dropped++; 793 txq->tx_dropped++;
837 dev_printk(KERN_DEBUG, &dev->dev, 794 dev_printk(KERN_DEBUG, &dev->dev,
838 "failed to linearize skb with tiny " 795 "failed to linearize skb with tiny "
839 "unaligned fragment\n"); 796 "unaligned fragment\n");
840 return NETDEV_TX_BUSY; 797 return NETDEV_TX_BUSY;
841 } 798 }
842 799
843 spin_lock_irqsave(&mp->lock, flags); 800 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
844 801 if (net_ratelimit())
845 txq = mp->txq + mp->txq_primary; 802 dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
846
847 if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
848 spin_unlock_irqrestore(&mp->lock, flags);
849 if (txq->index == mp->txq_primary && net_ratelimit())
850 dev_printk(KERN_ERR, &dev->dev,
851 "primary tx queue full?!\n");
852 kfree_skb(skb); 803 kfree_skb(skb);
853 return NETDEV_TX_OK; 804 return NETDEV_TX_OK;
854 } 805 }
855 806
856 txq_submit_skb(txq, skb); 807 if (!txq_submit_skb(txq, skb)) {
857 stats->tx_bytes += skb->len;
858 stats->tx_packets++;
859 dev->trans_start = jiffies;
860
861 if (txq->index == mp->txq_primary) {
862 int entries_left; 808 int entries_left;
863 809
810 txq->tx_bytes += skb->len;
811 txq->tx_packets++;
812 dev->trans_start = jiffies;
813
864 entries_left = txq->tx_ring_size - txq->tx_desc_count; 814 entries_left = txq->tx_ring_size - txq->tx_desc_count;
865 if (entries_left < MAX_DESCS_PER_SKB) 815 if (entries_left < MAX_SKB_FRAGS + 1)
866 netif_stop_queue(dev); 816 netif_tx_stop_queue(nq);
867 } 817 }
868 818
869 spin_unlock_irqrestore(&mp->lock, flags);
870
871 return NETDEV_TX_OK; 819 return NETDEV_TX_OK;
872} 820}
873 821
874 822
823/* tx napi ******************************************************************/
824static void txq_kick(struct tx_queue *txq)
825{
826 struct mv643xx_eth_private *mp = txq_to_mp(txq);
827 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
828 u32 hw_desc_ptr;
829 u32 expected_ptr;
830
831 __netif_tx_lock(nq, smp_processor_id());
832
833 if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
834 goto out;
835
836 hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index));
837 expected_ptr = (u32)txq->tx_desc_dma +
838 txq->tx_curr_desc * sizeof(struct tx_desc);
839
840 if (hw_desc_ptr != expected_ptr)
841 txq_enable(txq);
842
843out:
844 __netif_tx_unlock(nq);
845
846 mp->work_tx_end &= ~(1 << txq->index);
847}
848
849static int txq_reclaim(struct tx_queue *txq, int budget, int force)
850{
851 struct mv643xx_eth_private *mp = txq_to_mp(txq);
852 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
853 int reclaimed;
854
855 __netif_tx_lock(nq, smp_processor_id());
856
857 reclaimed = 0;
858 while (reclaimed < budget && txq->tx_desc_count > 0) {
859 int tx_index;
860 struct tx_desc *desc;
861 u32 cmd_sts;
862 struct sk_buff *skb;
863
864 tx_index = txq->tx_used_desc;
865 desc = &txq->tx_desc_area[tx_index];
866 cmd_sts = desc->cmd_sts;
867
868 if (cmd_sts & BUFFER_OWNED_BY_DMA) {
869 if (!force)
870 break;
871 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
872 }
873
874 txq->tx_used_desc = tx_index + 1;
875 if (txq->tx_used_desc == txq->tx_ring_size)
876 txq->tx_used_desc = 0;
877
878 reclaimed++;
879 txq->tx_desc_count--;
880
881 skb = NULL;
882 if (cmd_sts & TX_LAST_DESC)
883 skb = __skb_dequeue(&txq->tx_skb);
884
885 if (cmd_sts & ERROR_SUMMARY) {
886 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
887 mp->dev->stats.tx_errors++;
888 }
889
890 if (cmd_sts & TX_FIRST_DESC) {
891 dma_unmap_single(NULL, desc->buf_ptr,
892 desc->byte_cnt, DMA_TO_DEVICE);
893 } else {
894 dma_unmap_page(NULL, desc->buf_ptr,
895 desc->byte_cnt, DMA_TO_DEVICE);
896 }
897
898 if (skb != NULL) {
899 if (skb_queue_len(&mp->rx_recycle) <
900 mp->default_rx_ring_size &&
901 skb_recycle_check(skb, mp->skb_size))
902 __skb_queue_head(&mp->rx_recycle, skb);
903 else
904 dev_kfree_skb(skb);
905 }
906 }
907
908 __netif_tx_unlock(nq);
909
910 if (reclaimed < budget)
911 mp->work_tx &= ~(1 << txq->index);
912
913 return reclaimed;
914}
915
916
875/* tx rate control **********************************************************/ 917/* tx rate control **********************************************************/
876/* 918/*
877 * Set total maximum TX rate (shared by all TX queues for this port) 919 * Set total maximum TX rate (shared by all TX queues for this port)
@@ -895,14 +937,17 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
895 if (bucket_size > 65535) 937 if (bucket_size > 65535)
896 bucket_size = 65535; 938 bucket_size = 65535;
897 939
898 if (mp->shared->tx_bw_control_moved) { 940 switch (mp->shared->tx_bw_control) {
899 wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate); 941 case TX_BW_CONTROL_OLD_LAYOUT:
900 wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
901 wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
902 } else {
903 wrl(mp, TX_BW_RATE(mp->port_num), token_rate); 942 wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
904 wrl(mp, TX_BW_MTU(mp->port_num), mtu); 943 wrl(mp, TX_BW_MTU(mp->port_num), mtu);
905 wrl(mp, TX_BW_BURST(mp->port_num), bucket_size); 944 wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
945 break;
946 case TX_BW_CONTROL_NEW_LAYOUT:
947 wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
948 wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
949 wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
950 break;
906 } 951 }
907} 952}
908 953
@@ -934,14 +979,21 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
934 /* 979 /*
935 * Turn on fixed priority mode. 980 * Turn on fixed priority mode.
936 */ 981 */
937 if (mp->shared->tx_bw_control_moved) 982 off = 0;
938 off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 983 switch (mp->shared->tx_bw_control) {
939 else 984 case TX_BW_CONTROL_OLD_LAYOUT:
940 off = TXQ_FIX_PRIO_CONF(mp->port_num); 985 off = TXQ_FIX_PRIO_CONF(mp->port_num);
986 break;
987 case TX_BW_CONTROL_NEW_LAYOUT:
988 off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
989 break;
990 }
941 991
942 val = rdl(mp, off); 992 if (off) {
943 val |= 1 << txq->index; 993 val = rdl(mp, off);
944 wrl(mp, off, val); 994 val |= 1 << txq->index;
995 wrl(mp, off, val);
996 }
945} 997}
946 998
947static void txq_set_wrr(struct tx_queue *txq, int weight) 999static void txq_set_wrr(struct tx_queue *txq, int weight)
@@ -953,95 +1005,147 @@ static void txq_set_wrr(struct tx_queue *txq, int weight)
953 /* 1005 /*
954 * Turn off fixed priority mode. 1006 * Turn off fixed priority mode.
955 */ 1007 */
956 if (mp->shared->tx_bw_control_moved) 1008 off = 0;
957 off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 1009 switch (mp->shared->tx_bw_control) {
958 else 1010 case TX_BW_CONTROL_OLD_LAYOUT:
959 off = TXQ_FIX_PRIO_CONF(mp->port_num); 1011 off = TXQ_FIX_PRIO_CONF(mp->port_num);
1012 break;
1013 case TX_BW_CONTROL_NEW_LAYOUT:
1014 off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
1015 break;
1016 }
960 1017
961 val = rdl(mp, off); 1018 if (off) {
962 val &= ~(1 << txq->index); 1019 val = rdl(mp, off);
963 wrl(mp, off, val); 1020 val &= ~(1 << txq->index);
1021 wrl(mp, off, val);
964 1022
965 /* 1023 /*
966 * Configure WRR weight for this queue. 1024 * Configure WRR weight for this queue.
967 */ 1025 */
968 off = TXQ_BW_WRR_CONF(mp->port_num, txq->index); 1026 off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
969 1027
970 val = rdl(mp, off); 1028 val = rdl(mp, off);
971 val = (val & ~0xff) | (weight & 0xff); 1029 val = (val & ~0xff) | (weight & 0xff);
972 wrl(mp, off, val); 1030 wrl(mp, off, val);
1031 }
973} 1032}
974 1033
975 1034
976/* mii management interface *************************************************/ 1035/* mii management interface *************************************************/
977#define SMI_BUSY 0x10000000 1036static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
978#define SMI_READ_VALID 0x08000000 1037{
979#define SMI_OPCODE_READ 0x04000000 1038 struct mv643xx_eth_shared_private *msp = dev_id;
980#define SMI_OPCODE_WRITE 0x00000000 1039
1040 if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
1041 writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
1042 wake_up(&msp->smi_busy_wait);
1043 return IRQ_HANDLED;
1044 }
1045
1046 return IRQ_NONE;
1047}
981 1048
982static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr, 1049static int smi_is_done(struct mv643xx_eth_shared_private *msp)
983 unsigned int reg, unsigned int *value)
984{ 1050{
985 void __iomem *smi_reg = mp->shared_smi->base + SMI_REG; 1051 return !(readl(msp->base + SMI_REG) & SMI_BUSY);
986 unsigned long flags; 1052}
987 int i;
988 1053
989 /* the SMI register is a shared resource */ 1054static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
990 spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); 1055{
1056 if (msp->err_interrupt == NO_IRQ) {
1057 int i;
991 1058
992 /* wait for the SMI register to become available */ 1059 for (i = 0; !smi_is_done(msp); i++) {
993 for (i = 0; readl(smi_reg) & SMI_BUSY; i++) { 1060 if (i == 10)
994 if (i == 1000) { 1061 return -ETIMEDOUT;
995 printk("%s: PHY busy timeout\n", mp->dev->name); 1062 msleep(10);
996 goto out;
997 } 1063 }
998 udelay(10); 1064
1065 return 0;
1066 }
1067
1068 if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
1069 msecs_to_jiffies(100)))
1070 return -ETIMEDOUT;
1071
1072 return 0;
1073}
1074
1075static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1076{
1077 struct mv643xx_eth_shared_private *msp = bus->priv;
1078 void __iomem *smi_reg = msp->base + SMI_REG;
1079 int ret;
1080
1081 if (smi_wait_ready(msp)) {
1082 printk("mv643xx_eth: SMI bus busy timeout\n");
1083 return -ETIMEDOUT;
999 } 1084 }
1000 1085
1001 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1086 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1002 1087
1003 /* now wait for the data to be valid */ 1088 if (smi_wait_ready(msp)) {
1004 for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) { 1089 printk("mv643xx_eth: SMI bus busy timeout\n");
1005 if (i == 1000) { 1090 return -ETIMEDOUT;
1006 printk("%s: PHY read timeout\n", mp->dev->name);
1007 goto out;
1008 }
1009 udelay(10);
1010 } 1091 }
1011 1092
1012 *value = readl(smi_reg) & 0xffff; 1093 ret = readl(smi_reg);
1013out: 1094 if (!(ret & SMI_READ_VALID)) {
1014 spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); 1095 printk("mv643xx_eth: SMI bus read not valid\n");
1096 return -ENODEV;
1097 }
1098
1099 return ret & 0xffff;
1015} 1100}
1016 1101
1017static void smi_reg_write(struct mv643xx_eth_private *mp, 1102static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1018 unsigned int addr,
1019 unsigned int reg, unsigned int value)
1020{ 1103{
1021 void __iomem *smi_reg = mp->shared_smi->base + SMI_REG; 1104 struct mv643xx_eth_shared_private *msp = bus->priv;
1022 unsigned long flags; 1105 void __iomem *smi_reg = msp->base + SMI_REG;
1023 int i;
1024
1025 /* the SMI register is a shared resource */
1026 spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
1027 1106
1028 /* wait for the SMI register to become available */ 1107 if (smi_wait_ready(msp)) {
1029 for (i = 0; readl(smi_reg) & SMI_BUSY; i++) { 1108 printk("mv643xx_eth: SMI bus busy timeout\n");
1030 if (i == 1000) { 1109 return -ETIMEDOUT;
1031 printk("%s: PHY busy timeout\n", mp->dev->name);
1032 goto out;
1033 }
1034 udelay(10);
1035 } 1110 }
1036 1111
1037 writel(SMI_OPCODE_WRITE | (reg << 21) | 1112 writel(SMI_OPCODE_WRITE | (reg << 21) |
1038 (addr << 16) | (value & 0xffff), smi_reg); 1113 (addr << 16) | (val & 0xffff), smi_reg);
1039out: 1114
1040 spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); 1115 if (smi_wait_ready(msp)) {
1116 printk("mv643xx_eth: SMI bus busy timeout\n");
1117 return -ETIMEDOUT;
1118 }
1119
1120 return 0;
1041} 1121}
1042 1122
1043 1123
1044/* mib counters *************************************************************/ 1124/* statistics ***************************************************************/
1125static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1126{
1127 struct mv643xx_eth_private *mp = netdev_priv(dev);
1128 struct net_device_stats *stats = &dev->stats;
1129 unsigned long tx_packets = 0;
1130 unsigned long tx_bytes = 0;
1131 unsigned long tx_dropped = 0;
1132 int i;
1133
1134 for (i = 0; i < mp->txq_count; i++) {
1135 struct tx_queue *txq = mp->txq + i;
1136
1137 tx_packets += txq->tx_packets;
1138 tx_bytes += txq->tx_bytes;
1139 tx_dropped += txq->tx_dropped;
1140 }
1141
1142 stats->tx_packets = tx_packets;
1143 stats->tx_bytes = tx_bytes;
1144 stats->tx_dropped = tx_dropped;
1145
1146 return stats;
1147}
1148
1045static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) 1149static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1046{ 1150{
1047 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); 1151 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1059,6 +1163,7 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
1059{ 1163{
1060 struct mib_counters *p = &mp->mib_counters; 1164 struct mib_counters *p = &mp->mib_counters;
1061 1165
1166 spin_lock(&mp->mib_counters_lock);
1062 p->good_octets_received += mib_read(mp, 0x00); 1167 p->good_octets_received += mib_read(mp, 0x00);
1063 p->good_octets_received += (u64)mib_read(mp, 0x04) << 32; 1168 p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
1064 p->bad_octets_received += mib_read(mp, 0x08); 1169 p->bad_octets_received += mib_read(mp, 0x08);
@@ -1091,6 +1196,16 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
1091 p->bad_crc_event += mib_read(mp, 0x74); 1196 p->bad_crc_event += mib_read(mp, 0x74);
1092 p->collision += mib_read(mp, 0x78); 1197 p->collision += mib_read(mp, 0x78);
1093 p->late_collision += mib_read(mp, 0x7c); 1198 p->late_collision += mib_read(mp, 0x7c);
1199 spin_unlock(&mp->mib_counters_lock);
1200
1201 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
1202}
1203
1204static void mib_counters_timer_wrapper(unsigned long _mp)
1205{
1206 struct mv643xx_eth_private *mp = (void *)_mp;
1207
1208 mib_counters_update(mp);
1094} 1209}
1095 1210
1096 1211
@@ -1156,9 +1271,9 @@ static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *
1156 struct mv643xx_eth_private *mp = netdev_priv(dev); 1271 struct mv643xx_eth_private *mp = netdev_priv(dev);
1157 int err; 1272 int err;
1158 1273
1159 spin_lock_irq(&mp->lock); 1274 err = phy_read_status(mp->phy);
1160 err = mii_ethtool_gset(&mp->mii, cmd); 1275 if (err == 0)
1161 spin_unlock_irq(&mp->lock); 1276 err = phy_ethtool_gset(mp->phy, cmd);
1162 1277
1163 /* 1278 /*
1164 * The MAC does not support 1000baseT_Half. 1279 * The MAC does not support 1000baseT_Half.
@@ -1206,18 +1321,13 @@ static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethto
1206static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1321static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1207{ 1322{
1208 struct mv643xx_eth_private *mp = netdev_priv(dev); 1323 struct mv643xx_eth_private *mp = netdev_priv(dev);
1209 int err;
1210 1324
1211 /* 1325 /*
1212 * The MAC does not support 1000baseT_Half. 1326 * The MAC does not support 1000baseT_Half.
1213 */ 1327 */
1214 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1328 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1215 1329
1216 spin_lock_irq(&mp->lock); 1330 return phy_ethtool_sset(mp->phy, cmd);
1217 err = mii_ethtool_sset(&mp->mii, cmd);
1218 spin_unlock_irq(&mp->lock);
1219
1220 return err;
1221} 1331}
1222 1332
1223static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd) 1333static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1239,7 +1349,7 @@ static int mv643xx_eth_nway_reset(struct net_device *dev)
1239{ 1349{
1240 struct mv643xx_eth_private *mp = netdev_priv(dev); 1350 struct mv643xx_eth_private *mp = netdev_priv(dev);
1241 1351
1242 return mii_nway_restart(&mp->mii); 1352 return genphy_restart_aneg(mp->phy);
1243} 1353}
1244 1354
1245static int mv643xx_eth_nway_reset_phyless(struct net_device *dev) 1355static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
@@ -1249,14 +1359,7 @@ static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
1249 1359
1250static u32 mv643xx_eth_get_link(struct net_device *dev) 1360static u32 mv643xx_eth_get_link(struct net_device *dev)
1251{ 1361{
1252 struct mv643xx_eth_private *mp = netdev_priv(dev); 1362 return !!netif_carrier_ok(dev);
1253
1254 return mii_link_ok(&mp->mii);
1255}
1256
1257static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
1258{
1259 return 1;
1260} 1363}
1261 1364
1262static void mv643xx_eth_get_strings(struct net_device *dev, 1365static void mv643xx_eth_get_strings(struct net_device *dev,
@@ -1277,9 +1380,10 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1277 struct ethtool_stats *stats, 1380 struct ethtool_stats *stats,
1278 uint64_t *data) 1381 uint64_t *data)
1279{ 1382{
1280 struct mv643xx_eth_private *mp = dev->priv; 1383 struct mv643xx_eth_private *mp = netdev_priv(dev);
1281 int i; 1384 int i;
1282 1385
1386 mv643xx_eth_get_stats(dev);
1283 mib_counters_update(mp); 1387 mib_counters_update(mp);
1284 1388
1285 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1389 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
@@ -1323,7 +1427,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
1323 .set_settings = mv643xx_eth_set_settings_phyless, 1427 .set_settings = mv643xx_eth_set_settings_phyless,
1324 .get_drvinfo = mv643xx_eth_get_drvinfo, 1428 .get_drvinfo = mv643xx_eth_get_drvinfo,
1325 .nway_reset = mv643xx_eth_nway_reset_phyless, 1429 .nway_reset = mv643xx_eth_nway_reset_phyless,
1326 .get_link = mv643xx_eth_get_link_phyless, 1430 .get_link = mv643xx_eth_get_link,
1327 .set_sg = ethtool_op_set_sg, 1431 .set_sg = ethtool_op_set_sg,
1328 .get_strings = mv643xx_eth_get_strings, 1432 .get_strings = mv643xx_eth_get_strings,
1329 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1433 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
@@ -1487,7 +1591,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1487 1591
1488 size = rxq->rx_ring_size * sizeof(struct rx_desc); 1592 size = rxq->rx_ring_size * sizeof(struct rx_desc);
1489 1593
1490 if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) { 1594 if (index == 0 && size <= mp->rx_desc_sram_size) {
1491 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1595 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1492 mp->rx_desc_sram_size); 1596 mp->rx_desc_sram_size);
1493 rxq->rx_desc_dma = mp->rx_desc_sram_addr; 1597 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
@@ -1515,20 +1619,21 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1515 1619
1516 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1620 rx_desc = (struct rx_desc *)rxq->rx_desc_area;
1517 for (i = 0; i < rxq->rx_ring_size; i++) { 1621 for (i = 0; i < rxq->rx_ring_size; i++) {
1518 int nexti = (i + 1) % rxq->rx_ring_size; 1622 int nexti;
1623
1624 nexti = i + 1;
1625 if (nexti == rxq->rx_ring_size)
1626 nexti = 0;
1627
1519 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 1628 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
1520 nexti * sizeof(struct rx_desc); 1629 nexti * sizeof(struct rx_desc);
1521 } 1630 }
1522 1631
1523 init_timer(&rxq->rx_oom);
1524 rxq->rx_oom.data = (unsigned long)rxq;
1525 rxq->rx_oom.function = rxq_refill_timer_wrapper;
1526
1527 return 0; 1632 return 0;
1528 1633
1529 1634
1530out_free: 1635out_free:
1531 if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) 1636 if (index == 0 && size <= mp->rx_desc_sram_size)
1532 iounmap(rxq->rx_desc_area); 1637 iounmap(rxq->rx_desc_area);
1533 else 1638 else
1534 dma_free_coherent(NULL, size, 1639 dma_free_coherent(NULL, size,
@@ -1546,8 +1651,6 @@ static void rxq_deinit(struct rx_queue *rxq)
1546 1651
1547 rxq_disable(rxq); 1652 rxq_disable(rxq);
1548 1653
1549 del_timer_sync(&rxq->rx_oom);
1550
1551 for (i = 0; i < rxq->rx_ring_size; i++) { 1654 for (i = 0; i < rxq->rx_ring_size; i++) {
1552 if (rxq->rx_skb[i]) { 1655 if (rxq->rx_skb[i]) {
1553 dev_kfree_skb(rxq->rx_skb[i]); 1656 dev_kfree_skb(rxq->rx_skb[i]);
@@ -1561,7 +1664,7 @@ static void rxq_deinit(struct rx_queue *rxq)
1561 rxq->rx_desc_count); 1664 rxq->rx_desc_count);
1562 } 1665 }
1563 1666
1564 if (rxq->index == mp->rxq_primary && 1667 if (rxq->index == 0 &&
1565 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 1668 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
1566 iounmap(rxq->rx_desc_area); 1669 iounmap(rxq->rx_desc_area);
1567 else 1670 else
@@ -1588,7 +1691,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1588 1691
1589 size = txq->tx_ring_size * sizeof(struct tx_desc); 1692 size = txq->tx_ring_size * sizeof(struct tx_desc);
1590 1693
1591 if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) { 1694 if (index == 0 && size <= mp->tx_desc_sram_size) {
1592 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 1695 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
1593 mp->tx_desc_sram_size); 1696 mp->tx_desc_sram_size);
1594 txq->tx_desc_dma = mp->tx_desc_sram_addr; 1697 txq->tx_desc_dma = mp->tx_desc_sram_addr;
@@ -1601,120 +1704,97 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1601 if (txq->tx_desc_area == NULL) { 1704 if (txq->tx_desc_area == NULL) {
1602 dev_printk(KERN_ERR, &mp->dev->dev, 1705 dev_printk(KERN_ERR, &mp->dev->dev,
1603 "can't allocate tx ring (%d bytes)\n", size); 1706 "can't allocate tx ring (%d bytes)\n", size);
1604 goto out; 1707 return -ENOMEM;
1605 } 1708 }
1606 memset(txq->tx_desc_area, 0, size); 1709 memset(txq->tx_desc_area, 0, size);
1607 1710
1608 txq->tx_desc_area_size = size; 1711 txq->tx_desc_area_size = size;
1609 txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
1610 GFP_KERNEL);
1611 if (txq->tx_skb == NULL) {
1612 dev_printk(KERN_ERR, &mp->dev->dev,
1613 "can't allocate tx skb ring\n");
1614 goto out_free;
1615 }
1616 1712
1617 tx_desc = (struct tx_desc *)txq->tx_desc_area; 1713 tx_desc = (struct tx_desc *)txq->tx_desc_area;
1618 for (i = 0; i < txq->tx_ring_size; i++) { 1714 for (i = 0; i < txq->tx_ring_size; i++) {
1619 struct tx_desc *txd = tx_desc + i; 1715 struct tx_desc *txd = tx_desc + i;
1620 int nexti = (i + 1) % txq->tx_ring_size; 1716 int nexti;
1717
1718 nexti = i + 1;
1719 if (nexti == txq->tx_ring_size)
1720 nexti = 0;
1621 1721
1622 txd->cmd_sts = 0; 1722 txd->cmd_sts = 0;
1623 txd->next_desc_ptr = txq->tx_desc_dma + 1723 txd->next_desc_ptr = txq->tx_desc_dma +
1624 nexti * sizeof(struct tx_desc); 1724 nexti * sizeof(struct tx_desc);
1625 } 1725 }
1626 1726
1627 return 0; 1727 skb_queue_head_init(&txq->tx_skb);
1628
1629 1728
1630out_free: 1729 return 0;
1631 if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
1632 iounmap(txq->tx_desc_area);
1633 else
1634 dma_free_coherent(NULL, size,
1635 txq->tx_desc_area,
1636 txq->tx_desc_dma);
1637
1638out:
1639 return -ENOMEM;
1640} 1730}
1641 1731
1642static void txq_reclaim(struct tx_queue *txq, int force) 1732static void txq_deinit(struct tx_queue *txq)
1643{ 1733{
1644 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1734 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1645 unsigned long flags;
1646 1735
1647 spin_lock_irqsave(&mp->lock, flags); 1736 txq_disable(txq);
1648 while (txq->tx_desc_count > 0) { 1737 txq_reclaim(txq, txq->tx_ring_size, 1);
1649 int tx_index;
1650 struct tx_desc *desc;
1651 u32 cmd_sts;
1652 struct sk_buff *skb;
1653 dma_addr_t addr;
1654 int count;
1655
1656 tx_index = txq->tx_used_desc;
1657 desc = &txq->tx_desc_area[tx_index];
1658 cmd_sts = desc->cmd_sts;
1659 1738
1660 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 1739 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
1661 if (!force)
1662 break;
1663 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
1664 }
1665 1740
1666 txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size; 1741 if (txq->index == 0 &&
1667 txq->tx_desc_count--; 1742 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
1743 iounmap(txq->tx_desc_area);
1744 else
1745 dma_free_coherent(NULL, txq->tx_desc_area_size,
1746 txq->tx_desc_area, txq->tx_desc_dma);
1747}
1668 1748
1669 addr = desc->buf_ptr;
1670 count = desc->byte_cnt;
1671 skb = txq->tx_skb[tx_index];
1672 txq->tx_skb[tx_index] = NULL;
1673 1749
1674 if (cmd_sts & ERROR_SUMMARY) { 1750/* netdev ops and related ***************************************************/
1675 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 1751static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
1676 mp->dev->stats.tx_errors++; 1752{
1677 } 1753 u32 int_cause;
1754 u32 int_cause_ext;
1678 1755
1679 /* 1756 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
1680 * Drop mp->lock while we free the skb. 1757 (INT_TX_END | INT_RX | INT_EXT);
1681 */ 1758 if (int_cause == 0)
1682 spin_unlock_irqrestore(&mp->lock, flags); 1759 return 0;
1683 1760
1684 if (cmd_sts & TX_FIRST_DESC) 1761 int_cause_ext = 0;
1685 dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); 1762 if (int_cause & INT_EXT)
1686 else 1763 int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num));
1687 dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
1688 1764
1689 if (skb) 1765 int_cause &= INT_TX_END | INT_RX;
1690 dev_kfree_skb_irq(skb); 1766 if (int_cause) {
1767 wrl(mp, INT_CAUSE(mp->port_num), ~int_cause);
1768 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
1769 ~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff);
1770 mp->work_rx |= (int_cause & INT_RX) >> 2;
1771 }
1691 1772
1692 spin_lock_irqsave(&mp->lock, flags); 1773 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
1774 if (int_cause_ext) {
1775 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1776 if (int_cause_ext & INT_EXT_LINK_PHY)
1777 mp->work_link = 1;
1778 mp->work_tx |= int_cause_ext & INT_EXT_TX;
1693 } 1779 }
1694 spin_unlock_irqrestore(&mp->lock, flags); 1780
1781 return 1;
1695} 1782}
1696 1783
1697static void txq_deinit(struct tx_queue *txq) 1784static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1698{ 1785{
1699 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1786 struct net_device *dev = (struct net_device *)dev_id;
1700 1787 struct mv643xx_eth_private *mp = netdev_priv(dev);
1701 txq_disable(txq);
1702 txq_reclaim(txq, 1);
1703 1788
1704 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 1789 if (unlikely(!mv643xx_eth_collect_events(mp)))
1790 return IRQ_NONE;
1705 1791
1706 if (txq->index == mp->txq_primary && 1792 wrl(mp, INT_MASK(mp->port_num), 0);
1707 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 1793 napi_schedule(&mp->napi);
1708 iounmap(txq->tx_desc_area);
1709 else
1710 dma_free_coherent(NULL, txq->tx_desc_area_size,
1711 txq->tx_desc_area, txq->tx_desc_dma);
1712 1794
1713 kfree(txq->tx_skb); 1795 return IRQ_HANDLED;
1714} 1796}
1715 1797
1716
1717/* netdev ops and related ***************************************************/
1718static void handle_link_event(struct mv643xx_eth_private *mp) 1798static void handle_link_event(struct mv643xx_eth_private *mp)
1719{ 1799{
1720 struct net_device *dev = mp->dev; 1800 struct net_device *dev = mp->dev;
@@ -1731,15 +1811,12 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
1731 printk(KERN_INFO "%s: link down\n", dev->name); 1811 printk(KERN_INFO "%s: link down\n", dev->name);
1732 1812
1733 netif_carrier_off(dev); 1813 netif_carrier_off(dev);
1734 netif_stop_queue(dev);
1735 1814
1736 for (i = 0; i < 8; i++) { 1815 for (i = 0; i < mp->txq_count; i++) {
1737 struct tx_queue *txq = mp->txq + i; 1816 struct tx_queue *txq = mp->txq + i;
1738 1817
1739 if (mp->txq_mask & (1 << i)) { 1818 txq_reclaim(txq, txq->tx_ring_size, 1);
1740 txq_reclaim(txq, 1); 1819 txq_reset_hw_ptr(txq);
1741 txq_reset_hw_ptr(txq);
1742 }
1743 } 1820 }
1744 } 1821 }
1745 return; 1822 return;
@@ -1767,119 +1844,93 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
1767 speed, duplex ? "full" : "half", 1844 speed, duplex ? "full" : "half",
1768 fc ? "en" : "dis"); 1845 fc ? "en" : "dis");
1769 1846
1770 if (!netif_carrier_ok(dev)) { 1847 if (!netif_carrier_ok(dev))
1771 netif_carrier_on(dev); 1848 netif_carrier_on(dev);
1772 netif_wake_queue(dev);
1773 }
1774} 1849}
1775 1850
1776static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 1851static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
1777{ 1852{
1778 struct net_device *dev = (struct net_device *)dev_id; 1853 struct mv643xx_eth_private *mp;
1779 struct mv643xx_eth_private *mp = netdev_priv(dev); 1854 int work_done;
1780 u32 int_cause;
1781 u32 int_cause_ext;
1782
1783 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
1784 (INT_TX_END | INT_RX | INT_EXT);
1785 if (int_cause == 0)
1786 return IRQ_NONE;
1787
1788 int_cause_ext = 0;
1789 if (int_cause & INT_EXT) {
1790 int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
1791 & (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
1792 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1793 }
1794 1855
1795 if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) 1856 mp = container_of(napi, struct mv643xx_eth_private, napi);
1796 handle_link_event(mp);
1797 1857
1798 /* 1858 mp->work_rx_refill |= mp->work_rx_oom;
1799 * RxBuffer or RxError set for any of the 8 queues? 1859 mp->work_rx_oom = 0;
1800 */
1801#ifdef MV643XX_ETH_NAPI
1802 if (int_cause & INT_RX) {
1803 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
1804 wrl(mp, INT_MASK(mp->port_num), 0x00000000);
1805 rdl(mp, INT_MASK(mp->port_num));
1806 1860
1807 netif_rx_schedule(dev, &mp->napi); 1861 work_done = 0;
1808 } 1862 while (work_done < budget) {
1809#else 1863 u8 queue_mask;
1810 if (int_cause & INT_RX) { 1864 int queue;
1811 int i; 1865 int work_tbd;
1812 1866
1813 for (i = 7; i >= 0; i--) 1867 if (mp->work_link) {
1814 if (mp->rxq_mask & (1 << i)) 1868 mp->work_link = 0;
1815 rxq_process(mp->rxq + i, INT_MAX); 1869 handle_link_event(mp);
1816 } 1870 continue;
1817#endif 1871 }
1818
1819 /*
1820 * TxBuffer or TxError set for any of the 8 queues?
1821 */
1822 if (int_cause_ext & INT_EXT_TX) {
1823 int i;
1824 1872
1825 for (i = 0; i < 8; i++) 1873 queue_mask = mp->work_tx | mp->work_tx_end |
1826 if (mp->txq_mask & (1 << i)) 1874 mp->work_rx | mp->work_rx_refill;
1827 txq_reclaim(mp->txq + i, 0); 1875 if (!queue_mask) {
1876 if (mv643xx_eth_collect_events(mp))
1877 continue;
1878 break;
1879 }
1828 1880
1829 /* 1881 queue = fls(queue_mask) - 1;
1830 * Enough space again in the primary TX queue for a 1882 queue_mask = 1 << queue;
1831 * full packet? 1883
1832 */ 1884 work_tbd = budget - work_done;
1833 if (netif_carrier_ok(dev)) { 1885 if (work_tbd > 16)
1834 spin_lock(&mp->lock); 1886 work_tbd = 16;
1835 __txq_maybe_wake(mp->txq + mp->txq_primary); 1887
1836 spin_unlock(&mp->lock); 1888 if (mp->work_tx_end & queue_mask) {
1889 txq_kick(mp->txq + queue);
1890 } else if (mp->work_tx & queue_mask) {
1891 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
1892 txq_maybe_wake(mp->txq + queue);
1893 } else if (mp->work_rx & queue_mask) {
1894 work_done += rxq_process(mp->rxq + queue, work_tbd);
1895 } else if (mp->work_rx_refill & queue_mask) {
1896 work_done += rxq_refill(mp->rxq + queue, work_tbd);
1897 } else {
1898 BUG();
1837 } 1899 }
1838 } 1900 }
1839 1901
1840 /* 1902 if (work_done < budget) {
1841 * Any TxEnd interrupts? 1903 if (mp->work_rx_oom)
1842 */ 1904 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
1843 if (int_cause & INT_TX_END) { 1905 napi_complete(napi);
1844 int i; 1906 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
1845 1907 }
1846 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
1847
1848 spin_lock(&mp->lock);
1849 for (i = 0; i < 8; i++) {
1850 struct tx_queue *txq = mp->txq + i;
1851 u32 hw_desc_ptr;
1852 u32 expected_ptr;
1853
1854 if ((int_cause & (INT_TX_END_0 << i)) == 0)
1855 continue;
1856 1908
1857 hw_desc_ptr = 1909 return work_done;
1858 rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i)); 1910}
1859 expected_ptr = (u32)txq->tx_desc_dma +
1860 txq->tx_curr_desc * sizeof(struct tx_desc);
1861 1911
1862 if (hw_desc_ptr != expected_ptr) 1912static inline void oom_timer_wrapper(unsigned long data)
1863 txq_enable(txq); 1913{
1864 } 1914 struct mv643xx_eth_private *mp = (void *)data;
1865 spin_unlock(&mp->lock);
1866 }
1867 1915
1868 return IRQ_HANDLED; 1916 napi_schedule(&mp->napi);
1869} 1917}
1870 1918
1871static void phy_reset(struct mv643xx_eth_private *mp) 1919static void phy_reset(struct mv643xx_eth_private *mp)
1872{ 1920{
1873 unsigned int data; 1921 int data;
1922
1923 data = phy_read(mp->phy, MII_BMCR);
1924 if (data < 0)
1925 return;
1874 1926
1875 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
1876 data |= BMCR_RESET; 1927 data |= BMCR_RESET;
1877 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data); 1928 if (phy_write(mp->phy, MII_BMCR, data) < 0)
1929 return;
1878 1930
1879 do { 1931 do {
1880 udelay(1); 1932 data = phy_read(mp->phy, MII_BMCR);
1881 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data); 1933 } while (data >= 0 && data & BMCR_RESET);
1882 } while (data & BMCR_RESET);
1883} 1934}
1884 1935
1885static void port_start(struct mv643xx_eth_private *mp) 1936static void port_start(struct mv643xx_eth_private *mp)
@@ -1890,7 +1941,7 @@ static void port_start(struct mv643xx_eth_private *mp)
1890 /* 1941 /*
1891 * Perform PHY reset, if there is a PHY. 1942 * Perform PHY reset, if there is a PHY.
1892 */ 1943 */
1893 if (mp->phy_addr != -1) { 1944 if (mp->phy != NULL) {
1894 struct ethtool_cmd cmd; 1945 struct ethtool_cmd cmd;
1895 1946
1896 mv643xx_eth_get_settings(mp->dev, &cmd); 1947 mv643xx_eth_get_settings(mp->dev, &cmd);
@@ -1907,7 +1958,7 @@ static void port_start(struct mv643xx_eth_private *mp)
1907 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 1958 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1908 1959
1909 pscr |= DO_NOT_FORCE_LINK_FAIL; 1960 pscr |= DO_NOT_FORCE_LINK_FAIL;
1910 if (mp->phy_addr == -1) 1961 if (mp->phy == NULL)
1911 pscr |= FORCE_LINK_PASS; 1962 pscr |= FORCE_LINK_PASS;
1912 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 1963 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1913 1964
@@ -1917,12 +1968,9 @@ static void port_start(struct mv643xx_eth_private *mp)
1917 * Configure TX path and queues. 1968 * Configure TX path and queues.
1918 */ 1969 */
1919 tx_set_rate(mp, 1000000000, 16777216); 1970 tx_set_rate(mp, 1000000000, 16777216);
1920 for (i = 0; i < 8; i++) { 1971 for (i = 0; i < mp->txq_count; i++) {
1921 struct tx_queue *txq = mp->txq + i; 1972 struct tx_queue *txq = mp->txq + i;
1922 1973
1923 if ((mp->txq_mask & (1 << i)) == 0)
1924 continue;
1925
1926 txq_reset_hw_ptr(txq); 1974 txq_reset_hw_ptr(txq);
1927 txq_set_rate(txq, 1000000000, 16777216); 1975 txq_set_rate(txq, 1000000000, 16777216);
1928 txq_set_fixed_prio_mode(txq); 1976 txq_set_fixed_prio_mode(txq);
@@ -1935,9 +1983,10 @@ static void port_start(struct mv643xx_eth_private *mp)
1935 1983
1936 /* 1984 /*
1937 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 1985 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
1938 * frames to RX queue #0. 1986 * frames to RX queue #0, and include the pseudo-header when
1987 * calculating receive checksums.
1939 */ 1988 */
1940 wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000); 1989 wrl(mp, PORT_CONFIG(mp->port_num), 0x02000000);
1941 1990
1942 /* 1991 /*
1943 * Treat BPDUs as normal multicasts, and disable partition mode. 1992 * Treat BPDUs as normal multicasts, and disable partition mode.
@@ -1947,14 +1996,11 @@ static void port_start(struct mv643xx_eth_private *mp)
1947 /* 1996 /*
1948 * Enable the receive queues. 1997 * Enable the receive queues.
1949 */ 1998 */
1950 for (i = 0; i < 8; i++) { 1999 for (i = 0; i < mp->rxq_count; i++) {
1951 struct rx_queue *rxq = mp->rxq + i; 2000 struct rx_queue *rxq = mp->rxq + i;
1952 int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i); 2001 int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
1953 u32 addr; 2002 u32 addr;
1954 2003
1955 if ((mp->rxq_mask & (1 << i)) == 0)
1956 continue;
1957
1958 addr = (u32)rxq->rx_desc_dma; 2004 addr = (u32)rxq->rx_desc_dma;
1959 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2005 addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
1960 wrl(mp, off, addr); 2006 wrl(mp, off, addr);
@@ -1993,6 +2039,26 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
1993 wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4); 2039 wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
1994} 2040}
1995 2041
2042static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2043{
2044 int skb_size;
2045
2046 /*
2047 * Reserve 2+14 bytes for an ethernet header (the hardware
2048 * automatically prepends 2 bytes of dummy data to each
2049 * received packet), 16 bytes for up to four VLAN tags, and
2050 * 4 bytes for the trailing FCS -- 36 bytes total.
2051 */
2052 skb_size = mp->dev->mtu + 36;
2053
2054 /*
2055 * Make sure that the skb size is a multiple of 8 bytes, as
2056 * the lower three bits of the receive descriptor's buffer
2057 * size field are ignored by the hardware.
2058 */
2059 mp->skb_size = (skb_size + 7) & ~7;
2060}
2061
1996static int mv643xx_eth_open(struct net_device *dev) 2062static int mv643xx_eth_open(struct net_device *dev)
1997{ 2063{
1998 struct mv643xx_eth_private *mp = netdev_priv(dev); 2064 struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -2004,8 +2070,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2004 rdl(mp, INT_CAUSE_EXT(mp->port_num)); 2070 rdl(mp, INT_CAUSE_EXT(mp->port_num));
2005 2071
2006 err = request_irq(dev->irq, mv643xx_eth_irq, 2072 err = request_irq(dev->irq, mv643xx_eth_irq,
2007 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 2073 IRQF_SHARED, dev->name, dev);
2008 dev->name, dev);
2009 if (err) { 2074 if (err) {
2010 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2075 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
2011 return -EAGAIN; 2076 return -EAGAIN;
@@ -2013,58 +2078,53 @@ static int mv643xx_eth_open(struct net_device *dev)
2013 2078
2014 init_mac_tables(mp); 2079 init_mac_tables(mp);
2015 2080
2016 for (i = 0; i < 8; i++) { 2081 mv643xx_eth_recalc_skb_size(mp);
2017 if ((mp->rxq_mask & (1 << i)) == 0)
2018 continue;
2019 2082
2083 napi_enable(&mp->napi);
2084
2085 skb_queue_head_init(&mp->rx_recycle);
2086
2087 for (i = 0; i < mp->rxq_count; i++) {
2020 err = rxq_init(mp, i); 2088 err = rxq_init(mp, i);
2021 if (err) { 2089 if (err) {
2022 while (--i >= 0) 2090 while (--i >= 0)
2023 if (mp->rxq_mask & (1 << i)) 2091 rxq_deinit(mp->rxq + i);
2024 rxq_deinit(mp->rxq + i);
2025 goto out; 2092 goto out;
2026 } 2093 }
2027 2094
2028 rxq_refill(mp->rxq + i); 2095 rxq_refill(mp->rxq + i, INT_MAX);
2029 } 2096 }
2030 2097
2031 for (i = 0; i < 8; i++) { 2098 if (mp->work_rx_oom) {
2032 if ((mp->txq_mask & (1 << i)) == 0) 2099 mp->rx_oom.expires = jiffies + (HZ / 10);
2033 continue; 2100 add_timer(&mp->rx_oom);
2101 }
2034 2102
2103 for (i = 0; i < mp->txq_count; i++) {
2035 err = txq_init(mp, i); 2104 err = txq_init(mp, i);
2036 if (err) { 2105 if (err) {
2037 while (--i >= 0) 2106 while (--i >= 0)
2038 if (mp->txq_mask & (1 << i)) 2107 txq_deinit(mp->txq + i);
2039 txq_deinit(mp->txq + i);
2040 goto out_free; 2108 goto out_free;
2041 } 2109 }
2042 } 2110 }
2043 2111
2044#ifdef MV643XX_ETH_NAPI
2045 napi_enable(&mp->napi);
2046#endif
2047
2048 netif_carrier_off(dev); 2112 netif_carrier_off(dev);
2049 netif_stop_queue(dev);
2050 2113
2051 port_start(mp); 2114 port_start(mp);
2052 2115
2053 set_rx_coal(mp, 0); 2116 set_rx_coal(mp, 0);
2054 set_tx_coal(mp, 0); 2117 set_tx_coal(mp, 0);
2055 2118
2056 wrl(mp, INT_MASK_EXT(mp->port_num), 2119 wrl(mp, INT_MASK_EXT(mp->port_num), INT_EXT_LINK_PHY | INT_EXT_TX);
2057 INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
2058
2059 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 2120 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
2060 2121
2061 return 0; 2122 return 0;
2062 2123
2063 2124
2064out_free: 2125out_free:
2065 for (i = 0; i < 8; i++) 2126 for (i = 0; i < mp->rxq_count; i++)
2066 if (mp->rxq_mask & (1 << i)) 2127 rxq_deinit(mp->rxq + i);
2067 rxq_deinit(mp->rxq + i);
2068out: 2128out:
2069 free_irq(dev->irq, dev); 2129 free_irq(dev->irq, dev);
2070 2130
@@ -2076,12 +2136,10 @@ static void port_reset(struct mv643xx_eth_private *mp)
2076 unsigned int data; 2136 unsigned int data;
2077 int i; 2137 int i;
2078 2138
2079 for (i = 0; i < 8; i++) { 2139 for (i = 0; i < mp->rxq_count; i++)
2080 if (mp->rxq_mask & (1 << i)) 2140 rxq_disable(mp->rxq + i);
2081 rxq_disable(mp->rxq + i); 2141 for (i = 0; i < mp->txq_count; i++)
2082 if (mp->txq_mask & (1 << i)) 2142 txq_disable(mp->txq + i);
2083 txq_disable(mp->txq + i);
2084 }
2085 2143
2086 while (1) { 2144 while (1) {
2087 u32 ps = rdl(mp, PORT_STATUS(mp->port_num)); 2145 u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
@@ -2107,23 +2165,26 @@ static int mv643xx_eth_stop(struct net_device *dev)
2107 wrl(mp, INT_MASK(mp->port_num), 0x00000000); 2165 wrl(mp, INT_MASK(mp->port_num), 0x00000000);
2108 rdl(mp, INT_MASK(mp->port_num)); 2166 rdl(mp, INT_MASK(mp->port_num));
2109 2167
2110#ifdef MV643XX_ETH_NAPI 2168 del_timer_sync(&mp->mib_counters_timer);
2169
2111 napi_disable(&mp->napi); 2170 napi_disable(&mp->napi);
2112#endif 2171
2172 del_timer_sync(&mp->rx_oom);
2173
2113 netif_carrier_off(dev); 2174 netif_carrier_off(dev);
2114 netif_stop_queue(dev);
2115 2175
2116 free_irq(dev->irq, dev); 2176 free_irq(dev->irq, dev);
2117 2177
2118 port_reset(mp); 2178 port_reset(mp);
2179 mv643xx_eth_get_stats(dev);
2119 mib_counters_update(mp); 2180 mib_counters_update(mp);
2120 2181
2121 for (i = 0; i < 8; i++) { 2182 skb_queue_purge(&mp->rx_recycle);
2122 if (mp->rxq_mask & (1 << i)) 2183
2123 rxq_deinit(mp->rxq + i); 2184 for (i = 0; i < mp->rxq_count; i++)
2124 if (mp->txq_mask & (1 << i)) 2185 rxq_deinit(mp->rxq + i);
2125 txq_deinit(mp->txq + i); 2186 for (i = 0; i < mp->txq_count; i++)
2126 } 2187 txq_deinit(mp->txq + i);
2127 2188
2128 return 0; 2189 return 0;
2129} 2190}
@@ -2132,8 +2193,8 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2132{ 2193{
2133 struct mv643xx_eth_private *mp = netdev_priv(dev); 2194 struct mv643xx_eth_private *mp = netdev_priv(dev);
2134 2195
2135 if (mp->phy_addr != -1) 2196 if (mp->phy != NULL)
2136 return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); 2197 return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);
2137 2198
2138 return -EOPNOTSUPP; 2199 return -EOPNOTSUPP;
2139} 2200}
@@ -2146,6 +2207,7 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
2146 return -EINVAL; 2207 return -EINVAL;
2147 2208
2148 dev->mtu = new_mtu; 2209 dev->mtu = new_mtu;
2210 mv643xx_eth_recalc_skb_size(mp);
2149 tx_set_rate(mp, 1000000000, 16777216); 2211 tx_set_rate(mp, 1000000000, 16777216);
2150 2212
2151 if (!netif_running(dev)) 2213 if (!netif_running(dev))
@@ -2173,12 +2235,10 @@ static void tx_timeout_task(struct work_struct *ugly)
2173 2235
2174 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2236 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2175 if (netif_running(mp->dev)) { 2237 if (netif_running(mp->dev)) {
2176 netif_stop_queue(mp->dev); 2238 netif_tx_stop_all_queues(mp->dev);
2177
2178 port_reset(mp); 2239 port_reset(mp);
2179 port_start(mp); 2240 port_start(mp);
2180 2241 netif_tx_wake_all_queues(mp->dev);
2181 __txq_maybe_wake(mp->txq + mp->txq_primary);
2182 } 2242 }
2183} 2243}
2184 2244
@@ -2205,22 +2265,6 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
2205} 2265}
2206#endif 2266#endif
2207 2267
2208static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
2209{
2210 struct mv643xx_eth_private *mp = netdev_priv(dev);
2211 int val;
2212
2213 smi_reg_read(mp, addr, reg, &val);
2214
2215 return val;
2216}
2217
2218static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
2219{
2220 struct mv643xx_eth_private *mp = netdev_priv(dev);
2221 smi_reg_write(mp, addr, reg, val);
2222}
2223
2224 2268
2225/* platform glue ************************************************************/ 2269/* platform glue ************************************************************/
2226static void 2270static void
@@ -2272,14 +2316,20 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2272 msp->extended_rx_coal_limit = 0; 2316 msp->extended_rx_coal_limit = 0;
2273 2317
2274 /* 2318 /*
2275 * Check whether the TX rate control registers are in the 2319 * Check whether the MAC supports TX rate control, and if
2276 * old or the new place. 2320 * yes, whether its associated registers are in the old or
2321 * the new place.
2277 */ 2322 */
2278 writel(1, msp->base + TX_BW_MTU_MOVED(0)); 2323 writel(1, msp->base + TX_BW_MTU_MOVED(0));
2279 if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) 2324 if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) {
2280 msp->tx_bw_control_moved = 1; 2325 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2281 else 2326 } else {
2282 msp->tx_bw_control_moved = 0; 2327 writel(7, msp->base + TX_BW_RATE(0));
2328 if (readl(msp->base + TX_BW_RATE(0)) & 7)
2329 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2330 else
2331 msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2332 }
2283} 2333}
2284 2334
2285static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2335static int mv643xx_eth_shared_probe(struct platform_device *pdev)
@@ -2309,7 +2359,41 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2309 if (msp->base == NULL) 2359 if (msp->base == NULL)
2310 goto out_free; 2360 goto out_free;
2311 2361
2312 spin_lock_init(&msp->phy_lock); 2362 /*
2363 * Set up and register SMI bus.
2364 */
2365 if (pd == NULL || pd->shared_smi == NULL) {
2366 msp->smi_bus.priv = msp;
2367 msp->smi_bus.name = "mv643xx_eth smi";
2368 msp->smi_bus.read = smi_bus_read;
2369 msp->smi_bus.write = smi_bus_write,
2370 snprintf(msp->smi_bus.id, MII_BUS_ID_SIZE, "%d", pdev->id);
2371 msp->smi_bus.dev = &pdev->dev;
2372 msp->smi_bus.phy_mask = 0xffffffff;
2373 if (mdiobus_register(&msp->smi_bus) < 0)
2374 goto out_unmap;
2375 msp->smi = msp;
2376 } else {
2377 msp->smi = platform_get_drvdata(pd->shared_smi);
2378 }
2379
2380 msp->err_interrupt = NO_IRQ;
2381 init_waitqueue_head(&msp->smi_busy_wait);
2382
2383 /*
2384 * Check whether the error interrupt is hooked up.
2385 */
2386 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2387 if (res != NULL) {
2388 int err;
2389
2390 err = request_irq(res->start, mv643xx_eth_err_irq,
2391 IRQF_SHARED, "mv643xx_eth", msp);
2392 if (!err) {
2393 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
2394 msp->err_interrupt = res->start;
2395 }
2396 }
2313 2397
2314 /* 2398 /*
2315 * (Re-)program MBUS remapping windows if we are asked to. 2399 * (Re-)program MBUS remapping windows if we are asked to.
@@ -2327,6 +2411,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2327 2411
2328 return 0; 2412 return 0;
2329 2413
2414out_unmap:
2415 iounmap(msp->base);
2330out_free: 2416out_free:
2331 kfree(msp); 2417 kfree(msp);
2332out: 2418out:
@@ -2336,7 +2422,12 @@ out:
2336static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2422static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2337{ 2423{
2338 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2424 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2425 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2339 2426
2427 if (pd == NULL || pd->shared_smi == NULL)
2428 mdiobus_unregister(&msp->smi_bus);
2429 if (msp->err_interrupt != NO_IRQ)
2430 free_irq(msp->err_interrupt, msp);
2340 iounmap(msp->base); 2431 iounmap(msp->base);
2341 kfree(msp); 2432 kfree(msp);
2342 2433
@@ -2382,33 +2473,13 @@ static void set_params(struct mv643xx_eth_private *mp,
2382 else 2473 else
2383 uc_addr_get(mp, dev->dev_addr); 2474 uc_addr_get(mp, dev->dev_addr);
2384 2475
2385 if (pd->phy_addr == -1) {
2386 mp->shared_smi = NULL;
2387 mp->phy_addr = -1;
2388 } else {
2389 mp->shared_smi = mp->shared;
2390 if (pd->shared_smi != NULL)
2391 mp->shared_smi = platform_get_drvdata(pd->shared_smi);
2392
2393 if (pd->force_phy_addr || pd->phy_addr) {
2394 mp->phy_addr = pd->phy_addr & 0x3f;
2395 phy_addr_set(mp, mp->phy_addr);
2396 } else {
2397 mp->phy_addr = phy_addr_get(mp);
2398 }
2399 }
2400
2401 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2476 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2402 if (pd->rx_queue_size) 2477 if (pd->rx_queue_size)
2403 mp->default_rx_ring_size = pd->rx_queue_size; 2478 mp->default_rx_ring_size = pd->rx_queue_size;
2404 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2479 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2405 mp->rx_desc_sram_size = pd->rx_sram_size; 2480 mp->rx_desc_sram_size = pd->rx_sram_size;
2406 2481
2407 if (pd->rx_queue_mask) 2482 mp->rxq_count = pd->rx_queue_count ? : 1;
2408 mp->rxq_mask = pd->rx_queue_mask;
2409 else
2410 mp->rxq_mask = 0x01;
2411 mp->rxq_primary = fls(mp->rxq_mask) - 1;
2412 2483
2413 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2484 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2414 if (pd->tx_queue_size) 2485 if (pd->tx_queue_size)
@@ -2416,76 +2487,63 @@ static void set_params(struct mv643xx_eth_private *mp,
2416 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2487 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2417 mp->tx_desc_sram_size = pd->tx_sram_size; 2488 mp->tx_desc_sram_size = pd->tx_sram_size;
2418 2489
2419 if (pd->tx_queue_mask) 2490 mp->txq_count = pd->tx_queue_count ? : 1;
2420 mp->txq_mask = pd->tx_queue_mask;
2421 else
2422 mp->txq_mask = 0x01;
2423 mp->txq_primary = fls(mp->txq_mask) - 1;
2424} 2491}
2425 2492
2426static int phy_detect(struct mv643xx_eth_private *mp) 2493static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2494 int phy_addr)
2427{ 2495{
2428 unsigned int data; 2496 struct mii_bus *bus = &mp->shared->smi->smi_bus;
2429 unsigned int data2; 2497 struct phy_device *phydev;
2498 int start;
2499 int num;
2500 int i;
2501
2502 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2503 start = phy_addr_get(mp) & 0x1f;
2504 num = 32;
2505 } else {
2506 start = phy_addr & 0x1f;
2507 num = 1;
2508 }
2430 2509
2431 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data); 2510 phydev = NULL;
2432 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE); 2511 for (i = 0; i < num; i++) {
2512 int addr = (start + i) & 0x1f;
2433 2513
2434 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2); 2514 if (bus->phy_map[addr] == NULL)
2435 if (((data ^ data2) & BMCR_ANENABLE) == 0) 2515 mdiobus_scan(bus, addr);
2436 return -ENODEV;
2437 2516
2438 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data); 2517 if (phydev == NULL) {
2518 phydev = bus->phy_map[addr];
2519 if (phydev != NULL)
2520 phy_addr_set(mp, addr);
2521 }
2522 }
2439 2523
2440 return 0; 2524 return phydev;
2441} 2525}
2442 2526
2443static int phy_init(struct mv643xx_eth_private *mp, 2527static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2444 struct mv643xx_eth_platform_data *pd)
2445{ 2528{
2446 struct ethtool_cmd cmd; 2529 struct phy_device *phy = mp->phy;
2447 int err;
2448 2530
2449 err = phy_detect(mp);
2450 if (err) {
2451 dev_printk(KERN_INFO, &mp->dev->dev,
2452 "no PHY detected at addr %d\n", mp->phy_addr);
2453 return err;
2454 }
2455 phy_reset(mp); 2531 phy_reset(mp);
2456 2532
2457 mp->mii.phy_id = mp->phy_addr; 2533 phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII);
2458 mp->mii.phy_id_mask = 0x3f; 2534
2459 mp->mii.reg_num_mask = 0x1f; 2535 if (speed == 0) {
2460 mp->mii.dev = mp->dev; 2536 phy->autoneg = AUTONEG_ENABLE;
2461 mp->mii.mdio_read = mv643xx_eth_mdio_read; 2537 phy->speed = 0;
2462 mp->mii.mdio_write = mv643xx_eth_mdio_write; 2538 phy->duplex = 0;
2463 2539 phy->advertising = phy->supported | ADVERTISED_Autoneg;
2464 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
2465
2466 memset(&cmd, 0, sizeof(cmd));
2467
2468 cmd.port = PORT_MII;
2469 cmd.transceiver = XCVR_INTERNAL;
2470 cmd.phy_address = mp->phy_addr;
2471 if (pd->speed == 0) {
2472 cmd.autoneg = AUTONEG_ENABLE;
2473 cmd.speed = SPEED_100;
2474 cmd.advertising = ADVERTISED_10baseT_Half |
2475 ADVERTISED_10baseT_Full |
2476 ADVERTISED_100baseT_Half |
2477 ADVERTISED_100baseT_Full;
2478 if (mp->mii.supports_gmii)
2479 cmd.advertising |= ADVERTISED_1000baseT_Full;
2480 } else { 2540 } else {
2481 cmd.autoneg = AUTONEG_DISABLE; 2541 phy->autoneg = AUTONEG_DISABLE;
2482 cmd.speed = pd->speed; 2542 phy->advertising = 0;
2483 cmd.duplex = pd->duplex; 2543 phy->speed = speed;
2544 phy->duplex = duplex;
2484 } 2545 }
2485 2546 phy_start_aneg(phy);
2486 mv643xx_eth_set_settings(mp->dev, &cmd);
2487
2488 return 0;
2489} 2547}
2490 2548
2491static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 2549static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
@@ -2499,7 +2557,7 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2499 } 2557 }
2500 2558
2501 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2559 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
2502 if (mp->phy_addr == -1) { 2560 if (mp->phy == NULL) {
2503 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2561 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2504 if (speed == SPEED_1000) 2562 if (speed == SPEED_1000)
2505 pscr |= SET_GMII_SPEED_TO_1000; 2563 pscr |= SET_GMII_SPEED_TO_1000;
@@ -2538,7 +2596,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2538 return -ENODEV; 2596 return -ENODEV;
2539 } 2597 }
2540 2598
2541 dev = alloc_etherdev(sizeof(struct mv643xx_eth_private)); 2599 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
2542 if (!dev) 2600 if (!dev)
2543 return -ENOMEM; 2601 return -ENOMEM;
2544 2602
@@ -2549,33 +2607,47 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2549 mp->port_num = pd->port_number; 2607 mp->port_num = pd->port_number;
2550 2608
2551 mp->dev = dev; 2609 mp->dev = dev;
2552#ifdef MV643XX_ETH_NAPI
2553 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
2554#endif
2555 2610
2556 set_params(mp, pd); 2611 set_params(mp, pd);
2612 dev->real_num_tx_queues = mp->txq_count;
2557 2613
2558 spin_lock_init(&mp->lock); 2614 if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
2559 2615 mp->phy = phy_scan(mp, pd->phy_addr);
2560 mib_counters_clear(mp);
2561 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2562
2563 if (mp->phy_addr != -1) {
2564 err = phy_init(mp, pd);
2565 if (err)
2566 goto out;
2567 2616
2617 if (mp->phy != NULL) {
2618 phy_init(mp, pd->speed, pd->duplex);
2568 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2619 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2569 } else { 2620 } else {
2570 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless); 2621 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
2571 } 2622 }
2623
2572 init_pscr(mp, pd->speed, pd->duplex); 2624 init_pscr(mp, pd->speed, pd->duplex);
2573 2625
2574 2626
2627 mib_counters_clear(mp);
2628
2629 init_timer(&mp->mib_counters_timer);
2630 mp->mib_counters_timer.data = (unsigned long)mp;
2631 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2632 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2633 add_timer(&mp->mib_counters_timer);
2634
2635 spin_lock_init(&mp->mib_counters_lock);
2636
2637 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2638
2639 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
2640
2641 init_timer(&mp->rx_oom);
2642 mp->rx_oom.data = (unsigned long)mp;
2643 mp->rx_oom.function = oom_timer_wrapper;
2644
2645
2575 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2646 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2576 BUG_ON(!res); 2647 BUG_ON(!res);
2577 dev->irq = res->start; 2648 dev->irq = res->start;
2578 2649
2650 dev->get_stats = mv643xx_eth_get_stats;
2579 dev->hard_start_xmit = mv643xx_eth_xmit; 2651 dev->hard_start_xmit = mv643xx_eth_xmit;
2580 dev->open = mv643xx_eth_open; 2652 dev->open = mv643xx_eth_open;
2581 dev->stop = mv643xx_eth_stop; 2653 dev->stop = mv643xx_eth_stop;
@@ -2590,14 +2662,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2590 dev->watchdog_timeo = 2 * HZ; 2662 dev->watchdog_timeo = 2 * HZ;
2591 dev->base_addr = 0; 2663 dev->base_addr = 0;
2592 2664
2593#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2594 /*
2595 * Zero copy can only work if we use Discovery II memory. Else, we will
2596 * have to map the buffers to ISA memory which is only 16 MB
2597 */
2598 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2665 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2599 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2666 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2600#endif
2601 2667
2602 SET_NETDEV_DEV(dev, &pdev->dev); 2668 SET_NETDEV_DEV(dev, &pdev->dev);
2603 2669
@@ -2611,16 +2677,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2611 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n", 2677 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
2612 mp->port_num, print_mac(mac, dev->dev_addr)); 2678 mp->port_num, print_mac(mac, dev->dev_addr));
2613 2679
2614 if (dev->features & NETIF_F_SG)
2615 dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");
2616
2617 if (dev->features & NETIF_F_IP_CSUM)
2618 dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");
2619
2620#ifdef MV643XX_ETH_NAPI
2621 dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
2622#endif
2623
2624 if (mp->tx_desc_sram_size > 0) 2680 if (mp->tx_desc_sram_size > 0)
2625 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2681 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
2626 2682
@@ -2637,6 +2693,8 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2637 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2693 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2638 2694
2639 unregister_netdev(mp->dev); 2695 unregister_netdev(mp->dev);
2696 if (mp->phy != NULL)
2697 phy_detach(mp->phy);
2640 flush_scheduled_work(); 2698 flush_scheduled_work();
2641 free_netdev(mp->dev); 2699 free_netdev(mp->dev);
2642 2700
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index d6524db321af..005f2aa75019 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -183,7 +183,7 @@ struct myri10ge_slice_state {
183 dma_addr_t fw_stats_bus; 183 dma_addr_t fw_stats_bus;
184 int watchdog_tx_done; 184 int watchdog_tx_done;
185 int watchdog_tx_req; 185 int watchdog_tx_req;
186#ifdef CONFIG_DCA 186#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
187 int cached_dca_tag; 187 int cached_dca_tag;
188 int cpu; 188 int cpu;
189 __be32 __iomem *dca_tag; 189 __be32 __iomem *dca_tag;
@@ -215,7 +215,7 @@ struct myri10ge_priv {
215 int msi_enabled; 215 int msi_enabled;
216 int msix_enabled; 216 int msix_enabled;
217 struct msix_entry *msix_vectors; 217 struct msix_entry *msix_vectors;
218#ifdef CONFIG_DCA 218#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
219 int dca_enabled; 219 int dca_enabled;
220#endif 220#endif
221 u32 link_state; 221 u32 link_state;
@@ -891,7 +891,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
891 struct myri10ge_slice_state *ss; 891 struct myri10ge_slice_state *ss;
892 int i, status; 892 int i, status;
893 size_t bytes; 893 size_t bytes;
894#ifdef CONFIG_DCA 894#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
895 unsigned long dca_tag_off; 895 unsigned long dca_tag_off;
896#endif 896#endif
897 897
@@ -986,7 +986,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
986 } 986 }
987 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 987 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
988 988
989#ifdef CONFIG_DCA 989#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
990 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0); 990 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
991 dca_tag_off = cmd.data0; 991 dca_tag_off = cmd.data0;
992 for (i = 0; i < mgp->num_slices; i++) { 992 for (i = 0; i < mgp->num_slices; i++) {
@@ -1025,7 +1025,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
1025 return status; 1025 return status;
1026} 1026}
1027 1027
1028#ifdef CONFIG_DCA 1028#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1029static void 1029static void
1030myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) 1030myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1031{ 1031{
@@ -1060,8 +1060,9 @@ static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
1060 } 1060 }
1061 err = dca_add_requester(&pdev->dev); 1061 err = dca_add_requester(&pdev->dev);
1062 if (err) { 1062 if (err) {
1063 dev_err(&pdev->dev, 1063 if (err != -ENODEV)
1064 "dca_add_requester() failed, err=%d\n", err); 1064 dev_err(&pdev->dev,
1065 "dca_add_requester() failed, err=%d\n", err);
1065 return; 1066 return;
1066 } 1067 }
1067 mgp->dca_enabled = 1; 1068 mgp->dca_enabled = 1;
@@ -1457,7 +1458,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1457 struct net_device *netdev = ss->mgp->dev; 1458 struct net_device *netdev = ss->mgp->dev;
1458 int work_done; 1459 int work_done;
1459 1460
1460#ifdef CONFIG_DCA 1461#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1461 if (ss->mgp->dca_enabled) 1462 if (ss->mgp->dca_enabled)
1462 myri10ge_update_dca(ss); 1463 myri10ge_update_dca(ss);
1463#endif 1464#endif
@@ -1686,8 +1687,8 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1686 "tx_boundary", "WC", "irq", "MSI", "MSIX", 1687 "tx_boundary", "WC", "irq", "MSI", "MSIX",
1687 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1688 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1688 "serial_number", "watchdog_resets", 1689 "serial_number", "watchdog_resets",
1689#ifdef CONFIG_DCA 1690#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1690 "dca_capable", "dca_enabled", 1691 "dca_capable_firmware", "dca_device_present",
1691#endif 1692#endif
1692 "link_changes", "link_up", "dropped_link_overflow", 1693 "link_changes", "link_up", "dropped_link_overflow",
1693 "dropped_link_error_or_filtered", 1694 "dropped_link_error_or_filtered",
@@ -1765,7 +1766,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1765 data[i++] = (unsigned int)mgp->read_write_dma; 1766 data[i++] = (unsigned int)mgp->read_write_dma;
1766 data[i++] = (unsigned int)mgp->serial_number; 1767 data[i++] = (unsigned int)mgp->serial_number;
1767 data[i++] = (unsigned int)mgp->watchdog_resets; 1768 data[i++] = (unsigned int)mgp->watchdog_resets;
1768#ifdef CONFIG_DCA 1769#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
1769 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); 1770 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1770 data[i++] = (unsigned int)(mgp->dca_enabled); 1771 data[i++] = (unsigned int)(mgp->dca_enabled);
1771#endif 1772#endif
@@ -3763,7 +3764,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3763 dev_err(&pdev->dev, "failed reset\n"); 3764 dev_err(&pdev->dev, "failed reset\n");
3764 goto abort_with_slices; 3765 goto abort_with_slices;
3765 } 3766 }
3766#ifdef CONFIG_DCA 3767#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3767 myri10ge_setup_dca(mgp); 3768 myri10ge_setup_dca(mgp);
3768#endif 3769#endif
3769 pci_set_drvdata(pdev, mgp); 3770 pci_set_drvdata(pdev, mgp);
@@ -3866,7 +3867,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
3866 netdev = mgp->dev; 3867 netdev = mgp->dev;
3867 unregister_netdev(netdev); 3868 unregister_netdev(netdev);
3868 3869
3869#ifdef CONFIG_DCA 3870#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3870 myri10ge_teardown_dca(mgp); 3871 myri10ge_teardown_dca(mgp);
3871#endif 3872#endif
3872 myri10ge_dummy_rdma(mgp, 0); 3873 myri10ge_dummy_rdma(mgp, 0);
@@ -3911,7 +3912,7 @@ static struct pci_driver myri10ge_driver = {
3911#endif 3912#endif
3912}; 3913};
3913 3914
3914#ifdef CONFIG_DCA 3915#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3915static int 3916static int
3916myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p) 3917myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
3917{ 3918{
@@ -3943,7 +3944,7 @@ static __init int myri10ge_init_module(void)
3943 myri10ge_driver.name, myri10ge_rss_hash); 3944 myri10ge_driver.name, myri10ge_rss_hash);
3944 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT; 3945 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
3945 } 3946 }
3946#ifdef CONFIG_DCA 3947#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3947 dca_register_notify(&myri10ge_dca_notifier); 3948 dca_register_notify(&myri10ge_dca_notifier);
3948#endif 3949#endif
3949 3950
@@ -3954,7 +3955,7 @@ module_init(myri10ge_init_module);
3954 3955
3955static __exit void myri10ge_cleanup_module(void) 3956static __exit void myri10ge_cleanup_module(void)
3956{ 3957{
3957#ifdef CONFIG_DCA 3958#if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE)
3958 dca_unregister_notify(&myri10ge_dca_notifier); 3959 dca_unregister_notify(&myri10ge_dca_notifier);
3959#endif 3960#endif
3960 pci_unregister_driver(&myri10ge_driver); 3961 pci_unregister_driver(&myri10ge_driver);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index fa3ceca4e15c..eb681c0d51ba 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -64,6 +64,25 @@ static const char version2[] =
64 64
65/* Do we support clones that don't adhere to 14,15 of the SAprom ? */ 65/* Do we support clones that don't adhere to 14,15 of the SAprom ? */
66#define SUPPORT_NE_BAD_CLONES 66#define SUPPORT_NE_BAD_CLONES
67/* 0xbad = bad sig or no reset ack */
68#define BAD 0xbad
69
70#define MAX_NE_CARDS 4 /* Max number of NE cards per module */
71static struct platform_device *pdev_ne[MAX_NE_CARDS];
72static int io[MAX_NE_CARDS];
73static int irq[MAX_NE_CARDS];
74static int bad[MAX_NE_CARDS];
75
76#ifdef MODULE
77module_param_array(io, int, NULL, 0);
78module_param_array(irq, int, NULL, 0);
79module_param_array(bad, int, NULL, 0);
80MODULE_PARM_DESC(io, "I/O base address(es),required");
81MODULE_PARM_DESC(irq, "IRQ number(s)");
82MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
83MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
84MODULE_LICENSE("GPL");
85#endif /* MODULE */
67 86
68/* Do we perform extra sanity checks on stuff ? */ 87/* Do we perform extra sanity checks on stuff ? */
69/* #define NE_SANITY_CHECK */ 88/* #define NE_SANITY_CHECK */
@@ -74,6 +93,10 @@ static const char version2[] =
74/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ 93/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
75/* #define PACKETBUF_MEMSIZE 0x40 */ 94/* #define PACKETBUF_MEMSIZE 0x40 */
76 95
96/* This is set up so that no ISA autoprobe takes place. We can't guarantee
97that the ne2k probe is the last 8390 based probe to take place (as it
98is at boot) and so the probe will get confused by any other 8390 cards.
99ISA device autoprobes on a running machine are not recommended anyway. */
77#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R)) 100#if !defined(MODULE) && (defined(CONFIG_ISA) || defined(CONFIG_M32R))
78/* Do we need a portlist for the ISA auto-probe ? */ 101/* Do we need a portlist for the ISA auto-probe ? */
79#define NEEDS_PORTLIST 102#define NEEDS_PORTLIST
@@ -192,8 +215,13 @@ static int __init do_ne_probe(struct net_device *dev)
192#endif 215#endif
193 216
194 /* First check any supplied i/o locations. User knows best. <cough> */ 217 /* First check any supplied i/o locations. User knows best. <cough> */
195 if (base_addr > 0x1ff) /* Check a single specified location. */ 218 if (base_addr > 0x1ff) { /* Check a single specified location. */
196 return ne_probe1(dev, base_addr); 219 int ret = ne_probe1(dev, base_addr);
220 if (ret)
221 printk(KERN_WARNING "ne.c: No NE*000 card found at "
222 "i/o = %#lx\n", base_addr);
223 return ret;
224 }
197 else if (base_addr != 0) /* Don't probe at all. */ 225 else if (base_addr != 0) /* Don't probe at all. */
198 return -ENXIO; 226 return -ENXIO;
199 227
@@ -214,28 +242,6 @@ static int __init do_ne_probe(struct net_device *dev)
214 return -ENODEV; 242 return -ENODEV;
215} 243}
216 244
217#ifndef MODULE
218struct net_device * __init ne_probe(int unit)
219{
220 struct net_device *dev = alloc_eip_netdev();
221 int err;
222
223 if (!dev)
224 return ERR_PTR(-ENOMEM);
225
226 sprintf(dev->name, "eth%d", unit);
227 netdev_boot_setup_check(dev);
228
229 err = do_ne_probe(dev);
230 if (err)
231 goto out;
232 return dev;
233out:
234 free_netdev(dev);
235 return ERR_PTR(err);
236}
237#endif
238
239static int __init ne_probe_isapnp(struct net_device *dev) 245static int __init ne_probe_isapnp(struct net_device *dev)
240{ 246{
241 int i; 247 int i;
@@ -329,7 +335,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
329 with an otherwise unused dev->mem_end value of "0xBAD" will 335 with an otherwise unused dev->mem_end value of "0xBAD" will
330 cause the driver to skip these parts of the probe. */ 336 cause the driver to skip these parts of the probe. */
331 337
332 bad_card = ((dev->base_addr != 0) && (dev->mem_end == 0xbad)); 338 bad_card = ((dev->base_addr != 0) && (dev->mem_end == BAD));
333 339
334 /* Reset card. Who knows what dain-bramaged state it was left in. */ 340 /* Reset card. Who knows what dain-bramaged state it was left in. */
335 341
@@ -806,46 +812,95 @@ retry:
806static int __init ne_drv_probe(struct platform_device *pdev) 812static int __init ne_drv_probe(struct platform_device *pdev)
807{ 813{
808 struct net_device *dev; 814 struct net_device *dev;
815 int err, this_dev = pdev->id;
809 struct resource *res; 816 struct resource *res;
810 int err, irq;
811
812 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
813 irq = platform_get_irq(pdev, 0);
814 if (!res || irq < 0)
815 return -ENODEV;
816 817
817 dev = alloc_eip_netdev(); 818 dev = alloc_eip_netdev();
818 if (!dev) 819 if (!dev)
819 return -ENOMEM; 820 return -ENOMEM;
820 dev->irq = irq; 821
821 dev->base_addr = res->start; 822 /* ne.c doesn't populate resources in platform_device, but
823 * rbtx4927_ne_init and rbtx4938_ne_init do register devices
824 * with resources.
825 */
826 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
827 if (res) {
828 dev->base_addr = res->start;
829 dev->irq = platform_get_irq(pdev, 0);
830 } else {
831 if (this_dev < 0 || this_dev >= MAX_NE_CARDS)
832 return -EINVAL;
833 dev->base_addr = io[this_dev];
834 dev->irq = irq[this_dev];
835 dev->mem_end = bad[this_dev];
836 }
822 err = do_ne_probe(dev); 837 err = do_ne_probe(dev);
823 if (err) { 838 if (err) {
824 free_netdev(dev); 839 free_netdev(dev);
825 return err; 840 return err;
826 } 841 }
827 platform_set_drvdata(pdev, dev); 842 platform_set_drvdata(pdev, dev);
843
844 /* Update with any values found by probing, don't update if
845 * resources were specified.
846 */
847 if (!res) {
848 io[this_dev] = dev->base_addr;
849 irq[this_dev] = dev->irq;
850 }
828 return 0; 851 return 0;
829} 852}
830 853
831static int __exit ne_drv_remove(struct platform_device *pdev) 854static int ne_drv_remove(struct platform_device *pdev)
832{ 855{
833 struct net_device *dev = platform_get_drvdata(pdev); 856 struct net_device *dev = platform_get_drvdata(pdev);
834 857
835 unregister_netdev(dev); 858 if (dev) {
836 free_irq(dev->irq, dev); 859 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
837 release_region(dev->base_addr, NE_IO_EXTENT); 860 netif_device_detach(dev);
838 free_netdev(dev); 861 unregister_netdev(dev);
862 if (idev)
863 pnp_device_detach(idev);
864 /* Careful ne_drv_remove can be called twice, once from
865 * the platform_driver.remove and again when the
866 * platform_device is being removed.
867 */
868 ei_status.priv = 0;
869 free_irq(dev->irq, dev);
870 release_region(dev->base_addr, NE_IO_EXTENT);
871 free_netdev(dev);
872 platform_set_drvdata(pdev, NULL);
873 }
839 return 0; 874 return 0;
840} 875}
841 876
877/* Remove unused devices or all if true. */
878static void ne_loop_rm_unreg(int all)
879{
880 int this_dev;
881 struct platform_device *pdev;
882 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
883 pdev = pdev_ne[this_dev];
884 /* No network device == unused */
885 if (pdev && (!platform_get_drvdata(pdev) || all)) {
886 ne_drv_remove(pdev);
887 platform_device_unregister(pdev);
888 pdev_ne[this_dev] = NULL;
889 }
890 }
891}
892
842#ifdef CONFIG_PM 893#ifdef CONFIG_PM
843static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state) 894static int ne_drv_suspend(struct platform_device *pdev, pm_message_t state)
844{ 895{
845 struct net_device *dev = platform_get_drvdata(pdev); 896 struct net_device *dev = platform_get_drvdata(pdev);
846 897
847 if (netif_running(dev)) 898 if (netif_running(dev)) {
899 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
848 netif_device_detach(dev); 900 netif_device_detach(dev);
901 if (idev)
902 pnp_stop_dev(idev);
903 }
849 return 0; 904 return 0;
850} 905}
851 906
@@ -854,6 +909,9 @@ static int ne_drv_resume(struct platform_device *pdev)
854 struct net_device *dev = platform_get_drvdata(pdev); 909 struct net_device *dev = platform_get_drvdata(pdev);
855 910
856 if (netif_running(dev)) { 911 if (netif_running(dev)) {
912 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv;
913 if (idev)
914 pnp_start_dev(idev);
857 ne_reset_8390(dev); 915 ne_reset_8390(dev);
858 NS8390p_init(dev, 1); 916 NS8390p_init(dev, 1);
859 netif_device_attach(dev); 917 netif_device_attach(dev);
@@ -866,7 +924,7 @@ static int ne_drv_resume(struct platform_device *pdev)
866#endif 924#endif
867 925
868static struct platform_driver ne_driver = { 926static struct platform_driver ne_driver = {
869 .remove = __exit_p(ne_drv_remove), 927 .remove = ne_drv_remove,
870 .suspend = ne_drv_suspend, 928 .suspend = ne_drv_suspend,
871 .resume = ne_drv_resume, 929 .resume = ne_drv_resume,
872 .driver = { 930 .driver = {
@@ -875,91 +933,96 @@ static struct platform_driver ne_driver = {
875 }, 933 },
876}; 934};
877 935
878static int __init ne_init(void) 936static void __init ne_add_devices(void)
879{ 937{
880 return platform_driver_probe(&ne_driver, ne_drv_probe); 938 int this_dev;
881} 939 struct platform_device *pdev;
882 940
883static void __exit ne_exit(void) 941 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
884{ 942 if (pdev_ne[this_dev])
885 platform_driver_unregister(&ne_driver); 943 continue;
944 pdev = platform_device_register_simple(
945 DRV_NAME, this_dev, NULL, 0);
946 if (IS_ERR(pdev))
947 continue;
948 pdev_ne[this_dev] = pdev;
949 }
886} 950}
887 951
888#ifdef MODULE 952#ifdef MODULE
889#define MAX_NE_CARDS 4 /* Max number of NE cards per module */ 953int __init init_module()
890static struct net_device *dev_ne[MAX_NE_CARDS];
891static int io[MAX_NE_CARDS];
892static int irq[MAX_NE_CARDS];
893static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */
894
895module_param_array(io, int, NULL, 0);
896module_param_array(irq, int, NULL, 0);
897module_param_array(bad, int, NULL, 0);
898MODULE_PARM_DESC(io, "I/O base address(es),required");
899MODULE_PARM_DESC(irq, "IRQ number(s)");
900MODULE_PARM_DESC(bad, "Accept card(s) with bad signatures");
901MODULE_DESCRIPTION("NE1000/NE2000 ISA/PnP Ethernet driver");
902MODULE_LICENSE("GPL");
903
904/* This is set up so that no ISA autoprobe takes place. We can't guarantee
905that the ne2k probe is the last 8390 based probe to take place (as it
906is at boot) and so the probe will get confused by any other 8390 cards.
907ISA device autoprobes on a running machine are not recommended anyway. */
908
909int __init init_module(void)
910{ 954{
911 int this_dev, found = 0; 955 int retval;
912 int plat_found = !ne_init(); 956 ne_add_devices();
913 957 retval = platform_driver_probe(&ne_driver, ne_drv_probe);
914 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { 958 if (retval) {
915 struct net_device *dev = alloc_eip_netdev(); 959 if (io[0] == 0)
916 if (!dev) 960 printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\""
917 break; 961 " value(s) for ISA cards.\n");
918 dev->irq = irq[this_dev]; 962 ne_loop_rm_unreg(1);
919 dev->mem_end = bad[this_dev]; 963 return retval;
920 dev->base_addr = io[this_dev];
921 if (do_ne_probe(dev) == 0) {
922 dev_ne[found++] = dev;
923 continue;
924 }
925 free_netdev(dev);
926 if (found || plat_found)
927 break;
928 if (io[this_dev] != 0)
929 printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
930 else
931 printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
932 return -ENXIO;
933 } 964 }
934 if (found || plat_found)
935 return 0;
936 return -ENODEV;
937}
938 965
939static void cleanup_card(struct net_device *dev) 966 /* Unregister unused platform_devices. */
967 ne_loop_rm_unreg(0);
968 return retval;
969}
970#else /* MODULE */
971static int __init ne_init(void)
940{ 972{
941 struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; 973 int retval = platform_driver_probe(&ne_driver, ne_drv_probe);
942 if (idev) 974
943 pnp_device_detach(idev); 975 /* Unregister unused platform_devices. */
944 free_irq(dev->irq, dev); 976 ne_loop_rm_unreg(0);
945 release_region(dev->base_addr, NE_IO_EXTENT); 977 return retval;
946} 978}
979module_init(ne_init);
947 980
948void __exit cleanup_module(void) 981struct net_device * __init ne_probe(int unit)
949{ 982{
950 int this_dev; 983 int this_dev;
984 struct net_device *dev;
985
986 /* Find an empty slot, that is no net_device and zero io port. */
987 this_dev = 0;
988 while ((pdev_ne[this_dev] && platform_get_drvdata(pdev_ne[this_dev])) ||
989 io[this_dev]) {
990 if (++this_dev == MAX_NE_CARDS)
991 return ERR_PTR(-ENOMEM);
992 }
993
994 /* Get irq, io from kernel command line */
995 dev = alloc_eip_netdev();
996 if (!dev)
997 return ERR_PTR(-ENOMEM);
951 998
952 ne_exit(); 999 sprintf(dev->name, "eth%d", unit);
1000 netdev_boot_setup_check(dev);
1001
1002 io[this_dev] = dev->base_addr;
1003 irq[this_dev] = dev->irq;
1004 bad[this_dev] = dev->mem_end;
1005
1006 free_netdev(dev);
1007
1008 ne_add_devices();
1009
1010 /* return the first device found */
953 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { 1011 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
954 struct net_device *dev = dev_ne[this_dev]; 1012 if (pdev_ne[this_dev]) {
955 if (dev) { 1013 dev = platform_get_drvdata(pdev_ne[this_dev]);
956 unregister_netdev(dev); 1014 if (dev)
957 cleanup_card(dev); 1015 return dev;
958 free_netdev(dev);
959 } 1016 }
960 } 1017 }
1018
1019 return ERR_PTR(-ENODEV);
961} 1020}
962#else /* MODULE */
963module_init(ne_init);
964module_exit(ne_exit);
965#endif /* MODULE */ 1021#endif /* MODULE */
1022
1023static void __exit ne_exit(void)
1024{
1025 platform_driver_unregister(&ne_driver);
1026 ne_loop_rm_unreg(1);
1027}
1028module_exit(ne_exit);
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 3f9af759cb90..b9bed82e1d21 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -189,7 +189,7 @@ netx_eth_interrupt(int irq, void *dev_id)
189 189
190 if ((status & ISR_CON_HI) || (status & ISR_IND_HI)) 190 if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
191 printk("%s: unexpected status: 0x%08x\n", 191 printk("%s: unexpected status: 0x%08x\n",
192 __FUNCTION__, status); 192 __func__, status);
193 193
194 fill_level = 194 fill_level =
195 readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id))); 195 readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 244ab49c4337..f8e601c51da7 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -742,7 +742,7 @@ extern char netxen_nic_driver_name[];
742 } while (0) 742 } while (0)
743#else 743#else
744#define DPRINTK(klevel, fmt, args...) do { \ 744#define DPRINTK(klevel, fmt, args...) do { \
745 printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\ 745 printk(KERN_##klevel PFX "%s: %s: " fmt, __func__,\
746 (adapter != NULL && adapter->netdev != NULL) ? \ 746 (adapter != NULL && adapter->netdev != NULL) ? \
747 adapter->netdev->name : NULL, \ 747 adapter->netdev->name : NULL, \
748 ## args); } while(0) 748 ## args); } while(0)
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 008fd6618a5f..6ef3f0d84bcf 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -77,18 +77,18 @@ static irqreturn_t netxen_msi_intr(int irq, void *data);
77 77
78/* PCI Device ID Table */ 78/* PCI Device ID Table */
79#define ENTRY(device) \ 79#define ENTRY(device) \
80 {PCI_DEVICE(0x4040, (device)), \ 80 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
82 82
83static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 83static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
84 ENTRY(0x0001), 84 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
85 ENTRY(0x0002), 85 ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
86 ENTRY(0x0003), 86 ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
87 ENTRY(0x0004), 87 ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
88 ENTRY(0x0005), 88 ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
89 ENTRY(0x0024), 89 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
90 ENTRY(0x0025), 90 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
91 ENTRY(0x0100), 91 ENTRY(PCI_DEVICE_ID_NX3031),
92 {0,} 92 {0,}
93}; 93};
94 94
@@ -241,7 +241,7 @@ static void netxen_check_options(struct netxen_adapter *adapter)
241 case NETXEN_BRDTYPE_P3_REF_QG: 241 case NETXEN_BRDTYPE_P3_REF_QG:
242 case NETXEN_BRDTYPE_P3_4_GB: 242 case NETXEN_BRDTYPE_P3_4_GB:
243 case NETXEN_BRDTYPE_P3_4_GB_MM: 243 case NETXEN_BRDTYPE_P3_4_GB_MM:
244 adapter->msix_supported = 0; 244 adapter->msix_supported = !!use_msi_x;
245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; 245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G;
246 break; 246 break;
247 247
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 53451c3b2c0d..0a575fef29e6 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -119,7 +119,7 @@ KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
119 119
120#ifdef NETDRV_DEBUG 120#ifdef NETDRV_DEBUG
121/* note: prints function name for you */ 121/* note: prints function name for you */
122# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args) 122# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
123#else 123#else
124# define DPRINTK(fmt, args...) 124# define DPRINTK(fmt, args...)
125#endif 125#endif
@@ -130,7 +130,7 @@ KERN_INFO " Support available from http://foo.com/bar/baz.html\n";
130# define assert(expr) \ 130# define assert(expr) \
131 if(!(expr)) { \ 131 if(!(expr)) { \
132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
133 #expr,__FILE__,__FUNCTION__,__LINE__); \ 133 #expr,__FILE__,__func__,__LINE__); \
134 } 134 }
135#endif 135#endif
136 136
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 94e0b7ed76f1..e7508c10887c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,49 +60,14 @@ int mdiobus_register(struct mii_bus *bus)
60 bus->reset(bus); 60 bus->reset(bus);
61 61
62 for (i = 0; i < PHY_MAX_ADDR; i++) { 62 for (i = 0; i < PHY_MAX_ADDR; i++) {
63 struct phy_device *phydev; 63 bus->phy_map[i] = NULL;
64 if ((bus->phy_mask & (1 << i)) == 0) {
65 struct phy_device *phydev;
64 66
65 if (bus->phy_mask & (1 << i)) { 67 phydev = mdiobus_scan(bus, i);
66 bus->phy_map[i] = NULL; 68 if (IS_ERR(phydev))
67 continue; 69 err = PTR_ERR(phydev);
68 } 70 }
69
70 phydev = get_phy_device(bus, i);
71
72 if (IS_ERR(phydev))
73 return PTR_ERR(phydev);
74
75 /* There's a PHY at this address
76 * We need to set:
77 * 1) IRQ
78 * 2) bus_id
79 * 3) parent
80 * 4) bus
81 * 5) mii_bus
82 * And, we need to register it */
83 if (phydev) {
84 phydev->irq = bus->irq[i];
85
86 phydev->dev.parent = bus->dev;
87 phydev->dev.bus = &mdio_bus_type;
88 snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, i);
89
90 phydev->bus = bus;
91
92 /* Run all of the fixups for this PHY */
93 phy_scan_fixups(phydev);
94
95 err = device_register(&phydev->dev);
96
97 if (err) {
98 printk(KERN_ERR "phy %d failed to register\n",
99 i);
100 phy_device_free(phydev);
101 phydev = NULL;
102 }
103 }
104
105 bus->phy_map[i] = phydev;
106 } 71 }
107 72
108 pr_info("%s: probed\n", bus->name); 73 pr_info("%s: probed\n", bus->name);
@@ -122,6 +87,48 @@ void mdiobus_unregister(struct mii_bus *bus)
122} 87}
123EXPORT_SYMBOL(mdiobus_unregister); 88EXPORT_SYMBOL(mdiobus_unregister);
124 89
90struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
91{
92 struct phy_device *phydev;
93 int err;
94
95 phydev = get_phy_device(bus, addr);
96 if (IS_ERR(phydev) || phydev == NULL)
97 return phydev;
98
99 /* There's a PHY at this address
100 * We need to set:
101 * 1) IRQ
102 * 2) bus_id
103 * 3) parent
104 * 4) bus
105 * 5) mii_bus
106 * And, we need to register it */
107
108 phydev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
109
110 phydev->dev.parent = bus->dev;
111 phydev->dev.bus = &mdio_bus_type;
112 snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, addr);
113
114 phydev->bus = bus;
115
116 /* Run all of the fixups for this PHY */
117 phy_scan_fixups(phydev);
118
119 err = device_register(&phydev->dev);
120 if (err) {
121 printk(KERN_ERR "phy %d failed to register\n", addr);
122 phy_device_free(phydev);
123 phydev = NULL;
124 }
125
126 bus->phy_map[addr] = phydev;
127
128 return phydev;
129}
130EXPORT_SYMBOL(mdiobus_scan);
131
125/** 132/**
126 * mdio_bus_match - determine if given PHY driver supports the given PHY device 133 * mdio_bus_match - determine if given PHY driver supports the given PHY device
127 * @dev: target PHY device 134 * @dev: target PHY device
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index ddccc074a76a..5d4d21516a6c 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1833,9 +1833,11 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1833 1833
1834 /* If the queue is getting long, don't wait any longer for packets 1834 /* If the queue is getting long, don't wait any longer for packets
1835 before the start of the queue. */ 1835 before the start of the queue. */
1836 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN 1836 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
1837 && seq_before(ppp->minseq, ppp->mrq.next->sequence)) 1837 struct sk_buff *skb = skb_peek(&ppp->mrq);
1838 ppp->minseq = ppp->mrq.next->sequence; 1838 if (seq_before(ppp->minseq, skb->sequence))
1839 ppp->minseq = skb->sequence;
1840 }
1839 1841
1840 /* Pull completed packets off the queue and receive them. */ 1842 /* Pull completed packets off the queue and receive them. */
1841 while ((skb = ppp_mp_reconstruct(ppp))) 1843 while ((skb = ppp_mp_reconstruct(ppp)))
@@ -1864,7 +1866,7 @@ ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
1864 for (p = list->next; p != (struct sk_buff *)list; p = p->next) 1866 for (p = list->next; p != (struct sk_buff *)list; p = p->next)
1865 if (seq_before(seq, p->sequence)) 1867 if (seq_before(seq, p->sequence))
1866 break; 1868 break;
1867 __skb_insert(skb, p->prev, p, list); 1869 __skb_queue_before(list, p, skb);
1868} 1870}
1869 1871
1870/* 1872/*
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index ff175e8f36b2..185b1dff10a8 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -353,7 +353,7 @@ static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_
353 spin_lock_bh(&session->reorder_q.lock); 353 spin_lock_bh(&session->reorder_q.lock);
354 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 354 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
355 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) { 355 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
356 __skb_insert(skb, skbp->prev, skbp, &session->reorder_q); 356 __skb_queue_before(&session->reorder_q, skbp, skb);
357 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, 357 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
358 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 358 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
359 session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns, 359 session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
diff --git a/drivers/net/qlge/Makefile b/drivers/net/qlge/Makefile
new file mode 100644
index 000000000000..8a197658d76f
--- /dev/null
+++ b/drivers/net/qlge/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Qlogic 10GbE PCI Express ethernet driver
3#
4
5obj-$(CONFIG_QLGE) += qlge.o
6
7qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
new file mode 100644
index 000000000000..c37ea436c918
--- /dev/null
+++ b/drivers/net/qlge/qlge.h
@@ -0,0 +1,1593 @@
1/*
2 * QLogic QLA41xx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qlge for copyright and licensing details.
6 */
7#ifndef _QLGE_H_
8#define _QLGE_H_
9
10#include <linux/pci.h>
11#include <linux/netdevice.h>
12
13/*
14 * General definitions...
15 */
16#define DRV_NAME "qlge"
17#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
18#define DRV_VERSION "v1.00.00-b3"
19
20#define PFX "qlge: "
21#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
22 do { \
23 if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
24 ; \
25 else \
26 dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
27 "%s: " fmt, __func__, ##args); \
28 } while (0)
29
30#define QLGE_VENDOR_ID 0x1077
31#define QLGE_DEVICE_ID1 0x8012
32#define QLGE_DEVICE_ID 0x8000
33
34#define MAX_RX_RINGS 128
35#define MAX_TX_RINGS 128
36
37#define NUM_TX_RING_ENTRIES 256
38#define NUM_RX_RING_ENTRIES 256
39
40#define NUM_SMALL_BUFFERS 512
41#define NUM_LARGE_BUFFERS 512
42
43#define SMALL_BUFFER_SIZE 256
44#define LARGE_BUFFER_SIZE PAGE_SIZE
45#define MAX_SPLIT_SIZE 1023
46#define QLGE_SB_PAD 32
47
48#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
49#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
50#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
51#define UDELAY_COUNT 3
52#define UDELAY_DELAY 10
53
54
55#define TX_DESC_PER_IOCB 8
56/* The maximum number of frags we handle is based
57 * on PAGE_SIZE...
58 */
59#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
60#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
61#elif (PAGE_SHIFT == 16) /* 64k pages */
62#define TX_DESC_PER_OAL 0
63#endif
64
65#define DB_PAGE_SIZE 4096
66
67/*
68 * Processor Address Register (PROC_ADDR) bit definitions.
69 */
70enum {
71
72 /* Misc. stuff */
73 MAILBOX_COUNT = 16,
74
75 PROC_ADDR_RDY = (1 << 31),
76 PROC_ADDR_R = (1 << 30),
77 PROC_ADDR_ERR = (1 << 29),
78 PROC_ADDR_DA = (1 << 28),
79 PROC_ADDR_FUNC0_MBI = 0x00001180,
80 PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
81 PROC_ADDR_FUNC0_CTL = 0x000011a1,
82 PROC_ADDR_FUNC2_MBI = 0x00001280,
83 PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
84 PROC_ADDR_FUNC2_CTL = 0x000012a1,
85 PROC_ADDR_MPI_RISC = 0x00000000,
86 PROC_ADDR_MDE = 0x00010000,
87 PROC_ADDR_REGBLOCK = 0x00020000,
88 PROC_ADDR_RISC_REG = 0x00030000,
89};
90
91/*
92 * System Register (SYS) bit definitions.
93 */
94enum {
95 SYS_EFE = (1 << 0),
96 SYS_FAE = (1 << 1),
97 SYS_MDC = (1 << 2),
98 SYS_DST = (1 << 3),
99 SYS_DWC = (1 << 4),
100 SYS_EVW = (1 << 5),
101 SYS_OMP_DLY_MASK = 0x3f000000,
102 /*
103 * There are no values defined as of edit #15.
104 */
105 SYS_ODI = (1 << 14),
106};
107
108/*
109 * Reset/Failover Register (RST_FO) bit definitions.
110 */
111enum {
112 RST_FO_TFO = (1 << 0),
113 RST_FO_RR_MASK = 0x00060000,
114 RST_FO_RR_CQ_CAM = 0x00000000,
115 RST_FO_RR_DROP = 0x00000001,
116 RST_FO_RR_DQ = 0x00000002,
117 RST_FO_RR_RCV_FUNC_CQ = 0x00000003,
118 RST_FO_FRB = (1 << 12),
119 RST_FO_MOP = (1 << 13),
120 RST_FO_REG = (1 << 14),
121 RST_FO_FR = (1 << 15),
122};
123
124/*
125 * Function Specific Control Register (FSC) bit definitions.
126 */
127enum {
128 FSC_DBRST_MASK = 0x00070000,
129 FSC_DBRST_256 = 0x00000000,
130 FSC_DBRST_512 = 0x00000001,
131 FSC_DBRST_768 = 0x00000002,
132 FSC_DBRST_1024 = 0x00000003,
133 FSC_DBL_MASK = 0x00180000,
134 FSC_DBL_DBRST = 0x00000000,
135 FSC_DBL_MAX_PLD = 0x00000008,
136 FSC_DBL_MAX_BRST = 0x00000010,
137 FSC_DBL_128_BYTES = 0x00000018,
138 FSC_EC = (1 << 5),
139 FSC_EPC_MASK = 0x00c00000,
140 FSC_EPC_INBOUND = (1 << 6),
141 FSC_EPC_OUTBOUND = (1 << 7),
142 FSC_VM_PAGESIZE_MASK = 0x07000000,
143 FSC_VM_PAGE_2K = 0x00000100,
144 FSC_VM_PAGE_4K = 0x00000200,
145 FSC_VM_PAGE_8K = 0x00000300,
146 FSC_VM_PAGE_64K = 0x00000600,
147 FSC_SH = (1 << 11),
148 FSC_DSB = (1 << 12),
149 FSC_STE = (1 << 13),
150 FSC_FE = (1 << 15),
151};
152
153/*
154 * Host Command Status Register (CSR) bit definitions.
155 */
156enum {
157 CSR_ERR_STS_MASK = 0x0000003f,
158 /*
159 * There are no valued defined as of edit #15.
160 */
161 CSR_RR = (1 << 8),
162 CSR_HRI = (1 << 9),
163 CSR_RP = (1 << 10),
164 CSR_CMD_PARM_SHIFT = 22,
165 CSR_CMD_NOP = 0x00000000,
166 CSR_CMD_SET_RST = 0x1000000,
167 CSR_CMD_CLR_RST = 0x20000000,
168 CSR_CMD_SET_PAUSE = 0x30000000,
169 CSR_CMD_CLR_PAUSE = 0x40000000,
170 CSR_CMD_SET_H2R_INT = 0x50000000,
171 CSR_CMD_CLR_H2R_INT = 0x60000000,
172 CSR_CMD_PAR_EN = 0x70000000,
173 CSR_CMD_SET_BAD_PAR = 0x80000000,
174 CSR_CMD_CLR_BAD_PAR = 0x90000000,
175 CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
176};
177
178/*
179 * Configuration Register (CFG) bit definitions.
180 */
181enum {
182 CFG_LRQ = (1 << 0),
183 CFG_DRQ = (1 << 1),
184 CFG_LR = (1 << 2),
185 CFG_DR = (1 << 3),
186 CFG_LE = (1 << 5),
187 CFG_LCQ = (1 << 6),
188 CFG_DCQ = (1 << 7),
189 CFG_Q_SHIFT = 8,
190 CFG_Q_MASK = 0x7f000000,
191};
192
193/*
194 * Status Register (STS) bit definitions.
195 */
196enum {
197 STS_FE = (1 << 0),
198 STS_PI = (1 << 1),
199 STS_PL0 = (1 << 2),
200 STS_PL1 = (1 << 3),
201 STS_PI0 = (1 << 4),
202 STS_PI1 = (1 << 5),
203 STS_FUNC_ID_MASK = 0x000000c0,
204 STS_FUNC_ID_SHIFT = 6,
205 STS_F0E = (1 << 8),
206 STS_F1E = (1 << 9),
207 STS_F2E = (1 << 10),
208 STS_F3E = (1 << 11),
209 STS_NFE = (1 << 12),
210};
211
212/*
213 * Interrupt Enable Register (INTR_EN) bit definitions.
214 */
215enum {
216 INTR_EN_INTR_MASK = 0x007f0000,
217 INTR_EN_TYPE_MASK = 0x03000000,
218 INTR_EN_TYPE_ENABLE = 0x00000100,
219 INTR_EN_TYPE_DISABLE = 0x00000200,
220 INTR_EN_TYPE_READ = 0x00000300,
221 INTR_EN_IHD = (1 << 13),
222 INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
223 INTR_EN_EI = (1 << 14),
224 INTR_EN_EN = (1 << 15),
225};
226
227/*
228 * Interrupt Mask Register (INTR_MASK) bit definitions.
229 */
230enum {
231 INTR_MASK_PI = (1 << 0),
232 INTR_MASK_HL0 = (1 << 1),
233 INTR_MASK_LH0 = (1 << 2),
234 INTR_MASK_HL1 = (1 << 3),
235 INTR_MASK_LH1 = (1 << 4),
236 INTR_MASK_SE = (1 << 5),
237 INTR_MASK_LSC = (1 << 6),
238 INTR_MASK_MC = (1 << 7),
239 INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
240};
241
242/*
243 * Register (REV_ID) bit definitions.
244 */
245enum {
246 REV_ID_MASK = 0x0000000f,
247 REV_ID_NICROLL_SHIFT = 0,
248 REV_ID_NICREV_SHIFT = 4,
249 REV_ID_XGROLL_SHIFT = 8,
250 REV_ID_XGREV_SHIFT = 12,
251 REV_ID_CHIPREV_SHIFT = 28,
252};
253
254/*
255 * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
256 */
257enum {
258 FRC_ECC_ERR_VW = (1 << 12),
259 FRC_ECC_ERR_VB = (1 << 13),
260 FRC_ECC_ERR_NI = (1 << 14),
261 FRC_ECC_ERR_NO = (1 << 15),
262 FRC_ECC_PFE_SHIFT = 16,
263 FRC_ECC_ERR_DO = (1 << 18),
264 FRC_ECC_P14 = (1 << 19),
265};
266
267/*
268 * Error Status Register (ERR_STS) bit definitions.
269 */
270enum {
271 ERR_STS_NOF = (1 << 0),
272 ERR_STS_NIF = (1 << 1),
273 ERR_STS_DRP = (1 << 2),
274 ERR_STS_XGP = (1 << 3),
275 ERR_STS_FOU = (1 << 4),
276 ERR_STS_FOC = (1 << 5),
277 ERR_STS_FOF = (1 << 6),
278 ERR_STS_FIU = (1 << 7),
279 ERR_STS_FIC = (1 << 8),
280 ERR_STS_FIF = (1 << 9),
281 ERR_STS_MOF = (1 << 10),
282 ERR_STS_TA = (1 << 11),
283 ERR_STS_MA = (1 << 12),
284 ERR_STS_MPE = (1 << 13),
285 ERR_STS_SCE = (1 << 14),
286 ERR_STS_STE = (1 << 15),
287 ERR_STS_FOW = (1 << 16),
288 ERR_STS_UE = (1 << 17),
289 ERR_STS_MCH = (1 << 26),
290 ERR_STS_LOC_SHIFT = 27,
291};
292
293/*
294 * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
295 */
296enum {
297 RAM_DBG_ADDR_FW = (1 << 30),
298 RAM_DBG_ADDR_FR = (1 << 31),
299};
300
301/*
302 * Semaphore Register (SEM) bit definitions.
303 */
304enum {
305 /*
306 * Example:
307 * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
308 */
309 SEM_CLEAR = 0,
310 SEM_SET = 1,
311 SEM_FORCE = 3,
312 SEM_XGMAC0_SHIFT = 0,
313 SEM_XGMAC1_SHIFT = 2,
314 SEM_ICB_SHIFT = 4,
315 SEM_MAC_ADDR_SHIFT = 6,
316 SEM_FLASH_SHIFT = 8,
317 SEM_PROBE_SHIFT = 10,
318 SEM_RT_IDX_SHIFT = 12,
319 SEM_PROC_REG_SHIFT = 14,
320 SEM_XGMAC0_MASK = 0x00030000,
321 SEM_XGMAC1_MASK = 0x000c0000,
322 SEM_ICB_MASK = 0x00300000,
323 SEM_MAC_ADDR_MASK = 0x00c00000,
324 SEM_FLASH_MASK = 0x03000000,
325 SEM_PROBE_MASK = 0x0c000000,
326 SEM_RT_IDX_MASK = 0x30000000,
327 SEM_PROC_REG_MASK = 0xc0000000,
328};
329
330/*
331 * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
332 */
333enum {
334 XGMAC_ADDR_RDY = (1 << 31),
335 XGMAC_ADDR_R = (1 << 30),
336 XGMAC_ADDR_XME = (1 << 29),
337
338 /* XGMAC control registers */
339 PAUSE_SRC_LO = 0x00000100,
340 PAUSE_SRC_HI = 0x00000104,
341 GLOBAL_CFG = 0x00000108,
342 GLOBAL_CFG_RESET = (1 << 0),
343 GLOBAL_CFG_JUMBO = (1 << 6),
344 GLOBAL_CFG_TX_STAT_EN = (1 << 10),
345 GLOBAL_CFG_RX_STAT_EN = (1 << 11),
346 TX_CFG = 0x0000010c,
347 TX_CFG_RESET = (1 << 0),
348 TX_CFG_EN = (1 << 1),
349 TX_CFG_PREAM = (1 << 2),
350 RX_CFG = 0x00000110,
351 RX_CFG_RESET = (1 << 0),
352 RX_CFG_EN = (1 << 1),
353 RX_CFG_PREAM = (1 << 2),
354 FLOW_CTL = 0x0000011c,
355 PAUSE_OPCODE = 0x00000120,
356 PAUSE_TIMER = 0x00000124,
357 PAUSE_FRM_DEST_LO = 0x00000128,
358 PAUSE_FRM_DEST_HI = 0x0000012c,
359 MAC_TX_PARAMS = 0x00000134,
360 MAC_TX_PARAMS_JUMBO = (1 << 31),
361 MAC_TX_PARAMS_SIZE_SHIFT = 16,
362 MAC_RX_PARAMS = 0x00000138,
363 MAC_SYS_INT = 0x00000144,
364 MAC_SYS_INT_MASK = 0x00000148,
365 MAC_MGMT_INT = 0x0000014c,
366 MAC_MGMT_IN_MASK = 0x00000150,
367 EXT_ARB_MODE = 0x000001fc,
368
369 /* XGMAC TX statistics registers */
370 TX_PKTS = 0x00000200,
371 TX_BYTES = 0x00000208,
372 TX_MCAST_PKTS = 0x00000210,
373 TX_BCAST_PKTS = 0x00000218,
374 TX_UCAST_PKTS = 0x00000220,
375 TX_CTL_PKTS = 0x00000228,
376 TX_PAUSE_PKTS = 0x00000230,
377 TX_64_PKT = 0x00000238,
378 TX_65_TO_127_PKT = 0x00000240,
379 TX_128_TO_255_PKT = 0x00000248,
380 TX_256_511_PKT = 0x00000250,
381 TX_512_TO_1023_PKT = 0x00000258,
382 TX_1024_TO_1518_PKT = 0x00000260,
383 TX_1519_TO_MAX_PKT = 0x00000268,
384 TX_UNDERSIZE_PKT = 0x00000270,
385 TX_OVERSIZE_PKT = 0x00000278,
386
387 /* XGMAC statistics control registers */
388 RX_HALF_FULL_DET = 0x000002a0,
389 TX_HALF_FULL_DET = 0x000002a4,
390 RX_OVERFLOW_DET = 0x000002a8,
391 TX_OVERFLOW_DET = 0x000002ac,
392 RX_HALF_FULL_MASK = 0x000002b0,
393 TX_HALF_FULL_MASK = 0x000002b4,
394 RX_OVERFLOW_MASK = 0x000002b8,
395 TX_OVERFLOW_MASK = 0x000002bc,
396 STAT_CNT_CTL = 0x000002c0,
397 STAT_CNT_CTL_CLEAR_TX = (1 << 0),
398 STAT_CNT_CTL_CLEAR_RX = (1 << 1),
399 AUX_RX_HALF_FULL_DET = 0x000002d0,
400 AUX_TX_HALF_FULL_DET = 0x000002d4,
401 AUX_RX_OVERFLOW_DET = 0x000002d8,
402 AUX_TX_OVERFLOW_DET = 0x000002dc,
403 AUX_RX_HALF_FULL_MASK = 0x000002f0,
404 AUX_TX_HALF_FULL_MASK = 0x000002f4,
405 AUX_RX_OVERFLOW_MASK = 0x000002f8,
406 AUX_TX_OVERFLOW_MASK = 0x000002fc,
407
408 /* XGMAC RX statistics registers */
409 RX_BYTES = 0x00000300,
410 RX_BYTES_OK = 0x00000308,
411 RX_PKTS = 0x00000310,
412 RX_PKTS_OK = 0x00000318,
413 RX_BCAST_PKTS = 0x00000320,
414 RX_MCAST_PKTS = 0x00000328,
415 RX_UCAST_PKTS = 0x00000330,
416 RX_UNDERSIZE_PKTS = 0x00000338,
417 RX_OVERSIZE_PKTS = 0x00000340,
418 RX_JABBER_PKTS = 0x00000348,
419 RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
420 RX_DROP_EVENTS = 0x00000358,
421 RX_FCERR_PKTS = 0x00000360,
422 RX_ALIGN_ERR = 0x00000368,
423 RX_SYMBOL_ERR = 0x00000370,
424 RX_MAC_ERR = 0x00000378,
425 RX_CTL_PKTS = 0x00000380,
426 RX_PAUSE_PKTS = 0x00000384,
427 RX_64_PKTS = 0x00000390,
428 RX_65_TO_127_PKTS = 0x00000398,
429 RX_128_255_PKTS = 0x000003a0,
430 RX_256_511_PKTS = 0x000003a8,
431 RX_512_TO_1023_PKTS = 0x000003b0,
432 RX_1024_TO_1518_PKTS = 0x000003b8,
433 RX_1519_TO_MAX_PKTS = 0x000003c0,
434 RX_LEN_ERR_PKTS = 0x000003c8,
435
436 /* XGMAC MDIO control registers */
437 MDIO_TX_DATA = 0x00000400,
438 MDIO_RX_DATA = 0x00000410,
439 MDIO_CMD = 0x00000420,
440 MDIO_PHY_ADDR = 0x00000430,
441 MDIO_PORT = 0x00000440,
442 MDIO_STATUS = 0x00000450,
443
444 /* XGMAC AUX statistics registers */
445};
446
447/*
448 * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
449 */
450enum {
451 ETS_QUEUE_SHIFT = 29,
452 ETS_REF = (1 << 26),
453 ETS_RS = (1 << 27),
454 ETS_P = (1 << 28),
455 ETS_FC_COS_SHIFT = 23,
456};
457
458/*
459 * Flash Address Register (FLASH_ADDR) bit definitions.
460 */
461enum {
462 FLASH_ADDR_RDY = (1 << 31),
463 FLASH_ADDR_R = (1 << 30),
464 FLASH_ADDR_ERR = (1 << 29),
465};
466
467/*
468 * Stop CQ Processing Register (CQ_STOP) bit definitions.
469 */
470enum {
471 CQ_STOP_QUEUE_MASK = (0x007f0000),
472 CQ_STOP_TYPE_MASK = (0x03000000),
473 CQ_STOP_TYPE_START = 0x00000100,
474 CQ_STOP_TYPE_STOP = 0x00000200,
475 CQ_STOP_TYPE_READ = 0x00000300,
476 CQ_STOP_EN = (1 << 15),
477};
478
479/*
480 * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
481 */
482enum {
483 MAC_ADDR_IDX_SHIFT = 4,
484 MAC_ADDR_TYPE_SHIFT = 16,
485 MAC_ADDR_TYPE_MASK = 0x000f0000,
486 MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
487 MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
488 MAC_ADDR_TYPE_VLAN = 0x00020000,
489 MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
490 MAC_ADDR_TYPE_FC_MAC = 0x00040000,
491 MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
492 MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
493 MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
494 MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
495 MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
496 MAC_ADDR_ADR = (1 << 25),
497 MAC_ADDR_RS = (1 << 26),
498 MAC_ADDR_E = (1 << 27),
499 MAC_ADDR_MR = (1 << 30),
500 MAC_ADDR_MW = (1 << 31),
501 MAX_MULTICAST_ENTRIES = 32,
502};
503
504/*
505 * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
506 */
507enum {
508 SPLT_HDR_EP = (1 << 31),
509};
510
511/*
512 * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
513 */
514enum {
515 FC_RCV_CFG_ECT = (1 << 15),
516 FC_RCV_CFG_DFH = (1 << 20),
517 FC_RCV_CFG_DVF = (1 << 21),
518 FC_RCV_CFG_RCE = (1 << 27),
519 FC_RCV_CFG_RFE = (1 << 28),
520 FC_RCV_CFG_TEE = (1 << 29),
521 FC_RCV_CFG_TCE = (1 << 30),
522 FC_RCV_CFG_TFE = (1 << 31),
523};
524
525/*
526 * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
527 */
528enum {
529 NIC_RCV_CFG_PPE = (1 << 0),
530 NIC_RCV_CFG_VLAN_MASK = 0x00060000,
531 NIC_RCV_CFG_VLAN_ALL = 0x00000000,
532 NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
533 NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
534 NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
535 NIC_RCV_CFG_RV = (1 << 3),
536 NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
537 NIC_RCV_CFG_DFQ_SHIFT = 8,
538 NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
539};
540
541/*
542 * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
543 */
544enum {
545 MGMT_RCV_CFG_ARP = (1 << 0),
546 MGMT_RCV_CFG_DHC = (1 << 1),
547 MGMT_RCV_CFG_DHS = (1 << 2),
548 MGMT_RCV_CFG_NP = (1 << 3),
549 MGMT_RCV_CFG_I6N = (1 << 4),
550 MGMT_RCV_CFG_I6R = (1 << 5),
551 MGMT_RCV_CFG_DH6 = (1 << 6),
552 MGMT_RCV_CFG_UD1 = (1 << 7),
553 MGMT_RCV_CFG_UD0 = (1 << 8),
554 MGMT_RCV_CFG_BCT = (1 << 9),
555 MGMT_RCV_CFG_MCT = (1 << 10),
556 MGMT_RCV_CFG_DM = (1 << 11),
557 MGMT_RCV_CFG_RM = (1 << 12),
558 MGMT_RCV_CFG_STL = (1 << 13),
559 MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
560 MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
561 MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
562 MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
563 MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
564};
565
566/*
567 * Routing Index Register (RT_IDX) bit definitions.
568 */
569enum {
570 RT_IDX_IDX_SHIFT = 8,
571 RT_IDX_TYPE_MASK = 0x000f0000,
572 RT_IDX_TYPE_RT = 0x00000000,
573 RT_IDX_TYPE_RT_INV = 0x00010000,
574 RT_IDX_TYPE_NICQ = 0x00020000,
575 RT_IDX_TYPE_NICQ_INV = 0x00030000,
576 RT_IDX_DST_MASK = 0x00700000,
577 RT_IDX_DST_RSS = 0x00000000,
578 RT_IDX_DST_CAM_Q = 0x00100000,
579 RT_IDX_DST_COS_Q = 0x00200000,
580 RT_IDX_DST_DFLT_Q = 0x00300000,
581 RT_IDX_DST_DEST_Q = 0x00400000,
582 RT_IDX_RS = (1 << 26),
583 RT_IDX_E = (1 << 27),
584 RT_IDX_MR = (1 << 30),
585 RT_IDX_MW = (1 << 31),
586
587 /* Nic Queue format - type 2 bits */
588 RT_IDX_BCAST = (1 << 0),
589 RT_IDX_MCAST = (1 << 1),
590 RT_IDX_MCAST_MATCH = (1 << 2),
591 RT_IDX_MCAST_REG_MATCH = (1 << 3),
592 RT_IDX_MCAST_HASH_MATCH = (1 << 4),
593 RT_IDX_FC_MACH = (1 << 5),
594 RT_IDX_ETH_FCOE = (1 << 6),
595 RT_IDX_CAM_HIT = (1 << 7),
596 RT_IDX_CAM_BIT0 = (1 << 8),
597 RT_IDX_CAM_BIT1 = (1 << 9),
598 RT_IDX_VLAN_TAG = (1 << 10),
599 RT_IDX_VLAN_MATCH = (1 << 11),
600 RT_IDX_VLAN_FILTER = (1 << 12),
601 RT_IDX_ETH_SKIP1 = (1 << 13),
602 RT_IDX_ETH_SKIP2 = (1 << 14),
603 RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
604 RT_IDX_802_3 = (1 << 16),
605 RT_IDX_LLDP = (1 << 17),
606 RT_IDX_UNUSED018 = (1 << 18),
607 RT_IDX_UNUSED019 = (1 << 19),
608 RT_IDX_UNUSED20 = (1 << 20),
609 RT_IDX_UNUSED21 = (1 << 21),
610 RT_IDX_ERR = (1 << 22),
611 RT_IDX_VALID = (1 << 23),
612 RT_IDX_TU_CSUM_ERR = (1 << 24),
613 RT_IDX_IP_CSUM_ERR = (1 << 25),
614 RT_IDX_MAC_ERR = (1 << 26),
615 RT_IDX_RSS_TCP6 = (1 << 27),
616 RT_IDX_RSS_TCP4 = (1 << 28),
617 RT_IDX_RSS_IPV6 = (1 << 29),
618 RT_IDX_RSS_IPV4 = (1 << 30),
619 RT_IDX_RSS_MATCH = (1 << 31),
620
621 /* Hierarchy for the NIC Queue Mask */
622 RT_IDX_ALL_ERR_SLOT = 0,
623 RT_IDX_MAC_ERR_SLOT = 0,
624 RT_IDX_IP_CSUM_ERR_SLOT = 1,
625 RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
626 RT_IDX_BCAST_SLOT = 3,
627 RT_IDX_MCAST_MATCH_SLOT = 4,
628 RT_IDX_ALLMULTI_SLOT = 5,
629 RT_IDX_UNUSED6_SLOT = 6,
630 RT_IDX_UNUSED7_SLOT = 7,
631 RT_IDX_RSS_MATCH_SLOT = 8,
632 RT_IDX_RSS_IPV4_SLOT = 8,
633 RT_IDX_RSS_IPV6_SLOT = 9,
634 RT_IDX_RSS_TCP4_SLOT = 10,
635 RT_IDX_RSS_TCP6_SLOT = 11,
636 RT_IDX_CAM_HIT_SLOT = 12,
637 RT_IDX_UNUSED013 = 13,
638 RT_IDX_UNUSED014 = 14,
639 RT_IDX_PROMISCUOUS_SLOT = 15,
640 RT_IDX_MAX_SLOTS = 16,
641};
642
643/*
644 * Control Register Set Map
645 */
646enum {
647 PROC_ADDR = 0, /* Use semaphore */
648 PROC_DATA = 0x04, /* Use semaphore */
649 SYS = 0x08,
650 RST_FO = 0x0c,
651 FSC = 0x10,
652 CSR = 0x14,
653 LED = 0x18,
654 ICB_RID = 0x1c, /* Use semaphore */
655 ICB_L = 0x20, /* Use semaphore */
656 ICB_H = 0x24, /* Use semaphore */
657 CFG = 0x28,
658 BIOS_ADDR = 0x2c,
659 STS = 0x30,
660 INTR_EN = 0x34,
661 INTR_MASK = 0x38,
662 ISR1 = 0x3c,
663 ISR2 = 0x40,
664 ISR3 = 0x44,
665 ISR4 = 0x48,
666 REV_ID = 0x4c,
667 FRC_ECC_ERR = 0x50,
668 ERR_STS = 0x54,
669 RAM_DBG_ADDR = 0x58,
670 RAM_DBG_DATA = 0x5c,
671 ECC_ERR_CNT = 0x60,
672 SEM = 0x64,
673 GPIO_1 = 0x68, /* Use semaphore */
674 GPIO_2 = 0x6c, /* Use semaphore */
675 GPIO_3 = 0x70, /* Use semaphore */
676 RSVD2 = 0x74,
677 XGMAC_ADDR = 0x78, /* Use semaphore */
678 XGMAC_DATA = 0x7c, /* Use semaphore */
679 NIC_ETS = 0x80,
680 CNA_ETS = 0x84,
681 FLASH_ADDR = 0x88, /* Use semaphore */
682 FLASH_DATA = 0x8c, /* Use semaphore */
683 CQ_STOP = 0x90,
684 PAGE_TBL_RID = 0x94,
685 WQ_PAGE_TBL_LO = 0x98,
686 WQ_PAGE_TBL_HI = 0x9c,
687 CQ_PAGE_TBL_LO = 0xa0,
688 CQ_PAGE_TBL_HI = 0xa4,
689 MAC_ADDR_IDX = 0xa8, /* Use semaphore */
690 MAC_ADDR_DATA = 0xac, /* Use semaphore */
691 COS_DFLT_CQ1 = 0xb0,
692 COS_DFLT_CQ2 = 0xb4,
693 ETYPE_SKIP1 = 0xb8,
694 ETYPE_SKIP2 = 0xbc,
695 SPLT_HDR = 0xc0,
696 FC_PAUSE_THRES = 0xc4,
697 NIC_PAUSE_THRES = 0xc8,
698 FC_ETHERTYPE = 0xcc,
699 FC_RCV_CFG = 0xd0,
700 NIC_RCV_CFG = 0xd4,
701 FC_COS_TAGS = 0xd8,
702 NIC_COS_TAGS = 0xdc,
703 MGMT_RCV_CFG = 0xe0,
704 RT_IDX = 0xe4,
705 RT_DATA = 0xe8,
706 RSVD7 = 0xec,
707 XG_SERDES_ADDR = 0xf0,
708 XG_SERDES_DATA = 0xf4,
709 PRB_MX_ADDR = 0xf8, /* Use semaphore */
710 PRB_MX_DATA = 0xfc, /* Use semaphore */
711};
712
713/*
714 * CAM output format.
715 */
716enum {
717 CAM_OUT_ROUTE_FC = 0,
718 CAM_OUT_ROUTE_NIC = 1,
719 CAM_OUT_FUNC_SHIFT = 2,
720 CAM_OUT_RV = (1 << 4),
721 CAM_OUT_SH = (1 << 15),
722 CAM_OUT_CQ_ID_SHIFT = 5,
723};
724
725/*
726 * Mailbox definitions
727 */
728enum {
729 /* Asynchronous Event Notifications */
730 AEN_SYS_ERR = 0x00008002,
731 AEN_LINK_UP = 0x00008011,
732 AEN_LINK_DOWN = 0x00008012,
733 AEN_IDC_CMPLT = 0x00008100,
734 AEN_IDC_REQ = 0x00008101,
735 AEN_FW_INIT_DONE = 0x00008400,
736 AEN_FW_INIT_FAIL = 0x00008401,
737
738 /* Mailbox Command Opcodes. */
739 MB_CMD_NOP = 0x00000000,
740 MB_CMD_EX_FW = 0x00000002,
741 MB_CMD_MB_TEST = 0x00000006,
742 MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
743 MB_CMD_ABOUT_FW = 0x00000008,
744 MB_CMD_LOAD_RISC_RAM = 0x0000000b,
745 MB_CMD_DUMP_RISC_RAM = 0x0000000c,
746 MB_CMD_WRITE_RAM = 0x0000000d,
747 MB_CMD_READ_RAM = 0x0000000f,
748 MB_CMD_STOP_FW = 0x00000014,
749 MB_CMD_MAKE_SYS_ERR = 0x0000002a,
750 MB_CMD_INIT_FW = 0x00000060,
751 MB_CMD_GET_INIT_CB = 0x00000061,
752 MB_CMD_GET_FW_STATE = 0x00000069,
753 MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
754 MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
755 MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
756 MB_WOL_DISABLE = 0x00000000,
757 MB_WOL_MAGIC_PKT = 0x00000001,
758 MB_WOL_FLTR = 0x00000002,
759 MB_WOL_UCAST = 0x00000004,
760 MB_WOL_MCAST = 0x00000008,
761 MB_WOL_BCAST = 0x00000010,
762 MB_WOL_LINK_UP = 0x00000020,
763 MB_WOL_LINK_DOWN = 0x00000040,
764 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
765 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
766 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
767 MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On Lan Magic Packet */
768 MB_CMD_PORT_RESET = 0x00000120,
769 MB_CMD_SET_PORT_CFG = 0x00000122,
770 MB_CMD_GET_PORT_CFG = 0x00000123,
771 MB_CMD_SET_ASIC_VOLTS = 0x00000130,
772 MB_CMD_GET_SNS_DATA = 0x00000131, /* Temp and Volt Sense data. */
773
774 /* Mailbox Command Status. */
775 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
776 MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
777 MB_CMD_STS_ERR = 0x00004005, /* Error. */
778};
779
780struct mbox_params {
781 u32 mbox_in[MAILBOX_COUNT];
782 u32 mbox_out[MAILBOX_COUNT];
783 int in_count;
784 int out_count;
785};
786
787struct flash_params {
788 u8 dev_id_str[4];
789 u16 size;
790 u16 csum;
791 u16 ver;
792 u16 sub_dev_id;
793 u8 mac_addr[6];
794 u16 res;
795};
796
797
798/*
799 * doorbell space for the rx ring context
800 */
801struct rx_doorbell_context {
802 u32 cnsmr_idx; /* 0x00 */
803 u32 valid; /* 0x04 */
804 u32 reserved[4]; /* 0x08-0x14 */
805 u32 lbq_prod_idx; /* 0x18 */
806 u32 sbq_prod_idx; /* 0x1c */
807};
808
809/*
810 * doorbell space for the tx ring context
811 */
812struct tx_doorbell_context {
813 u32 prod_idx; /* 0x00 */
814 u32 valid; /* 0x04 */
815 u32 reserved[4]; /* 0x08-0x14 */
816 u32 lbq_prod_idx; /* 0x18 */
817 u32 sbq_prod_idx; /* 0x1c */
818};
819
820/* DATA STRUCTURES SHARED WITH HARDWARE. */
821
822struct bq_element {
823 u32 addr_lo;
824#define BQ_END 0x00000001
825#define BQ_CONT 0x00000002
826#define BQ_MASK 0x00000003
827 u32 addr_hi;
828} __attribute((packed));
829
830struct tx_buf_desc {
831 __le64 addr;
832 __le32 len;
833#define TX_DESC_LEN_MASK 0x000fffff
834#define TX_DESC_C 0x40000000
835#define TX_DESC_E 0x80000000
836} __attribute((packed));
837
838/*
839 * IOCB Definitions...
840 */
841
842#define OPCODE_OB_MAC_IOCB 0x01
843#define OPCODE_OB_MAC_TSO_IOCB 0x02
844#define OPCODE_IB_MAC_IOCB 0x20
845#define OPCODE_IB_MPI_IOCB 0x21
846#define OPCODE_IB_AE_IOCB 0x3f
847
848struct ob_mac_iocb_req {
849 u8 opcode;
850 u8 flags1;
851#define OB_MAC_IOCB_REQ_OI 0x01
852#define OB_MAC_IOCB_REQ_I 0x02
853#define OB_MAC_IOCB_REQ_D 0x08
854#define OB_MAC_IOCB_REQ_F 0x10
855 u8 flags2;
856 u8 flags3;
857#define OB_MAC_IOCB_DFP 0x02
858#define OB_MAC_IOCB_V 0x04
859 __le32 reserved1[2];
860 __le16 frame_len;
861#define OB_MAC_IOCB_LEN_MASK 0x3ffff
862 __le16 reserved2;
863 __le32 tid;
864 __le32 txq_idx;
865 __le32 reserved3;
866 __le16 vlan_tci;
867 __le16 reserved4;
868 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
869} __attribute((packed));
870
871struct ob_mac_iocb_rsp {
872 u8 opcode; /* */
873 u8 flags1; /* */
874#define OB_MAC_IOCB_RSP_OI 0x01 /* */
875#define OB_MAC_IOCB_RSP_I 0x02 /* */
876#define OB_MAC_IOCB_RSP_E 0x08 /* */
877#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
878#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
879#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
880 u8 flags2; /* */
881 u8 flags3; /* */
882#define OB_MAC_IOCB_RSP_B 0x80 /* */
883 __le32 tid;
884 __le32 txq_idx;
885 __le32 reserved[13];
886} __attribute((packed));
887
888struct ob_mac_tso_iocb_req {
889 u8 opcode;
890 u8 flags1;
891#define OB_MAC_TSO_IOCB_OI 0x01
892#define OB_MAC_TSO_IOCB_I 0x02
893#define OB_MAC_TSO_IOCB_D 0x08
894#define OB_MAC_TSO_IOCB_IP4 0x40
895#define OB_MAC_TSO_IOCB_IP6 0x80
896 u8 flags2;
897#define OB_MAC_TSO_IOCB_LSO 0x20
898#define OB_MAC_TSO_IOCB_UC 0x40
899#define OB_MAC_TSO_IOCB_TC 0x80
900 u8 flags3;
901#define OB_MAC_TSO_IOCB_IC 0x01
902#define OB_MAC_TSO_IOCB_DFP 0x02
903#define OB_MAC_TSO_IOCB_V 0x04
904 __le32 reserved1[2];
905 __le32 frame_len;
906 __le32 tid;
907 __le32 txq_idx;
908 __le16 total_hdrs_len;
909 __le16 net_trans_offset;
910#define OB_MAC_TRANSPORT_HDR_SHIFT 6
911 __le16 vlan_tci;
912 __le16 mss;
913 struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
914} __attribute((packed));
915
916struct ob_mac_tso_iocb_rsp {
917 u8 opcode;
918 u8 flags1;
919#define OB_MAC_TSO_IOCB_RSP_OI 0x01
920#define OB_MAC_TSO_IOCB_RSP_I 0x02
921#define OB_MAC_TSO_IOCB_RSP_E 0x08
922#define OB_MAC_TSO_IOCB_RSP_S 0x10
923#define OB_MAC_TSO_IOCB_RSP_L 0x20
924#define OB_MAC_TSO_IOCB_RSP_P 0x40
925 u8 flags2; /* */
926 u8 flags3; /* */
927#define OB_MAC_TSO_IOCB_RSP_B 0x8000
928 __le32 tid;
929 __le32 txq_idx;
930 __le32 reserved2[13];
931} __attribute((packed));
932
933struct ib_mac_iocb_rsp {
934 u8 opcode; /* 0x20 */
935 u8 flags1;
936#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */
937#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */
938#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
939#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
940#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
941#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
942#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
943#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
944#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
945#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
946#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
947 u8 flags2;
948#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
949#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
950#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
951#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
952#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
953#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
954#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
955#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
956#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
957#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
958#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
959#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
960 u8 flags3;
961#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
962#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
963#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
964#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
965#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
966#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
967#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
968#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
969#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
970#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
971#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
972 __le32 data_len; /* */
973 __le32 data_addr_lo; /* */
974 __le32 data_addr_hi; /* */
975 __le32 rss; /* */
976 __le16 vlan_id; /* 12 bits */
977#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
978#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
979
980 __le16 reserved1;
981 __le32 reserved2[6];
982 __le32 flags4;
983#define IB_MAC_IOCB_RSP_HV 0x20000000 /* */
984#define IB_MAC_IOCB_RSP_HS 0x40000000 /* */
985#define IB_MAC_IOCB_RSP_HL 0x80000000 /* */
986 __le32 hdr_len; /* */
987 __le32 hdr_addr_lo; /* */
988 __le32 hdr_addr_hi; /* */
989} __attribute((packed));
990
/*
 * Inbound Asynchronous Event (AE) IOCB response.
 * Reports link, CAM, ECC, management, MAC and GPIO events to the
 * driver via a completion queue.
 */
struct ib_ae_iocb_rsp {
	u8 opcode;			/* identifies this as an AE IOCB */
	u8 flags1;
#define IB_AE_IOCB_RSP_OI	0x01	/* NOTE(review): presumably "override intr delay" -- confirm */
#define IB_AE_IOCB_RSP_I	0x02	/* NOTE(review): presumably "interrupt" -- confirm */
	u8 event;			/* event code, one of the *_EVENT values below */
#define LINK_UP_EVENT		0x00
#define LINK_DOWN_EVENT		0x01
#define CAM_LOOKUP_ERR_EVENT	0x06
#define SOFT_ECC_ERROR_EVENT	0x07
#define MGMT_ERR_EVENT		0x08
#define TEN_GIG_MAC_EVENT	0x09
#define GPI0_H2L_EVENT		0x10	/* GPIO 0 high-to-low transition */
#define GPI0_L2H_EVENT		0x20	/* GPIO 0 low-to-high transition */
#define GPI1_H2L_EVENT		0x11	/* GPIO 1 high-to-low transition */
#define GPI1_L2H_EVENT		0x21	/* GPIO 1 low-to-high transition */
#define PCI_ERR_ANON_BUF_RD	0x40
	u8 q_id;			/* queue id the event refers to */
	__le32 reserved[15];		/* pads the IOCB out to 64 bytes */
} __attribute((packed));
1011
/*
 * These three structures are for generic
 * handling of ib and ob iocbs.
 */
struct ql_net_rsp_iocb {
	u8 opcode;		/* identifies the specific response type */
	u8 flags0;
	__le16 length;
	__le32 tid;		/* transaction id -- matches net_req_iocb.tid (confirm) */
	__le32 reserved[14];	/* pads the IOCB out to 64 bytes */
} __attribute((packed));
1023
/* Generic outbound request IOCB; 128 bytes total. */
struct net_req_iocb {
	u8 opcode;		/* identifies the specific request type */
	u8 flags0;
	__le16 flags1;
	__le32 tid;		/* transaction id echoed back in the response (confirm) */
	__le32 reserved1[30];	/* pads the IOCB out to 128 bytes */
} __attribute((packed));
1031
/*
 * tx ring initialization control block for chip.
 * It is defined as:
 * "Work Queue Initialization Control Block"
 */
struct wqicb {
	__le16 len;		/* queue entry count plus V/CPP control bits below */
#define Q_LEN_V		(1 << 4)
#define Q_LEN_CPP_CONT	0x0000
#define Q_LEN_CPP_16	0x0001
#define Q_LEN_CPP_32	0x0002
#define Q_LEN_CPP_64	0x0003
	__le16 flags;
#define Q_PRI_SHIFT	1
#define Q_FLAGS_LC	0x1000
#define Q_FLAGS_LB	0x2000
#define Q_FLAGS_LI	0x4000
#define Q_FLAGS_LO	0x8000
	__le16 cq_id_rss;	/* completion queue that receives tx completions */
#define Q_CQ_ID_RSS_RV	0x8000
	__le16 rid;
	__le32 addr_lo;			/* DMA address of the work queue, low half */
	__le32 addr_hi;			/* DMA address of the work queue, high half */
	__le32 cnsmr_idx_addr_lo;	/* DMA address of the consumer-index shadow, low */
	__le32 cnsmr_idx_addr_hi;	/* DMA address of the consumer-index shadow, high */
} __attribute((packed));
1058
/*
 * rx ring initialization control block for chip.
 * It is defined as:
 * "Completion Queue Initialization Control Block"
 */
struct cqicb {
	u8 msix_vect;		/* MSI-X vector this queue interrupts on */
	u8 reserved1;
	u8 reserved2;
	u8 flags;
#define FLAGS_LV	0x08
#define FLAGS_LS	0x10
#define FLAGS_LL	0x20
#define FLAGS_LI	0x40
#define FLAGS_LC	0x80
	__le16 len;		/* queue entry count plus V/CPP control bits below */
#define LEN_V		(1 << 4)
#define LEN_CPP_CONT	0x0000
#define LEN_CPP_32	0x0001
#define LEN_CPP_64	0x0002
#define LEN_CPP_128	0x0003
	__le16 rid;
	__le32 addr_lo;			/* completion queue DMA address, low half */
	__le32 addr_hi;			/* completion queue DMA address, high half */
	__le32 prod_idx_addr_lo;	/* producer-index shadow DMA address, low */
	__le32 prod_idx_addr_hi;	/* producer-index shadow DMA address, high */
	__le16 pkt_delay;		/* packet-count interrupt coalescing */
	__le16 irq_delay;		/* time-based interrupt coalescing */
	__le32 lbq_addr_lo;		/* large buffer queue DMA address, low */
	__le32 lbq_addr_hi;		/* large buffer queue DMA address, high */
	__le16 lbq_buf_size;
	__le16 lbq_len;		/* entry count */
	__le32 sbq_addr_lo;		/* small buffer queue DMA address, low */
	__le32 sbq_addr_hi;		/* small buffer queue DMA address, high */
	__le16 sbq_buf_size;
	__le16 sbq_len;		/* entry count */
} __attribute((packed));
1096
/* RSS Initialization Control Block for the chip. */
struct ricb {
	u8 base_cq;		/* first completion queue of the RSS group */
#define RSS_L4K 0x80
	u8 flags;		/* which hash types/inputs to enable */
#define RSS_L6K 0x01
#define RSS_LI	0x02
#define RSS_LB	0x04
#define RSS_LM	0x08
#define RSS_RI4 0x10
#define RSS_RT4 0x20
#define RSS_RI6 0x40
#define RSS_RT6 0x80
	__le16 mask;			/* mask applied to the hash result (confirm) */
	__le32 hash_cq_id[256];		/* hash-result -> completion queue indirection table */
	__le32 ipv6_hash_key[10];	/* 40-byte IPv6 RSS key */
	__le32 ipv4_hash_key[4];	/* 16-byte IPv4 RSS key */
} __attribute((packed));
1114
1115/* SOFTWARE/DRIVER DATA STRUCTURES. */
1116
/* Outbound Address List: overflow descriptors for frames with more
 * fragments than fit in the ob_mac IOCB itself. */
struct oal {
	struct tx_buf_desc oal[TX_DESC_PER_OAL];
};
1120
/* Per-fragment PCI mapping bookkeeping. The DECLARE_PCI_UNMAP_*
 * macros expand to nothing on platforms that don't need unmap info,
 * so this struct may be empty. */
struct map_list {
	DECLARE_PCI_UNMAP_ADDR(mapaddr);
	DECLARE_PCI_UNMAP_LEN(maplen);
};
1125
/* Driver-side state for one outstanding tx queue entry. */
struct tx_ring_desc {
	struct sk_buff *skb;			/* skb being transmitted, freed on completion */
	struct ob_mac_iocb_req *queue_entry;	/* the hardware IOCB this descriptor tracks */
	int index;				/* position within the tx ring */
	struct oal oal;				/* overflow list for many-fragment skbs */
	struct map_list map[MAX_SKB_FRAGS + 1];	/* unmap info: head + each page fragment */
	int map_cnt;				/* number of valid entries in map[] */
	struct tx_ring_desc *next;
};
1135
/* Driver-side state for one rx buffer queue entry.
 * NOTE(review): judging by member names, large buffer queues use
 * pages and small buffer queues use skbs -- confirm at the use sites. */
struct bq_desc {
	union {
		struct page *lbq_page;
		struct sk_buff *skb;
	} p;
	struct bq_element *bq;	/* the hardware queue element this tracks */
	int index;		/* position within the buffer queue */
	DECLARE_PCI_UNMAP_ADDR(mapaddr);
	DECLARE_PCI_UNMAP_LEN(maplen);
};
1146
/*
 * Pick a tx queue for the current CPU by taking the processor id
 * modulo the number of tx rings.  'skb' is unused but kept so the
 * call sites keep a queue-selection-style interface.
 * Fix: parenthesize the 'qdev' macro argument so expansion is safe
 * for any argument expression.
 */
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % ((qdev)->tx_ring_count))
1148
/* Driver-side representation of one transmit (work) queue. */
struct tx_ring {
	/*
	 * queue info.
	 */
	struct wqicb wqicb;	/* structure used to inform chip of new queue */
	void *wq_base;		/* pci_alloc:virtual addr for tx */
	dma_addr_t wq_base_dma;	/* pci_alloc:dma addr for tx */
	u32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
	dma_addr_t cnsmr_idx_sh_reg_dma;	/* dma-shadow copy of consumer */
	u32 wq_size;		/* size in bytes of queue area */
	u32 wq_len;		/* number of entries in queue */
	void __iomem *prod_idx_db_reg;	/* doorbell area index reg at offset 0x00 */
	void __iomem *valid_db_reg;	/* doorbell area valid reg at offset 0x04 */
	u16 prod_idx;		/* current value for prod idx */
	u16 cq_id;		/* completion (rx) queue for tx completions */
	u8 wq_id;		/* queue id for this entry */
	u8 reserved1[3];
	struct tx_ring_desc *q;	/* descriptor list for the queue */
	spinlock_t lock;
	atomic_t tx_count;	/* counts down for every outstanding IO */
	atomic_t queue_stopped;	/* Turns queue off when full. */
	struct delayed_work tx_work;
	struct ql_adapter *qdev;	/* back-pointer to owning adapter */
};
1173
/*
 * Type of inbound queue.  Stored in rx_ring->type to select the
 * completion handler for the ring.
 */
enum {
	DEFAULT_Q = 2,		/* Handles slow queue and chip/MPI events. */
	TX_Q = 3,		/* Handles outbound completions. */
	RX_Q = 4,		/* Handles inbound completions. */
};
1182
/* Driver-side representation of one completion (rx) queue together
 * with its large/small buffer queues. */
struct rx_ring {
	struct cqicb cqicb;	/* The chip's completion queue init control block. */

	/* Completion queue elements. */
	void *cq_base;		/* pci_alloc: virtual address of the queue */
	dma_addr_t cq_base_dma;	/* pci_alloc: dma address of the queue */
	u32 cq_size;		/* size in bytes of queue area */
	u32 cq_len;		/* number of entries in the queue */
	u16 cq_id;
	u32 *prod_idx_sh_reg;	/* Shadowed producer register. */
	dma_addr_t prod_idx_sh_reg_dma;
	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
	u32 cnsmr_idx;		/* current sw idx */
	struct ql_net_rsp_iocb *curr_entry;	/* next entry on queue */
	void __iomem *valid_db_reg;	/* PCI doorbell mem area + 0x04 */

	/* Large buffer queue elements. */
	u32 lbq_len;		/* entry count */
	u32 lbq_size;		/* size in bytes of queue */
	u32 lbq_buf_size;
	void *lbq_base;
	dma_addr_t lbq_base_dma;
	void *lbq_base_indirect;
	dma_addr_t lbq_base_indirect_dma;
	struct bq_desc *lbq;	/* array of control blocks */
	void __iomem *lbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x18 */
	u32 lbq_prod_idx;	/* current sw prod idx */
	u32 lbq_curr_idx;	/* next entry we expect */
	u32 lbq_clean_idx;	/* beginning of new descs */
	u32 lbq_free_cnt;	/* free buffer desc cnt */

	/* Small buffer queue elements. */
	u32 sbq_len;		/* entry count */
	u32 sbq_size;		/* size in bytes of queue */
	u32 sbq_buf_size;
	void *sbq_base;
	dma_addr_t sbq_base_dma;
	void *sbq_base_indirect;
	dma_addr_t sbq_base_indirect_dma;
	struct bq_desc *sbq;	/* array of control blocks */
	void __iomem *sbq_prod_idx_db_reg;	/* PCI doorbell mem area + 0x1c */
	u32 sbq_prod_idx;	/* current sw prod idx */
	u32 sbq_curr_idx;	/* next entry we expect */
	u32 sbq_clean_idx;	/* beginning of new descs */
	u32 sbq_free_cnt;	/* free buffer desc cnt */

	/* Misc. handler elements. */
	u32 type;		/* Type of queue, tx, rx, or default. */
	u32 irq;		/* Which vector this ring is assigned. */
	u32 cpu;		/* Which CPU this should run on. */
	char name[IFNAMSIZ + 5];	/* irq name, e.g. "<ifname>-rx-<n>" (confirm format) */
	struct napi_struct napi;
	struct delayed_work rx_work;
	u8 reserved;
	struct ql_adapter *qdev;	/* back-pointer to owning adapter */
};
1239
/*
 * 4-byte hash value wrapper.
 * NOTE(review): the original comment here said "RSS Initialization
 * Control Block", but that is struct ricb above; this is just a
 * fixed-size hash id -- confirm intended usage at the call sites.
 */
struct hash_id {
	u8 value[4];
};
1246
/* Hardware MAC statistics, read from the XGMAC register block and
 * accumulated by the driver (e.g. for ethtool). */
struct nic_stats {
	/*
	 * These stats come from offset 200h to 278h
	 * in the XGMAC register.
	 */
	u64 tx_pkts;
	u64 tx_bytes;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_ucast_pkts;
	u64 tx_ctl_pkts;
	u64 tx_pause_pkts;
	u64 tx_64_pkt;
	u64 tx_65_to_127_pkt;
	u64 tx_128_to_255_pkt;
	u64 tx_256_511_pkt;
	u64 tx_512_to_1023_pkt;
	u64 tx_1024_to_1518_pkt;
	u64 tx_1519_to_max_pkt;
	u64 tx_undersize_pkt;
	u64 tx_oversize_pkt;

	/*
	 * These stats come from offset 300h to 3C8h
	 * in the XGMAC register.
	 */
	u64 rx_bytes;
	u64 rx_bytes_ok;
	u64 rx_pkts;
	u64 rx_pkts_ok;
	u64 rx_bcast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_ucast_pkts;
	u64 rx_undersize_pkts;
	u64 rx_oversize_pkts;
	u64 rx_jabber_pkts;
	u64 rx_undersize_fcerr_pkts;
	u64 rx_drop_events;
	u64 rx_fcerr_pkts;
	u64 rx_align_err;
	u64 rx_symbol_err;
	u64 rx_mac_err;
	u64 rx_ctl_pkts;
	u64 rx_pause_pkts;
	u64 rx_64_pkts;
	u64 rx_65_to_127_pkts;
	u64 rx_128_255_pkts;
	u64 rx_256_511_pkts;
	u64 rx_512_to_1023_pkts;
	u64 rx_1024_to_1518_pkts;
	u64 rx_1519_to_max_pkts;
	u64 rx_len_err_pkts;
};
1300
/*
 * intr_context structure is used during initialization
 * to hook the interrupts. It is also used in a single
 * irq environment as a context to the ISR.
 */
struct intr_context {
	struct ql_adapter *qdev;	/* back-pointer for the ISR */
	u32 intr;		/* vector number within the adapter */
	u32 hooked;		/* non-zero once request_irq succeeded (confirm) */
	u32 intr_en_mask;	/* value/mask used to enable this intr */
	u32 intr_dis_mask;	/* value/mask used to disable this intr */
	u32 intr_read_mask;	/* value/mask used to read this intr */
	char name[IFNAMSIZ * 2];	/* irq name shown in /proc/interrupts */
	atomic_t irq_cnt;	/* irq_cnt is used in single vector
				 * environment.  It's incremented for each
				 * irq handler that is scheduled.  When each
				 * handler finishes it decrements irq_cnt and
				 * enables interrupts if it's zero. */
	irq_handler_t handler;	/* ISR registered for this vector */
};
1321
/* adapter flags definitions.
 * Bit flags kept in ql_adapter->flags; each state must own a
 * distinct bit so the tests are independent.
 * Fix: QL_LEGACY_ENABLED and QL_MSI_ENABLED were both (1 << 3),
 * making legacy-INTx and MSI mode indistinguishable; give legacy
 * its own (previously unused) bit.
 */
enum {
	QL_ADAPTER_UP = (1 << 0),	/* Adapter has been brought up. */
	QL_LEGACY_ENABLED = (1 << 1),	/* was (1 << 3), colliding with QL_MSI_ENABLED */
	QL_MSI_ENABLED = (1 << 3),
	QL_MSIX_ENABLED = (1 << 4),
	QL_DMA64 = (1 << 5),
	QL_PROMISCUOUS = (1 << 6),
	QL_ALLMULTI = (1 << 7),
};
1332
/* link_status bit definitions.
 * Decoded fields of the adapter's link_status word: loopback mode
 * (bits 10:8), pause configuration (bits 7:6), speed (bits 5:3) and
 * link/media type (bits 2:0). */
enum {
	LOOPBACK_MASK = 0x00000700,
	LOOPBACK_PCS = 0x00000100,
	LOOPBACK_HSS = 0x00000200,
	LOOPBACK_EXT = 0x00000300,
	PAUSE_MASK = 0x000000c0,
	PAUSE_STD = 0x00000040,
	PAUSE_PRI = 0x00000080,
	SPEED_MASK = 0x00000038,
	SPEED_100Mb = 0x00000000,
	SPEED_1Gb = 0x00000008,
	SPEED_10Gb = 0x00000010,
	LINK_TYPE_MASK = 0x00000007,
	LINK_TYPE_XFI = 0x00000001,
	LINK_TYPE_XAUI = 0x00000002,
	LINK_TYPE_XFI_BP = 0x00000003,
	LINK_TYPE_XAUI_BP = 0x00000004,
	LINK_TYPE_10GBASET = 0x00000005,
};
1353
/*
 * The main Adapter structure definition.
 * This structure has all fields relevant to the hardware.
 */
struct ql_adapter {
	struct ricb ricb;	/* RSS init control block sent to the chip */
	unsigned long flags;	/* QL_* adapter flags */
	u32 wol;		/* Wake-on-LAN configuration */

	struct nic_stats nic_stats;	/* XGMAC hardware counters */

	struct vlan_group *vlgrp;

	/* PCI Configuration information for this device */
	struct pci_dev *pdev;
	struct net_device *ndev;	/* Parent NET device */

	/* Hardware information */
	u32 chip_rev_id;
	u32 func;		/* PCI function for this adapter */

	spinlock_t adapter_lock;
	spinlock_t hw_lock;
	spinlock_t stats_lock;
	spinlock_t legacy_lock;	/* used for maintaining legacy intr sync */

	/* PCI Bus Relative Register Addresses */
	void __iomem *reg_base;		/* mapped control registers (BAR 0, presumably) */
	void __iomem *doorbell_area;	/* mapped doorbell registers */
	u32 doorbell_area_size;

	u32 msg_enable;		/* netif message-level bitmask */

	/* Page for Shadow Registers */
	void *rx_ring_shadow_reg_area;
	dma_addr_t rx_ring_shadow_reg_dma;
	void *tx_ring_shadow_reg_area;
	dma_addr_t tx_ring_shadow_reg_dma;

	u32 mailbox_in;		/* register offsets for MPI mailbox access (confirm) */
	u32 mailbox_out;

	int tx_ring_size;
	int rx_ring_size;
	u32 intr_count;		/* number of hooked interrupt vectors */
	struct msix_entry *msi_x_entry;
	struct intr_context intr_context[MAX_RX_RINGS];

	int (*legacy_check) (struct ql_adapter *);	/* legacy-INTx "is it ours" hook */

	int tx_ring_count;	/* One per online CPU. */
	u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
	u32 rss_ring_count;	/* One per online CPU. */
	/*
	 * rx_ring_count =
	 *  one default queue +
	 *  (CPU count * outbound completion rx_ring) +
	 *  (CPU count * inbound (RSS) completion rx_ring)
	 */
	int rx_ring_count;
	int ring_mem_size;
	void *ring_mem;
	struct rx_ring *rx_ring;
	int rx_csum;		/* rx checksum offload enabled flag */
	struct tx_ring *tx_ring;
	u32 default_rx_queue;

	u16 rx_coalesce_usecs;	/* cqicb->int_delay */
	u16 rx_max_coalesced_frames;	/* cqicb->pkt_int_delay */
	u16 tx_coalesce_usecs;	/* cqicb->int_delay */
	u16 tx_max_coalesced_frames;	/* cqicb->pkt_int_delay */

	u32 xg_sem_mask;	/* semaphore mask protecting XGMAC access */
	u32 port_link_up;
	u32 port_init;
	u32 link_status;	/* decoded with the link_status enum above */

	struct flash_params flash;

	struct net_device_stats stats;
	struct workqueue_struct *q_workqueue;
	struct workqueue_struct *workqueue;
	struct delayed_work asic_reset_work;
	struct delayed_work mpi_reset_work;
	struct delayed_work mpi_work;
};
1440
1441/*
1442 * Typical Register accessor for memory mapped device.
1443 */
1444static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
1445{
1446 return readl(qdev->reg_base + reg);
1447}
1448
1449/*
1450 * Typical Register accessor for memory mapped device.
1451 */
1452static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
1453{
1454 writel(val, qdev->reg_base + reg);
1455}
1456
/*
 * Doorbell Registers:
 * Doorbell registers are virtual registers in the PCI memory space.
 * The space is allocated by the chip during PCI initialization.  The
 * device driver finds the doorbell address in BAR 3 in PCI config space.
 * The registers are used to control outbound and inbound queues. For
 * example, the producer index for an outbound queue.  Each queue uses
 * 1 4k chunk of memory.  The lower half of the space is for outbound
 * queues. The upper half is for inbound queues.
 */
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
{
	writel(val, addr);
	/* Flush/order the doorbell write with respect to writes from
	 * other CPUs before any following spin_unlock (mmiowb semantics). */
	mmiowb();
}
1472
/*
 * Shadow Registers:
 * Outbound queues have a consumer index that is maintained by the chip.
 * Inbound queues have a producer index that is maintained by the chip.
 * For lower overhead, these registers are "shadowed" to host memory
 * which allows the device driver to track the queue progress without
 * PCI reads. When an entry is placed on an inbound queue, the chip will
 * update the relevant index register and then copy the value to the
 * shadow register in host memory.
 */
static inline unsigned int ql_read_sh_reg(const volatile void *addr)
{
	/* volatile: re-read from the DMA'd shadow area every call;
	 * __force silences sparse about dropping the qualifier. */
	return *(volatile unsigned int __force *)addr;
}
1487
1488extern char qlge_driver_name[];
1489extern const char qlge_driver_version[];
1490extern const struct ethtool_ops qlge_ethtool_ops;
1491
1492extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
1493extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
1494extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1495extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
1496 u32 *value);
1497extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
1498extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
1499 u16 q_id);
1500void ql_queue_fw_error(struct ql_adapter *qdev);
1501void ql_mpi_work(struct work_struct *work);
1502void ql_mpi_reset_work(struct work_struct *work);
1503int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
1504void ql_queue_asic_error(struct ql_adapter *qdev);
1505void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
1506void ql_set_ethtool_ops(struct net_device *ndev);
1507int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
1508
1509#if 1
1510#define QL_ALL_DUMP
1511#define QL_REG_DUMP
1512#define QL_DEV_DUMP
1513#define QL_CB_DUMP
1514/* #define QL_IB_DUMP */
1515/* #define QL_OB_DUMP */
1516#endif
1517
1518#ifdef QL_REG_DUMP
1519extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
1520extern void ql_dump_routing_entries(struct ql_adapter *qdev);
1521extern void ql_dump_regs(struct ql_adapter *qdev);
1522#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
1523#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
1524#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
1525#else
1526#define QL_DUMP_REGS(qdev)
1527#define QL_DUMP_ROUTE(qdev)
1528#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
1529#endif
1530
1531#ifdef QL_STAT_DUMP
1532extern void ql_dump_stat(struct ql_adapter *qdev);
1533#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
1534#else
1535#define QL_DUMP_STAT(qdev)
1536#endif
1537
1538#ifdef QL_DEV_DUMP
1539extern void ql_dump_qdev(struct ql_adapter *qdev);
1540#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
1541#else
1542#define QL_DUMP_QDEV(qdev)
1543#endif
1544
1545#ifdef QL_CB_DUMP
1546extern void ql_dump_wqicb(struct wqicb *wqicb);
1547extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
1548extern void ql_dump_ricb(struct ricb *ricb);
1549extern void ql_dump_cqicb(struct cqicb *cqicb);
1550extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
1551extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
1552#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
1553#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
1554#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
1555#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
1556#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
1557#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
1558 ql_dump_hw_cb(qdev, size, bit, q_id)
1559#else
1560#define QL_DUMP_RICB(ricb)
1561#define QL_DUMP_WQICB(wqicb)
1562#define QL_DUMP_TX_RING(tx_ring)
1563#define QL_DUMP_CQICB(cqicb)
1564#define QL_DUMP_RX_RING(rx_ring)
1565#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
1566#endif
1567
1568#ifdef QL_OB_DUMP
1569extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
1570extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
1571extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
1572#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
1573#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
1574#else
1575#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
1576#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
1577#endif
1578
1579#ifdef QL_IB_DUMP
1580extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
1581#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
1582#else
1583#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
1584#endif
1585
1586#ifdef QL_ALL_DUMP
1587extern void ql_dump_all(struct ql_adapter *qdev);
1588#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
1589#else
1590#define QL_DUMP_ALL(qdev)
1591#endif
1592
1593#endif /* _QLGE_H_ */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
new file mode 100644
index 000000000000..47df304a02c8
--- /dev/null
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -0,0 +1,858 @@
1#include "qlge.h"
2
3#ifdef QL_REG_DUMP
/* Print the enabled/disabled state of every hooked interrupt vector.
 * Writing the vector's intr_read_mask to INTR_EN and reading it back
 * reports that vector's enable bit (assumed read-only query -- confirm
 * against the INTR_EN register spec). */
static void ql_dump_intr_states(struct ql_adapter *qdev)
{
	int i;
	u32 value;
	for (i = 0; i < qdev->intr_count; i++) {
		/* Select vector i, then read back its state. */
		ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
		value = ql_read32(qdev, INTR_EN);
		printk(KERN_ERR PFX
		       "%s: Interrupt %d is %s.\n",
		       qdev->ndev->name, i,
		       (value & INTR_EN_EN ? "enabled" : "disabled"));
	}
}
17
18void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
19{
20 u32 data;
21 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
22 printk(KERN_ERR "%s: Couldn't get xgmac sem.\n", __func__);
23 return;
24 }
25 ql_read_xgmac_reg(qdev, PAUSE_SRC_LO, &data);
26 printk(KERN_ERR PFX "%s: PAUSE_SRC_LO = 0x%.08x.\n", qdev->ndev->name,
27 data);
28 ql_read_xgmac_reg(qdev, PAUSE_SRC_HI, &data);
29 printk(KERN_ERR PFX "%s: PAUSE_SRC_HI = 0x%.08x.\n", qdev->ndev->name,
30 data);
31 ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
32 printk(KERN_ERR PFX "%s: GLOBAL_CFG = 0x%.08x.\n", qdev->ndev->name,
33 data);
34 ql_read_xgmac_reg(qdev, TX_CFG, &data);
35 printk(KERN_ERR PFX "%s: TX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
36 ql_read_xgmac_reg(qdev, RX_CFG, &data);
37 printk(KERN_ERR PFX "%s: RX_CFG = 0x%.08x.\n", qdev->ndev->name, data);
38 ql_read_xgmac_reg(qdev, FLOW_CTL, &data);
39 printk(KERN_ERR PFX "%s: FLOW_CTL = 0x%.08x.\n", qdev->ndev->name,
40 data);
41 ql_read_xgmac_reg(qdev, PAUSE_OPCODE, &data);
42 printk(KERN_ERR PFX "%s: PAUSE_OPCODE = 0x%.08x.\n", qdev->ndev->name,
43 data);
44 ql_read_xgmac_reg(qdev, PAUSE_TIMER, &data);
45 printk(KERN_ERR PFX "%s: PAUSE_TIMER = 0x%.08x.\n", qdev->ndev->name,
46 data);
47 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_LO, &data);
48 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_LO = 0x%.08x.\n",
49 qdev->ndev->name, data);
50 ql_read_xgmac_reg(qdev, PAUSE_FRM_DEST_HI, &data);
51 printk(KERN_ERR PFX "%s: PAUSE_FRM_DEST_HI = 0x%.08x.\n",
52 qdev->ndev->name, data);
53 ql_read_xgmac_reg(qdev, MAC_TX_PARAMS, &data);
54 printk(KERN_ERR PFX "%s: MAC_TX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
55 data);
56 ql_read_xgmac_reg(qdev, MAC_RX_PARAMS, &data);
57 printk(KERN_ERR PFX "%s: MAC_RX_PARAMS = 0x%.08x.\n", qdev->ndev->name,
58 data);
59 ql_read_xgmac_reg(qdev, MAC_SYS_INT, &data);
60 printk(KERN_ERR PFX "%s: MAC_SYS_INT = 0x%.08x.\n", qdev->ndev->name,
61 data);
62 ql_read_xgmac_reg(qdev, MAC_SYS_INT_MASK, &data);
63 printk(KERN_ERR PFX "%s: MAC_SYS_INT_MASK = 0x%.08x.\n",
64 qdev->ndev->name, data);
65 ql_read_xgmac_reg(qdev, MAC_MGMT_INT, &data);
66 printk(KERN_ERR PFX "%s: MAC_MGMT_INT = 0x%.08x.\n", qdev->ndev->name,
67 data);
68 ql_read_xgmac_reg(qdev, MAC_MGMT_IN_MASK, &data);
69 printk(KERN_ERR PFX "%s: MAC_MGMT_IN_MASK = 0x%.08x.\n",
70 qdev->ndev->name, data);
71 ql_read_xgmac_reg(qdev, EXT_ARB_MODE, &data);
72 printk(KERN_ERR PFX "%s: EXT_ARB_MODE = 0x%.08x.\n", qdev->ndev->name,
73 data);
74 ql_sem_unlock(qdev, qdev->xg_sem_mask);
75
76}
77
/* Placeholder: ETS register dump not implemented yet. */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
81
82static void ql_dump_cam_entries(struct ql_adapter *qdev)
83{
84 int i;
85 u32 value[3];
86 for (i = 0; i < 4; i++) {
87 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
88 printk(KERN_ERR PFX
89 "%s: Failed read of mac index register.\n",
90 __func__);
91 return;
92 } else {
93 if (value[0])
94 printk(KERN_ERR PFX
95 "%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x.\n",
96 qdev->ndev->name, i, value[1], value[0],
97 value[2]);
98 }
99 }
100 for (i = 0; i < 32; i++) {
101 if (ql_get_mac_addr_reg
102 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
103 printk(KERN_ERR PFX
104 "%s: Failed read of mac index register.\n",
105 __func__);
106 return;
107 } else {
108 if (value[0])
109 printk(KERN_ERR PFX
110 "%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x.\n",
111 qdev->ndev->name, i, value[1], value[0]);
112 }
113 }
114}
115
116void ql_dump_routing_entries(struct ql_adapter *qdev)
117{
118 int i;
119 u32 value;
120 for (i = 0; i < 16; i++) {
121 value = 0;
122 if (ql_get_routing_reg(qdev, i, &value)) {
123 printk(KERN_ERR PFX
124 "%s: Failed read of routing index register.\n",
125 __func__);
126 return;
127 } else {
128 if (value)
129 printk(KERN_ERR PFX
130 "%s: Routing Mask %d = 0x%.08x.\n",
131 qdev->ndev->name, i, value);
132 }
133 }
134}
135
136void ql_dump_regs(struct ql_adapter *qdev)
137{
138 printk(KERN_ERR PFX "reg dump for function #%d.\n", qdev->func);
139 printk(KERN_ERR PFX "SYS = 0x%x.\n",
140 ql_read32(qdev, SYS));
141 printk(KERN_ERR PFX "RST_FO = 0x%x.\n",
142 ql_read32(qdev, RST_FO));
143 printk(KERN_ERR PFX "FSC = 0x%x.\n",
144 ql_read32(qdev, FSC));
145 printk(KERN_ERR PFX "CSR = 0x%x.\n",
146 ql_read32(qdev, CSR));
147 printk(KERN_ERR PFX "ICB_RID = 0x%x.\n",
148 ql_read32(qdev, ICB_RID));
149 printk(KERN_ERR PFX "ICB_L = 0x%x.\n",
150 ql_read32(qdev, ICB_L));
151 printk(KERN_ERR PFX "ICB_H = 0x%x.\n",
152 ql_read32(qdev, ICB_H));
153 printk(KERN_ERR PFX "CFG = 0x%x.\n",
154 ql_read32(qdev, CFG));
155 printk(KERN_ERR PFX "BIOS_ADDR = 0x%x.\n",
156 ql_read32(qdev, BIOS_ADDR));
157 printk(KERN_ERR PFX "STS = 0x%x.\n",
158 ql_read32(qdev, STS));
159 printk(KERN_ERR PFX "INTR_EN = 0x%x.\n",
160 ql_read32(qdev, INTR_EN));
161 printk(KERN_ERR PFX "INTR_MASK = 0x%x.\n",
162 ql_read32(qdev, INTR_MASK));
163 printk(KERN_ERR PFX "ISR1 = 0x%x.\n",
164 ql_read32(qdev, ISR1));
165 printk(KERN_ERR PFX "ISR2 = 0x%x.\n",
166 ql_read32(qdev, ISR2));
167 printk(KERN_ERR PFX "ISR3 = 0x%x.\n",
168 ql_read32(qdev, ISR3));
169 printk(KERN_ERR PFX "ISR4 = 0x%x.\n",
170 ql_read32(qdev, ISR4));
171 printk(KERN_ERR PFX "REV_ID = 0x%x.\n",
172 ql_read32(qdev, REV_ID));
173 printk(KERN_ERR PFX "FRC_ECC_ERR = 0x%x.\n",
174 ql_read32(qdev, FRC_ECC_ERR));
175 printk(KERN_ERR PFX "ERR_STS = 0x%x.\n",
176 ql_read32(qdev, ERR_STS));
177 printk(KERN_ERR PFX "RAM_DBG_ADDR = 0x%x.\n",
178 ql_read32(qdev, RAM_DBG_ADDR));
179 printk(KERN_ERR PFX "RAM_DBG_DATA = 0x%x.\n",
180 ql_read32(qdev, RAM_DBG_DATA));
181 printk(KERN_ERR PFX "ECC_ERR_CNT = 0x%x.\n",
182 ql_read32(qdev, ECC_ERR_CNT));
183 printk(KERN_ERR PFX "SEM = 0x%x.\n",
184 ql_read32(qdev, SEM));
185 printk(KERN_ERR PFX "GPIO_1 = 0x%x.\n",
186 ql_read32(qdev, GPIO_1));
187 printk(KERN_ERR PFX "GPIO_2 = 0x%x.\n",
188 ql_read32(qdev, GPIO_2));
189 printk(KERN_ERR PFX "GPIO_3 = 0x%x.\n",
190 ql_read32(qdev, GPIO_3));
191 printk(KERN_ERR PFX "XGMAC_ADDR = 0x%x.\n",
192 ql_read32(qdev, XGMAC_ADDR));
193 printk(KERN_ERR PFX "XGMAC_DATA = 0x%x.\n",
194 ql_read32(qdev, XGMAC_DATA));
195 printk(KERN_ERR PFX "NIC_ETS = 0x%x.\n",
196 ql_read32(qdev, NIC_ETS));
197 printk(KERN_ERR PFX "CNA_ETS = 0x%x.\n",
198 ql_read32(qdev, CNA_ETS));
199 printk(KERN_ERR PFX "FLASH_ADDR = 0x%x.\n",
200 ql_read32(qdev, FLASH_ADDR));
201 printk(KERN_ERR PFX "FLASH_DATA = 0x%x.\n",
202 ql_read32(qdev, FLASH_DATA));
203 printk(KERN_ERR PFX "CQ_STOP = 0x%x.\n",
204 ql_read32(qdev, CQ_STOP));
205 printk(KERN_ERR PFX "PAGE_TBL_RID = 0x%x.\n",
206 ql_read32(qdev, PAGE_TBL_RID));
207 printk(KERN_ERR PFX "WQ_PAGE_TBL_LO = 0x%x.\n",
208 ql_read32(qdev, WQ_PAGE_TBL_LO));
209 printk(KERN_ERR PFX "WQ_PAGE_TBL_HI = 0x%x.\n",
210 ql_read32(qdev, WQ_PAGE_TBL_HI));
211 printk(KERN_ERR PFX "CQ_PAGE_TBL_LO = 0x%x.\n",
212 ql_read32(qdev, CQ_PAGE_TBL_LO));
213 printk(KERN_ERR PFX "CQ_PAGE_TBL_HI = 0x%x.\n",
214 ql_read32(qdev, CQ_PAGE_TBL_HI));
215 printk(KERN_ERR PFX "COS_DFLT_CQ1 = 0x%x.\n",
216 ql_read32(qdev, COS_DFLT_CQ1));
217 printk(KERN_ERR PFX "COS_DFLT_CQ2 = 0x%x.\n",
218 ql_read32(qdev, COS_DFLT_CQ2));
219 printk(KERN_ERR PFX "SPLT_HDR = 0x%x.\n",
220 ql_read32(qdev, SPLT_HDR));
221 printk(KERN_ERR PFX "FC_PAUSE_THRES = 0x%x.\n",
222 ql_read32(qdev, FC_PAUSE_THRES));
223 printk(KERN_ERR PFX "NIC_PAUSE_THRES = 0x%x.\n",
224 ql_read32(qdev, NIC_PAUSE_THRES));
225 printk(KERN_ERR PFX "FC_ETHERTYPE = 0x%x.\n",
226 ql_read32(qdev, FC_ETHERTYPE));
227 printk(KERN_ERR PFX "FC_RCV_CFG = 0x%x.\n",
228 ql_read32(qdev, FC_RCV_CFG));
229 printk(KERN_ERR PFX "NIC_RCV_CFG = 0x%x.\n",
230 ql_read32(qdev, NIC_RCV_CFG));
231 printk(KERN_ERR PFX "FC_COS_TAGS = 0x%x.\n",
232 ql_read32(qdev, FC_COS_TAGS));
233 printk(KERN_ERR PFX "NIC_COS_TAGS = 0x%x.\n",
234 ql_read32(qdev, NIC_COS_TAGS));
235 printk(KERN_ERR PFX "MGMT_RCV_CFG = 0x%x.\n",
236 ql_read32(qdev, MGMT_RCV_CFG));
237 printk(KERN_ERR PFX "XG_SERDES_ADDR = 0x%x.\n",
238 ql_read32(qdev, XG_SERDES_ADDR));
239 printk(KERN_ERR PFX "XG_SERDES_DATA = 0x%x.\n",
240 ql_read32(qdev, XG_SERDES_DATA));
241 printk(KERN_ERR PFX "PRB_MX_ADDR = 0x%x.\n",
242 ql_read32(qdev, PRB_MX_ADDR));
243 printk(KERN_ERR PFX "PRB_MX_DATA = 0x%x.\n",
244 ql_read32(qdev, PRB_MX_DATA));
245 ql_dump_intr_states(qdev);
246 ql_dump_xgmac_control_regs(qdev);
247 ql_dump_ets_regs(qdev);
248 ql_dump_cam_entries(qdev);
249 ql_dump_routing_entries(qdev);
250}
251#endif
252
253#ifdef QL_STAT_DUMP
254void ql_dump_stat(struct ql_adapter *qdev)
255{
256 printk(KERN_ERR "%s: Enter.\n", __func__);
257 printk(KERN_ERR "tx_pkts = %ld\n",
258 (unsigned long)qdev->nic_stats.tx_pkts);
259 printk(KERN_ERR "tx_bytes = %ld\n",
260 (unsigned long)qdev->nic_stats.tx_bytes);
261 printk(KERN_ERR "tx_mcast_pkts = %ld.\n",
262 (unsigned long)qdev->nic_stats.tx_mcast_pkts);
263 printk(KERN_ERR "tx_bcast_pkts = %ld.\n",
264 (unsigned long)qdev->nic_stats.tx_bcast_pkts);
265 printk(KERN_ERR "tx_ucast_pkts = %ld.\n",
266 (unsigned long)qdev->nic_stats.tx_ucast_pkts);
267 printk(KERN_ERR "tx_ctl_pkts = %ld.\n",
268 (unsigned long)qdev->nic_stats.tx_ctl_pkts);
269 printk(KERN_ERR "tx_pause_pkts = %ld.\n",
270 (unsigned long)qdev->nic_stats.tx_pause_pkts);
271 printk(KERN_ERR "tx_64_pkt = %ld.\n",
272 (unsigned long)qdev->nic_stats.tx_64_pkt);
273 printk(KERN_ERR "tx_65_to_127_pkt = %ld.\n",
274 (unsigned long)qdev->nic_stats.tx_65_to_127_pkt);
275 printk(KERN_ERR "tx_128_to_255_pkt = %ld.\n",
276 (unsigned long)qdev->nic_stats.tx_128_to_255_pkt);
277 printk(KERN_ERR "tx_256_511_pkt = %ld.\n",
278 (unsigned long)qdev->nic_stats.tx_256_511_pkt);
279 printk(KERN_ERR "tx_512_to_1023_pkt = %ld.\n",
280 (unsigned long)qdev->nic_stats.tx_512_to_1023_pkt);
281 printk(KERN_ERR "tx_1024_to_1518_pkt = %ld.\n",
282 (unsigned long)qdev->nic_stats.tx_1024_to_1518_pkt);
283 printk(KERN_ERR "tx_1519_to_max_pkt = %ld.\n",
284 (unsigned long)qdev->nic_stats.tx_1519_to_max_pkt);
285 printk(KERN_ERR "tx_undersize_pkt = %ld.\n",
286 (unsigned long)qdev->nic_stats.tx_undersize_pkt);
287 printk(KERN_ERR "tx_oversize_pkt = %ld.\n",
288 (unsigned long)qdev->nic_stats.tx_oversize_pkt);
289 printk(KERN_ERR "rx_bytes = %ld.\n",
290 (unsigned long)qdev->nic_stats.rx_bytes);
291 printk(KERN_ERR "rx_bytes_ok = %ld.\n",
292 (unsigned long)qdev->nic_stats.rx_bytes_ok);
293 printk(KERN_ERR "rx_pkts = %ld.\n",
294 (unsigned long)qdev->nic_stats.rx_pkts);
295 printk(KERN_ERR "rx_pkts_ok = %ld.\n",
296 (unsigned long)qdev->nic_stats.rx_pkts_ok);
297 printk(KERN_ERR "rx_bcast_pkts = %ld.\n",
298 (unsigned long)qdev->nic_stats.rx_bcast_pkts);
299 printk(KERN_ERR "rx_mcast_pkts = %ld.\n",
300 (unsigned long)qdev->nic_stats.rx_mcast_pkts);
301 printk(KERN_ERR "rx_ucast_pkts = %ld.\n",
302 (unsigned long)qdev->nic_stats.rx_ucast_pkts);
303 printk(KERN_ERR "rx_undersize_pkts = %ld.\n",
304 (unsigned long)qdev->nic_stats.rx_undersize_pkts);
305 printk(KERN_ERR "rx_oversize_pkts = %ld.\n",
306 (unsigned long)qdev->nic_stats.rx_oversize_pkts);
307 printk(KERN_ERR "rx_jabber_pkts = %ld.\n",
308 (unsigned long)qdev->nic_stats.rx_jabber_pkts);
309 printk(KERN_ERR "rx_undersize_fcerr_pkts = %ld.\n",
310 (unsigned long)qdev->nic_stats.rx_undersize_fcerr_pkts);
311 printk(KERN_ERR "rx_drop_events = %ld.\n",
312 (unsigned long)qdev->nic_stats.rx_drop_events);
313 printk(KERN_ERR "rx_fcerr_pkts = %ld.\n",
314 (unsigned long)qdev->nic_stats.rx_fcerr_pkts);
315 printk(KERN_ERR "rx_align_err = %ld.\n",
316 (unsigned long)qdev->nic_stats.rx_align_err);
317 printk(KERN_ERR "rx_symbol_err = %ld.\n",
318 (unsigned long)qdev->nic_stats.rx_symbol_err);
319 printk(KERN_ERR "rx_mac_err = %ld.\n",
320 (unsigned long)qdev->nic_stats.rx_mac_err);
321 printk(KERN_ERR "rx_ctl_pkts = %ld.\n",
322 (unsigned long)qdev->nic_stats.rx_ctl_pkts);
323 printk(KERN_ERR "rx_pause_pkts = %ld.\n",
324 (unsigned long)qdev->nic_stats.rx_pause_pkts);
325 printk(KERN_ERR "rx_64_pkts = %ld.\n",
326 (unsigned long)qdev->nic_stats.rx_64_pkts);
327 printk(KERN_ERR "rx_65_to_127_pkts = %ld.\n",
328 (unsigned long)qdev->nic_stats.rx_65_to_127_pkts);
329 printk(KERN_ERR "rx_128_255_pkts = %ld.\n",
330 (unsigned long)qdev->nic_stats.rx_128_255_pkts);
331 printk(KERN_ERR "rx_256_511_pkts = %ld.\n",
332 (unsigned long)qdev->nic_stats.rx_256_511_pkts);
333 printk(KERN_ERR "rx_512_to_1023_pkts = %ld.\n",
334 (unsigned long)qdev->nic_stats.rx_512_to_1023_pkts);
335 printk(KERN_ERR "rx_1024_to_1518_pkts = %ld.\n",
336 (unsigned long)qdev->nic_stats.rx_1024_to_1518_pkts);
337 printk(KERN_ERR "rx_1519_to_max_pkts = %ld.\n",
338 (unsigned long)qdev->nic_stats.rx_1519_to_max_pkts);
339 printk(KERN_ERR "rx_len_err_pkts = %ld.\n",
340 (unsigned long)qdev->nic_stats.rx_len_err_pkts);
341};
342#endif
343
344#ifdef QL_DEV_DUMP
/* Dump the core fields of the adapter structure (pointers, sizes, ring
 * and interrupt bookkeeping) to the console for debugging.  Compiled in
 * only when QL_DEV_DUMP is defined.  Output order is part of the debug
 * format, so do not reorder these printk()s casually.
 */
void ql_dump_qdev(struct ql_adapter *qdev)
{
	int i;
	printk(KERN_ERR PFX "qdev->flags = %lx.\n",
	       qdev->flags);
	printk(KERN_ERR PFX "qdev->vlgrp = %p.\n",
	       qdev->vlgrp);
	printk(KERN_ERR PFX "qdev->pdev = %p.\n",
	       qdev->pdev);
	printk(KERN_ERR PFX "qdev->ndev = %p.\n",
	       qdev->ndev);
	printk(KERN_ERR PFX "qdev->chip_rev_id = %d.\n",
	       qdev->chip_rev_id);
	printk(KERN_ERR PFX "qdev->reg_base = %p.\n",
	       qdev->reg_base);
	printk(KERN_ERR PFX "qdev->doorbell_area = %p.\n",
	       qdev->doorbell_area);
	printk(KERN_ERR PFX "qdev->doorbell_area_size = %d.\n",
	       qdev->doorbell_area_size);
	/* NOTE: message text lacks the "qdev->" prefix the others use. */
	printk(KERN_ERR PFX "msg_enable = %x.\n",
	       qdev->msg_enable);
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_area = %p.\n",
	       qdev->rx_ring_shadow_reg_area);
	printk(KERN_ERR PFX "qdev->rx_ring_shadow_reg_dma = %llx.\n",
	       (unsigned long long) qdev->rx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_area = %p.\n",
	       qdev->tx_ring_shadow_reg_area);
	printk(KERN_ERR PFX "qdev->tx_ring_shadow_reg_dma = %llx.\n",
	       (unsigned long long) qdev->tx_ring_shadow_reg_dma);
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n",
	       qdev->intr_count);
	/* MSI-X vector table is only valid when MSI-X was enabled. */
	if (qdev->msi_x_entry)
		for (i = 0; i < qdev->intr_count; i++) {
			printk(KERN_ERR PFX
			       "msi_x_entry.[%d]vector = %d.\n", i,
			       qdev->msi_x_entry[i].vector);
			printk(KERN_ERR PFX
			       "msi_x_entry.[%d]entry = %d.\n", i,
			       qdev->msi_x_entry[i].entry);
		}
	for (i = 0; i < qdev->intr_count; i++) {
		printk(KERN_ERR PFX
		       "intr_context[%d].qdev = %p.\n", i,
		       qdev->intr_context[i].qdev);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr = %d.\n", i,
		       qdev->intr_context[i].intr);
		printk(KERN_ERR PFX
		       "intr_context[%d].hooked = %d.\n", i,
		       qdev->intr_context[i].hooked);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_en_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_en_mask);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_dis_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_dis_mask);
		printk(KERN_ERR PFX
		       "intr_context[%d].intr_read_mask = 0x%08x.\n", i,
		       qdev->intr_context[i].intr_read_mask);
	}
	printk(KERN_ERR PFX "qdev->tx_ring_count = %d.\n", qdev->tx_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring_count = %d.\n", qdev->rx_ring_count);
	printk(KERN_ERR PFX "qdev->ring_mem_size = %d.\n", qdev->ring_mem_size);
	printk(KERN_ERR PFX "qdev->ring_mem = %p.\n", qdev->ring_mem);
	/* intr_count is printed a second time here, matching the original
	 * debug format.
	 */
	printk(KERN_ERR PFX "qdev->intr_count = %d.\n", qdev->intr_count);
	printk(KERN_ERR PFX "qdev->tx_ring = %p.\n",
	       qdev->tx_ring);
	printk(KERN_ERR PFX "qdev->rss_ring_first_cq_id = %d.\n",
	       qdev->rss_ring_first_cq_id);
	printk(KERN_ERR PFX "qdev->rss_ring_count = %d.\n",
	       qdev->rss_ring_count);
	printk(KERN_ERR PFX "qdev->rx_ring = %p.\n", qdev->rx_ring);
	printk(KERN_ERR PFX "qdev->default_rx_queue = %d.\n",
	       qdev->default_rx_queue);
	printk(KERN_ERR PFX "qdev->xg_sem_mask = 0x%08x.\n",
	       qdev->xg_sem_mask);
	printk(KERN_ERR PFX "qdev->port_link_up = 0x%08x.\n",
	       qdev->port_link_up);
	printk(KERN_ERR PFX "qdev->port_init = 0x%08x.\n",
	       qdev->port_init);

}
427#endif
428
429#ifdef QL_CB_DUMP
/* Dump a work queue init control block (WQICB).  Fields are stored
 * little-endian for the hardware, so each is byte-swapped before
 * printing.  Debug-only (QL_CB_DUMP).
 */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	printk(KERN_ERR PFX "Dumping wqicb stuff...\n");
	printk(KERN_ERR PFX "wqicb->len = 0x%x.\n", le16_to_cpu(wqicb->len));
	printk(KERN_ERR PFX "wqicb->flags = %x.\n", le16_to_cpu(wqicb->flags));
	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
	       le16_to_cpu(wqicb->cq_id_rss));
	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
	printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
	       le32_to_cpu(wqicb->addr_lo));
	printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
	       le32_to_cpu(wqicb->addr_hi));
	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
	       le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
	       le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
}
447
/* Dump the software state of one TX (work queue) ring: base/DMA
 * addresses, doorbell registers, indices and counters.  Tolerates a
 * NULL ring pointer.  Debug-only (QL_CB_DUMP).
 */
void ql_dump_tx_ring(struct tx_ring *tx_ring)
{
	if (tx_ring == NULL)
		return;
	printk(KERN_ERR PFX
	       "===================== Dumping tx_ring %d ===============.\n",
	       tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
	       (unsigned long long) tx_ring->wq_base_dma);
	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
	       tx_ring->cnsmr_idx_sh_reg);
	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
	       (unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma);
	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
	       tx_ring->prod_idx_db_reg);
	printk(KERN_ERR PFX "tx_ring->valid_db_reg = %p.\n",
	       tx_ring->valid_db_reg);
	printk(KERN_ERR PFX "tx_ring->prod_idx = %d.\n", tx_ring->prod_idx);
	printk(KERN_ERR PFX "tx_ring->cq_id = %d.\n", tx_ring->cq_id);
	printk(KERN_ERR PFX "tx_ring->wq_id = %d.\n", tx_ring->wq_id);
	printk(KERN_ERR PFX "tx_ring->q = %p.\n", tx_ring->q);
	/* tx_count is atomic since the fast path decrements it. */
	printk(KERN_ERR PFX "tx_ring->tx_count = %d.\n",
	       atomic_read(&tx_ring->tx_count));
}
475
/* Dump the RSS init control block (RICB): flags, hash mask, CQ map and
 * hash keys.  Debug-only (QL_CB_DUMP).
 */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;
	printk(KERN_ERR PFX
	       "===================== Dumping ricb ===============.\n");
	printk(KERN_ERR PFX "Dumping ricb stuff...\n");

	printk(KERN_ERR PFX "ricb->base_cq = %d.\n", ricb->base_cq & 0x1f);
	/* NOTE(review): RSS_L4K is tested against base_cq (its upper bits)
	 * while the remaining flags live in ricb->flags — presumably the
	 * hardware packs RSS_L4K into base_cq; verify against qlge.h.
	 */
	printk(KERN_ERR PFX "ricb->flags = %s%s%s%s%s%s%s%s%s.\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	printk(KERN_ERR PFX "ricb->mask = 0x%.04x.\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		printk(KERN_ERR PFX "ricb->hash_cq_id[%d] = 0x%.08x.\n", i,
		       le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		printk(KERN_ERR PFX "ricb->ipv6_hash_key[%d] = 0x%.08x.\n", i,
		       le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		printk(KERN_ERR PFX "ricb->ipv4_hash_key[%d] = 0x%.08x.\n", i,
		       le32_to_cpu(ricb->ipv4_hash_key[i]));
}
505
/* Dump a completion queue init control block (CQICB), including the
 * large/small buffer queue (lbq/sbq) configuration.  Multi-byte fields
 * are little-endian and byte-swapped before printing.  Debug-only
 * (QL_CB_DUMP).
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	printk(KERN_ERR PFX "Dumping cqicb stuff...\n");

	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
	printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
	       le32_to_cpu(cqicb->addr_lo));
	printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
	       le32_to_cpu(cqicb->addr_hi));
	printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
	       le32_to_cpu(cqicb->prod_idx_addr_lo));
	printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
	       le32_to_cpu(cqicb->prod_idx_addr_hi));
	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
	       le16_to_cpu(cqicb->pkt_delay));
	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
	       le16_to_cpu(cqicb->irq_delay));
	printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
	       le32_to_cpu(cqicb->lbq_addr_lo));
	printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
	       le32_to_cpu(cqicb->lbq_addr_hi));
	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
	       le16_to_cpu(cqicb->lbq_len));
	printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
	       le32_to_cpu(cqicb->sbq_addr_lo));
	printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
	       le32_to_cpu(cqicb->sbq_addr_hi));
	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
	       le16_to_cpu(cqicb->sbq_len));
}
542
/* Dump the software state of one RX (completion queue) ring, including
 * the attached large (lbq) and small (sbq) buffer queues.  Tolerates a
 * NULL ring pointer.  Debug-only (QL_CB_DUMP).
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	printk(KERN_ERR PFX
	       "===================== Dumping rx_ring %d ===============.\n",
	       rx_ring->cq_id);
	printk(KERN_ERR PFX "Dumping rx_ring %d, type = %s%s%s.\n",
	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	/* Completion queue proper. */
	printk(KERN_ERR PFX "rx_ring->cqicb = %p.\n", &rx_ring->cqicb);
	printk(KERN_ERR PFX "rx_ring->cq_base = %p.\n", rx_ring->cq_base);
	printk(KERN_ERR PFX "rx_ring->cq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
	/* Guard against dereferencing a not-yet-mapped shadow register. */
	printk(KERN_ERR PFX
	       "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
	       rx_ring->cnsmr_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->cnsmr_idx = %d.\n", rx_ring->cnsmr_idx);
	printk(KERN_ERR PFX "rx_ring->curr_entry = %p.\n", rx_ring->curr_entry);
	printk(KERN_ERR PFX "rx_ring->valid_db_reg = %p.\n",
	       rx_ring->valid_db_reg);

	/* Large buffer queue (lbq). */
	printk(KERN_ERR PFX "rx_ring->lbq_base = %p.\n", rx_ring->lbq_base);
	printk(KERN_ERR PFX "rx_ring->lbq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->lbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect = %p.\n",
	       rx_ring->lbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->lbq_base_indirect_dma = %llx.\n",
	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->lbq = %p.\n", rx_ring->lbq);
	printk(KERN_ERR PFX "rx_ring->lbq_len = %d.\n", rx_ring->lbq_len);
	printk(KERN_ERR PFX "rx_ring->lbq_size = %d.\n", rx_ring->lbq_size);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx_db_reg = %p.\n",
	       rx_ring->lbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->lbq_prod_idx = %d.\n",
	       rx_ring->lbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_curr_idx = %d.\n",
	       rx_ring->lbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_clean_idx = %d.\n",
	       rx_ring->lbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->lbq_free_cnt = %d.\n",
	       rx_ring->lbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->lbq_buf_size = %d.\n",
	       rx_ring->lbq_buf_size);

	/* Small buffer queue (sbq). */
	printk(KERN_ERR PFX "rx_ring->sbq_base = %p.\n", rx_ring->sbq_base);
	printk(KERN_ERR PFX "rx_ring->sbq_base_dma = %llx.\n",
	       (unsigned long long) rx_ring->sbq_base_dma);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect = %p.\n",
	       rx_ring->sbq_base_indirect);
	printk(KERN_ERR PFX "rx_ring->sbq_base_indirect_dma = %llx.\n",
	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
	printk(KERN_ERR PFX "rx_ring->sbq = %p.\n", rx_ring->sbq);
	printk(KERN_ERR PFX "rx_ring->sbq_len = %d.\n", rx_ring->sbq_len);
	printk(KERN_ERR PFX "rx_ring->sbq_size = %d.\n", rx_ring->sbq_size);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx_db_reg addr = %p.\n",
	       rx_ring->sbq_prod_idx_db_reg);
	printk(KERN_ERR PFX "rx_ring->sbq_prod_idx = %d.\n",
	       rx_ring->sbq_prod_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_curr_idx = %d.\n",
	       rx_ring->sbq_curr_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_clean_idx = %d.\n",
	       rx_ring->sbq_clean_idx);
	printk(KERN_ERR PFX "rx_ring->sbq_free_cnt = %d.\n",
	       rx_ring->sbq_free_cnt);
	printk(KERN_ERR PFX "rx_ring->sbq_buf_size = %d.\n",
	       rx_ring->sbq_buf_size);
	printk(KERN_ERR PFX "rx_ring->cq_id = %d.\n", rx_ring->cq_id);
	printk(KERN_ERR PFX "rx_ring->irq = %d.\n", rx_ring->irq);
	printk(KERN_ERR PFX "rx_ring->cpu = %d.\n", rx_ring->cpu);
	printk(KERN_ERR PFX "rx_ring->qdev = %p.\n", rx_ring->qdev);
}
623
/* Read a control block of @size bytes back from the hardware (via
 * ql_write_cfg() with a dump bit) and pretty-print it with the dumper
 * matching @bit (CFG_DRQ = work queue, CFG_DCQ = completion queue,
 * CFG_DR = RSS block).  @q_id selects which queue to dump.
 * GFP_ATOMIC because this may be called from non-sleeping context.
 */
void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
{
	void *ptr;

	printk(KERN_ERR PFX "%s: Enter.\n", __func__);

	ptr = kmalloc(size, GFP_ATOMIC);
	if (ptr == NULL) {
		printk(KERN_ERR PFX "%s: Couldn't allocate a buffer.\n",
		       __func__);
		return;
	}

	if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
		printk(KERN_ERR "%s: Failed to upload control block!\n",
		       __func__);
		goto fail_it;
	}
	switch (bit) {
	case CFG_DRQ:
		ql_dump_wqicb((struct wqicb *)ptr);
		break;
	case CFG_DCQ:
		ql_dump_cqicb((struct cqicb *)ptr);
		break;
	case CFG_DR:
		ql_dump_ricb((struct ricb *)ptr);
		break;
	default:
		printk(KERN_ERR PFX "%s: Invalid bit value = %x.\n",
		       __func__, bit);
		break;
	}
	/* Single exit path frees the buffer on both success and failure. */
fail_it:
	kfree(ptr);
}
660#endif
661
662#ifdef QL_OB_DUMP
663void ql_dump_tx_desc(struct tx_buf_desc *tbd)
664{
665 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
666 le64_to_cpu((u64) tbd->addr));
667 printk(KERN_ERR PFX "tbd->len = %d\n",
668 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
669 printk(KERN_ERR PFX "tbd->flags = %s %s\n",
670 tbd->len & TX_DESC_C ? "C" : ".",
671 tbd->len & TX_DESC_E ? "E" : ".");
672 tbd++;
673 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
674 le64_to_cpu((u64) tbd->addr));
675 printk(KERN_ERR PFX "tbd->len = %d\n",
676 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
677 printk(KERN_ERR PFX "tbd->flags = %s %s\n",
678 tbd->len & TX_DESC_C ? "C" : ".",
679 tbd->len & TX_DESC_E ? "E" : ".");
680 tbd++;
681 printk(KERN_ERR PFX "tbd->addr = 0x%llx\n",
682 le64_to_cpu((u64) tbd->addr));
683 printk(KERN_ERR PFX "tbd->len = %d\n",
684 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
685 printk(KERN_ERR PFX "tbd->flags = %s %s\n",
686 tbd->len & TX_DESC_C ? "C" : ".",
687 tbd->len & TX_DESC_E ? "E" : ".");
688
689}
690
691void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
692{
693 struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
694 (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
695 struct tx_buf_desc *tbd;
696 u16 frame_len;
697
698 printk(KERN_ERR PFX "%s\n", __func__);
699 printk(KERN_ERR PFX "opcode = %s\n",
700 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
701 printk(KERN_ERR PFX "flags1 = %s %s %s %s %s\n",
702 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
703 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
704 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
705 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
706 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
707 printk(KERN_ERR PFX "flags2 = %s %s %s\n",
708 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
709 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
710 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
711 printk(KERN_ERR PFX "flags3 = %s %s %s \n",
712 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
713 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
714 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
715 printk(KERN_ERR PFX "tid = %x\n", ob_mac_iocb->tid);
716 printk(KERN_ERR PFX "txq_idx = %d\n", ob_mac_iocb->txq_idx);
717 printk(KERN_ERR PFX "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
718 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
719 printk(KERN_ERR PFX "frame_len = %d\n",
720 le32_to_cpu(ob_mac_tso_iocb->frame_len));
721 printk(KERN_ERR PFX "mss = %d\n",
722 le16_to_cpu(ob_mac_tso_iocb->mss));
723 printk(KERN_ERR PFX "prot_hdr_len = %d\n",
724 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
725 printk(KERN_ERR PFX "hdr_offset = 0x%.04x\n",
726 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
727 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
728 } else {
729 printk(KERN_ERR PFX "frame_len = %d\n",
730 le16_to_cpu(ob_mac_iocb->frame_len));
731 frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
732 }
733 tbd = &ob_mac_iocb->tbd[0];
734 ql_dump_tx_desc(tbd);
735}
736
/* Dump an outbound MAC IOCB response: opcode, decoded status flags and
 * transaction id.  Debug-only (QL_OB_DUMP).
 */
void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = %d\n", ob_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags = %s %s %s %s %s %s %s\n",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
	       ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
	       ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
	printk(KERN_ERR PFX "tid = %x\n", ob_mac_rsp->tid);
}
751#endif
752
753#ifdef QL_IB_DUMP
/* Dump an inbound MAC IOCB response: status flags, multicast match
 * type, error class, RSS hash info, data/header buffer addresses and
 * the VLAN tag when present.  Debug-only (QL_IB_DUMP).
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	printk(KERN_ERR PFX "%s\n", __func__);
	printk(KERN_ERR PFX "opcode = 0x%x\n", ib_mac_rsp->opcode);
	printk(KERN_ERR PFX "flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	/* The multicast match type is an enumeration within M_MASK, so
	 * exactly one of the three strings is non-empty.
	 */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		printk(KERN_ERR PFX "%s%s%s Multicast.\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	printk(KERN_ERR PFX "flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	/* Error code is likewise an enumeration within ERR_MASK. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		printk(KERN_ERR PFX "%s%s%s%s%s error.\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	printk(KERN_ERR PFX "flags3 = %s%s.\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "RSS flags = %s%s%s%s.\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	printk(KERN_ERR PFX "data_len = %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	printk(KERN_ERR PFX "data_addr_hi = 0x%x\n",
	       le32_to_cpu(ib_mac_rsp->data_addr_hi));
	printk(KERN_ERR PFX "data_addr_lo = 0x%x\n",
	       le32_to_cpu(ib_mac_rsp->data_addr_lo));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		printk(KERN_ERR PFX "rss = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		printk(KERN_ERR PFX "vlan_id = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));

	printk(KERN_ERR PFX "flags4 = %s%s%s.\n",
	       le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	/* Header-split info is only valid when the HV bit is set. */
	if (le32_to_cpu(ib_mac_rsp->flags4) & IB_MAC_IOCB_RSP_HV) {
		printk(KERN_ERR PFX "hdr length = %d.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_addr_hi));
		printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n",
		       le32_to_cpu(ib_mac_rsp->hdr_addr_lo));
	}
}
840#endif
841
842#ifdef QL_ALL_DUMP
/* Dump everything: registers, adapter state, and every TX/RX ring with
 * its control block.  Debug-only (QL_ALL_DUMP); the QL_DUMP_* macros
 * compile away when their sections are disabled.
 */
void ql_dump_all(struct ql_adapter *qdev)
{
	int i;

	QL_DUMP_REGS(qdev);
	QL_DUMP_QDEV(qdev);
	for (i = 0; i < qdev->tx_ring_count; i++) {
		QL_DUMP_TX_RING(&qdev->tx_ring[i]);
		/* NOTE(review): the cast presumes the wqicb is the first
		 * member of struct tx_ring — verify against qlge.h.
		 */
		QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
	}
	for (i = 0; i < qdev->rx_ring_count; i++) {
		QL_DUMP_RX_RING(&qdev->rx_ring[i]);
		/* Same assumption for the cqicb within struct rx_ring. */
		QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
	}
}
858#endif
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
new file mode 100644
index 000000000000..6457f8c4fdaa
--- /dev/null
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -0,0 +1,415 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/types.h>
4#include <linux/module.h>
5#include <linux/list.h>
6#include <linux/pci.h>
7#include <linux/dma-mapping.h>
8#include <linux/pagemap.h>
9#include <linux/sched.h>
10#include <linux/slab.h>
11#include <linux/dmapool.h>
12#include <linux/mempool.h>
13#include <linux/spinlock.h>
14#include <linux/kthread.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/in.h>
19#include <linux/ip.h>
20#include <linux/ipv6.h>
21#include <net/ipv6.h>
22#include <linux/tcp.h>
23#include <linux/udp.h>
24#include <linux/if_arp.h>
25#include <linux/if_ether.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/skbuff.h>
30#include <linux/rtnetlink.h>
31#include <linux/if_vlan.h>
32#include <linux/init.h>
33#include <linux/delay.h>
34#include <linux/mm.h>
35#include <linux/vmalloc.h>
36
37#include <linux/version.h>
38
39#include "qlge.h"
40
41static int ql_update_ring_coalescing(struct ql_adapter *qdev)
42{
43 int i, status = 0;
44 struct rx_ring *rx_ring;
45 struct cqicb *cqicb;
46
47 if (!netif_running(qdev->ndev))
48 return status;
49
50 spin_lock(&qdev->hw_lock);
51 /* Skip the default queue, and update the outbound handler
52 * queues if they changed.
53 */
54 cqicb = (struct cqicb *)&qdev->rx_ring[1];
55 if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
56 le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
57 for (i = 1; i < qdev->rss_ring_first_cq_id; i++, rx_ring++) {
58 rx_ring = &qdev->rx_ring[i];
59 cqicb = (struct cqicb *)rx_ring;
60 cqicb->irq_delay = le16_to_cpu(qdev->tx_coalesce_usecs);
61 cqicb->pkt_delay =
62 le16_to_cpu(qdev->tx_max_coalesced_frames);
63 cqicb->flags = FLAGS_LI;
64 status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
65 CFG_LCQ, rx_ring->cq_id);
66 if (status) {
67 QPRINTK(qdev, IFUP, ERR,
68 "Failed to load CQICB.\n");
69 goto exit;
70 }
71 }
72 }
73
74 /* Update the inbound (RSS) handler queues if they changed. */
75 cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_first_cq_id];
76 if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
77 le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
78 for (i = qdev->rss_ring_first_cq_id;
79 i <= qdev->rss_ring_first_cq_id + qdev->rss_ring_count;
80 i++) {
81 rx_ring = &qdev->rx_ring[i];
82 cqicb = (struct cqicb *)rx_ring;
83 cqicb->irq_delay = le16_to_cpu(qdev->rx_coalesce_usecs);
84 cqicb->pkt_delay =
85 le16_to_cpu(qdev->rx_max_coalesced_frames);
86 cqicb->flags = FLAGS_LI;
87 status = ql_write_cfg(qdev, cqicb, sizeof(cqicb),
88 CFG_LCQ, rx_ring->cq_id);
89 if (status) {
90 QPRINTK(qdev, IFUP, ERR,
91 "Failed to load CQICB.\n");
92 goto exit;
93 }
94 }
95 }
96exit:
97 spin_unlock(&qdev->hw_lock);
98 return status;
99}
100
/* Refresh qdev->nic_stats from the XGMAC statistics registers.
 * TX counters live at registers 0x200-0x278, RX counters at
 * 0x300-0x3c8; each is a 64-bit register read 8 bytes apart.
 * Holds stats_lock across the whole refresh and grabs the XGMAC
 * hardware semaphore before touching the registers.  On a register
 * read error the remaining counters are left as-is.
 */
void ql_update_stats(struct ql_adapter *qdev)
{
	u32 i;
	u64 data;
	/* NOTE(review): the walk assumes the u64 fields of nic_stats,
	 * starting at tx_pkts, are laid out in exactly the register
	 * order — verify against the struct definition in qlge.h.
	 */
	u64 *iter = &qdev->nic_stats.tx_pkts;

	spin_lock(&qdev->stats_lock);
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		QPRINTK(qdev, DRV, ERR,
			"Couldn't get xgmac sem.\n");
		goto quit;
	}
	/*
	 * Get TX statistics.
	 */
	for (i = 0x200; i < 0x280; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			QPRINTK(qdev, DRV, ERR,
				"Error reading status register 0x%.04x.\n", i);
			goto end;
		} else
			*iter = data;
		iter++;
	}

	/*
	 * Get RX statistics.
	 */
	for (i = 0x300; i < 0x3d0; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			QPRINTK(qdev, DRV, ERR,
				"Error reading status register 0x%.04x.\n", i);
			goto end;
		} else
			*iter = data;
		iter++;
	}

end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
	spin_unlock(&qdev->stats_lock);

	/* Compiles away unless QL_STAT_DUMP is enabled. */
	QL_DUMP_STAT(qdev);

	return;
}
148
/* Names reported for ETH_SS_STATS.  The order here is a contract: it
 * must match, entry for entry, the order in which ql_get_ethtool_stats()
 * writes values into its output buffer.
 */
static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
	{"tx_pkts"},
	{"tx_bytes"},
	{"tx_mcast_pkts"},
	{"tx_bcast_pkts"},
	{"tx_ucast_pkts"},
	{"tx_ctl_pkts"},
	{"tx_pause_pkts"},
	{"tx_64_pkts"},
	{"tx_65_to_127_pkts"},
	{"tx_128_to_255_pkts"},
	{"tx_256_511_pkts"},
	{"tx_512_to_1023_pkts"},
	{"tx_1024_to_1518_pkts"},
	{"tx_1519_to_max_pkts"},
	{"tx_undersize_pkts"},
	{"tx_oversize_pkts"},
	{"rx_bytes"},
	{"rx_bytes_ok"},
	{"rx_pkts"},
	{"rx_pkts_ok"},
	{"rx_bcast_pkts"},
	{"rx_mcast_pkts"},
	{"rx_ucast_pkts"},
	{"rx_undersize_pkts"},
	{"rx_oversize_pkts"},
	{"rx_jabber_pkts"},
	{"rx_undersize_fcerr_pkts"},
	{"rx_drop_events"},
	{"rx_fcerr_pkts"},
	{"rx_align_err"},
	{"rx_symbol_err"},
	{"rx_mac_err"},
	{"rx_ctl_pkts"},
	{"rx_pause_pkts"},
	{"rx_64_pkts"},
	{"rx_65_to_127_pkts"},
	{"rx_128_255_pkts"},
	{"rx_256_511_pkts"},
	{"rx_512_to_1023_pkts"},
	{"rx_1024_to_1518_pkts"},
	{"rx_1519_to_max_pkts"},
	{"rx_len_err_pkts"},
};
193
194static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
195{
196 switch (stringset) {
197 case ETH_SS_STATS:
198 memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
199 break;
200 }
201}
202
203static int ql_get_sset_count(struct net_device *dev, int sset)
204{
205 switch (sset) {
206 case ETH_SS_STATS:
207 return ARRAY_SIZE(ql_stats_str_arr);
208 default:
209 return -EOPNOTSUPP;
210 }
211}
212
/* ethtool get_ethtool_stats: refresh the hardware counters, then copy
 * them out in the exact order declared by ql_stats_str_arr (the two
 * lists must stay in lockstep).
 */
static void
ql_get_ethtool_stats(struct net_device *ndev,
		     struct ethtool_stats *stats, u64 *data)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct nic_stats *s = &qdev->nic_stats;

	/* Pull fresh counters from the XGMAC registers first. */
	ql_update_stats(qdev);

	*data++ = s->tx_pkts;
	*data++ = s->tx_bytes;
	*data++ = s->tx_mcast_pkts;
	*data++ = s->tx_bcast_pkts;
	*data++ = s->tx_ucast_pkts;
	*data++ = s->tx_ctl_pkts;
	*data++ = s->tx_pause_pkts;
	*data++ = s->tx_64_pkt;
	*data++ = s->tx_65_to_127_pkt;
	*data++ = s->tx_128_to_255_pkt;
	*data++ = s->tx_256_511_pkt;
	*data++ = s->tx_512_to_1023_pkt;
	*data++ = s->tx_1024_to_1518_pkt;
	*data++ = s->tx_1519_to_max_pkt;
	*data++ = s->tx_undersize_pkt;
	*data++ = s->tx_oversize_pkt;
	*data++ = s->rx_bytes;
	*data++ = s->rx_bytes_ok;
	*data++ = s->rx_pkts;
	*data++ = s->rx_pkts_ok;
	*data++ = s->rx_bcast_pkts;
	*data++ = s->rx_mcast_pkts;
	*data++ = s->rx_ucast_pkts;
	*data++ = s->rx_undersize_pkts;
	*data++ = s->rx_oversize_pkts;
	*data++ = s->rx_jabber_pkts;
	*data++ = s->rx_undersize_fcerr_pkts;
	*data++ = s->rx_drop_events;
	*data++ = s->rx_fcerr_pkts;
	*data++ = s->rx_align_err;
	*data++ = s->rx_symbol_err;
	*data++ = s->rx_mac_err;
	*data++ = s->rx_ctl_pkts;
	*data++ = s->rx_pause_pkts;
	*data++ = s->rx_64_pkts;
	*data++ = s->rx_65_to_127_pkts;
	*data++ = s->rx_128_255_pkts;
	*data++ = s->rx_256_511_pkts;
	*data++ = s->rx_512_to_1023_pkts;
	*data++ = s->rx_1024_to_1518_pkts;
	*data++ = s->rx_1519_to_max_pkts;
	*data++ = s->rx_len_err_pkts;
}
265
266static int ql_get_settings(struct net_device *ndev,
267 struct ethtool_cmd *ecmd)
268{
269 struct ql_adapter *qdev = netdev_priv(ndev);
270
271 ecmd->supported = SUPPORTED_10000baseT_Full;
272 ecmd->advertising = ADVERTISED_10000baseT_Full;
273 ecmd->autoneg = AUTONEG_ENABLE;
274 ecmd->transceiver = XCVR_EXTERNAL;
275 if ((qdev->link_status & LINK_TYPE_MASK) == LINK_TYPE_10GBASET) {
276 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
277 ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
278 ecmd->port = PORT_TP;
279 } else {
280 ecmd->supported |= SUPPORTED_FIBRE;
281 ecmd->advertising |= ADVERTISED_FIBRE;
282 ecmd->port = PORT_FIBRE;
283 }
284
285 ecmd->speed = SPEED_10000;
286 ecmd->duplex = DUPLEX_FULL;
287
288 return 0;
289}
290
291static void ql_get_drvinfo(struct net_device *ndev,
292 struct ethtool_drvinfo *drvinfo)
293{
294 struct ql_adapter *qdev = netdev_priv(ndev);
295 strncpy(drvinfo->driver, qlge_driver_name, 32);
296 strncpy(drvinfo->version, qlge_driver_version, 32);
297 strncpy(drvinfo->fw_version, "N/A", 32);
298 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
299 drvinfo->n_stats = 0;
300 drvinfo->testinfo_len = 0;
301 drvinfo->regdump_len = 0;
302 drvinfo->eedump_len = 0;
303}
304
/* ethtool get_coalesce: report the cached coalescing parameters.
 * Always succeeds.
 */
static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct ql_adapter *qdev = netdev_priv(dev);

	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;

	/* This chip coalesces as follows:
	 * If a packet arrives, hold off interrupts until
	 * cqicb->int_delay expires, but if no other packets arrive don't
	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
	 * timer to coalesce on a frame basis.  So, we have to take ethtool's
	 * max_coalesced_frames value and convert it to a delay in microseconds.
	 * We do this by using a basic thoughput of 1,000,000 frames per
	 * second @ (1024 bytes).  This means one frame per usec. So it's a
	 * simple one to one ratio.
	 */
	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;

	return 0;
}
327
328static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
329{
330 struct ql_adapter *qdev = netdev_priv(ndev);
331
332 /* Validate user parameters. */
333 if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
334 return -EINVAL;
335 /* Don't wait more than 10 usec. */
336 if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
337 return -EINVAL;
338 if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
339 return -EINVAL;
340 if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
341 return -EINVAL;
342
343 /* Verify a change took place before updating the hardware. */
344 if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
345 qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
346 qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
347 qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
348 return 0;
349
350 qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
351 qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
352 qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
353 qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
354
355 return ql_update_ring_coalescing(qdev);
356}
357
358static u32 ql_get_rx_csum(struct net_device *netdev)
359{
360 struct ql_adapter *qdev = netdev_priv(netdev);
361 return qdev->rx_csum;
362}
363
364static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
365{
366 struct ql_adapter *qdev = netdev_priv(netdev);
367 qdev->rx_csum = data;
368 return 0;
369}
370
371static int ql_set_tso(struct net_device *ndev, uint32_t data)
372{
373
374 if (data) {
375 ndev->features |= NETIF_F_TSO;
376 ndev->features |= NETIF_F_TSO6;
377 } else {
378 ndev->features &= ~NETIF_F_TSO;
379 ndev->features &= ~NETIF_F_TSO6;
380 }
381 return 0;
382}
383
384static u32 ql_get_msglevel(struct net_device *ndev)
385{
386 struct ql_adapter *qdev = netdev_priv(ndev);
387 return qdev->msg_enable;
388}
389
390static void ql_set_msglevel(struct net_device *ndev, u32 value)
391{
392 struct ql_adapter *qdev = netdev_priv(ndev);
393 qdev->msg_enable = value;
394}
395
/* ethtool entry points for the qlge driver; hooked up by the probe
 * path in qlge_main.c.  Generic ethtool_op_* helpers are used where
 * the default behavior suffices.
 */
const struct ethtool_ops qlge_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_rx_csum = ql_get_rx_csum,
	.set_rx_csum = ql_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ql_set_tso,
	.get_coalesce = ql_get_coalesce,
	.set_coalesce = ql_set_coalesce,
	.get_sset_count = ql_get_sset_count,
	.get_strings = ql_get_strings,
	.get_ethtool_stats = ql_get_ethtool_stats,
};
415
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
new file mode 100644
index 000000000000..3af822b6226e
--- /dev/null
+++ b/drivers/net/qlge/qlge_main.c
@@ -0,0 +1,3956 @@
1/*
2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
7 */
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/pagemap.h>
16#include <linux/sched.h>
17#include <linux/slab.h>
18#include <linux/dmapool.h>
19#include <linux/mempool.h>
20#include <linux/spinlock.h>
21#include <linux/kthread.h>
22#include <linux/interrupt.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/in.h>
26#include <linux/ip.h>
27#include <linux/ipv6.h>
28#include <net/ipv6.h>
29#include <linux/tcp.h>
30#include <linux/udp.h>
31#include <linux/if_arp.h>
32#include <linux/if_ether.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/ethtool.h>
36#include <linux/skbuff.h>
37#include <linux/rtnetlink.h>
38#include <linux/if_vlan.h>
39#include <linux/init.h>
40#include <linux/delay.h>
41#include <linux/mm.h>
42#include <linux/vmalloc.h>
43
44#include "qlge.h"
45
/* Driver identity strings; DRV_NAME/DRV_VERSION/DRV_STRING come from qlge.h. */
char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
53
/* Default netif_msg classes enabled at load time; the commented-out
 * entries are deliberately excluded from the defaults.
 */
static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
    NETIF_MSG_TX_QUEUED |
    NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS |
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

/* NOTE(review): 0x00007fff is presumably meant to track default_msg
 * above -- confirm the two stay in sync if either is changed. */
static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70#define MSIX_IRQ 0
71#define MSI_IRQ 1
72#define LEG_IRQ 2
73static int irq_type = MSIX_IRQ;
74module_param(irq_type, int, MSIX_IRQ);
75MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
76
/* PCI device IDs this driver binds to; the zeroed entry terminates
 * the table as required by the PCI core.
 */
static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
85
86/* This hardware semaphore causes exclusive access to
87 * resources shared between the NIC driver, MPI firmware,
88 * FCOE firmware and the FC driver.
89 */
90static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
91{
92 u32 sem_bits = 0;
93
94 switch (sem_mask) {
95 case SEM_XGMAC0_MASK:
96 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
97 break;
98 case SEM_XGMAC1_MASK:
99 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
100 break;
101 case SEM_ICB_MASK:
102 sem_bits = SEM_SET << SEM_ICB_SHIFT;
103 break;
104 case SEM_MAC_ADDR_MASK:
105 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
106 break;
107 case SEM_FLASH_MASK:
108 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
109 break;
110 case SEM_PROBE_MASK:
111 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
112 break;
113 case SEM_RT_IDX_MASK:
114 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
115 break;
116 case SEM_PROC_REG_MASK:
117 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
118 break;
119 default:
120 QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
121 return -EINVAL;
122 }
123
124 ql_write32(qdev, SEM, sem_bits | sem_mask);
125 return !(ql_read32(qdev, SEM) & sem_bits);
126}
127
128int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
129{
130 unsigned int seconds = 3;
131 do {
132 if (!ql_sem_trylock(qdev, sem_mask))
133 return 0;
134 ssleep(1);
135 } while (--seconds);
136 return -ETIMEDOUT;
137}
138
/* Release a hardware semaphore previously taken with
 * ql_sem_spinlock()/ql_sem_trylock().
 */
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
144
145/* This function waits for a specific bit to come ready
146 * in a given register. It is used mostly by the initialize
147 * process, but is also used in kernel thread API such as
148 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
149 */
150int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
151{
152 u32 temp;
153 int count = UDELAY_COUNT;
154
155 while (count) {
156 temp = ql_read32(qdev, reg);
157
158 /* check for errors */
159 if (temp & err_bit) {
160 QPRINTK(qdev, PROBE, ALERT,
161 "register 0x%.08x access error, value = 0x%.08x!.\n",
162 reg, temp);
163 return -EIO;
164 } else if (temp & bit)
165 return 0;
166 udelay(UDELAY_DELAY);
167 count--;
168 }
169 QPRINTK(qdev, PROBE, ALERT,
170 "Timed out waiting for reg %x to come ready.\n", reg);
171 return -ETIMEDOUT;
172}
173
174/* The CFG register is used to download TX and RX control blocks
175 * to the chip. This function waits for an operation to complete.
176 */
177static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
178{
179 int count = UDELAY_COUNT;
180 u32 temp;
181
182 while (count) {
183 temp = ql_read32(qdev, CFG);
184 if (temp & CFG_LE)
185 return -EIO;
186 if (!(temp & bit))
187 return 0;
188 udelay(UDELAY_DELAY);
189 count--;
190 }
191 return -ETIMEDOUT;
192}
193
194
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 * Returns 0 on success, negative errno on mapping/timeout failure.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	/* Load operations DMA toward the device; dumps come back from it. */
	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	/* Wait for any in-flight CFG operation of this type to finish. */
	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	/* The ICB address registers are shared; serialize with the
	 * ICB hardware semaphore while programming them.
	 */
	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto exit;
	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */

	/* Trigger the download for queue q_id. */
	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
243
/* Get a specific MAC address from the CAM. Used for debug and reg dump.
 * Reads two 32-bit words (three for CAM entries, which carry an extra
 * output-routing word) into *value via the MAC_ADDR_IDX/DATA register
 * pair, under the MAC address hardware semaphore.
 */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			/* First word: wait ready, select entry, wait for
			 * read completion, then fetch the data.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			/* Second word: same sequence at the next offset. */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, MAC_ADDR_E);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			/* CAM entries carry a third (output-routing) word. */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, MAC_ADDR_E);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
315
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 * Programs the MAC_ADDR_IDX/DATA register pair under the MAC address
 * hardware semaphore.  Returns 0 on success, negative errno otherwise.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			/* Split the 6-byte MAC into the 16-bit upper and
			 * 32-bit lower halves the hardware expects.
			 */
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, INFO,
				"Adding %s address %02x:%02x:%02x:%02x:%02x:%02x"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr[0], addr[1], addr[2], addr[3],
				addr[4], addr[5], index);

			/* Write lower half at offset 0. */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			/* Write upper half at offset 1. */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			/* Select offset 2 for the CAM output word below. */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				cam_output = (CAM_OUT_ROUTE_NIC |
					      (qdev->
					       func << CAM_OUT_FUNC_SHIFT) |
					      (qdev->
					       rss_ring_first_cq_id <<
					       CAM_OUT_CQ_ID_SHIFT));
				/* Tag with VLAN routing when a vlan group
				 * is registered on the netdev. */
				if (qdev->vlgrp)
					cam_output |= CAM_OUT_RV;
				/* route to NIC core */
				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, MAC_ADDR_E);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}
423
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 * Reads routing slot 'index' into *value via the RT_IDX/RT_DATA
 * register pair, under the routing-index hardware semaphore.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		goto exit;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, RT_IDX_E);
	if (status)
		goto exit;

	/* Select the slot and request a read (RT_IDX_RS). */
	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, RT_IDX_E);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
449
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 * 'mask' selects the frame class; 'enable' sets or clears the slot's
 * E-bit.  Returns 0 on success, negative errno otherwise.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status;
	u32 value = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	/* Build the RT_IDX value: destination queue, type, and slot. */
	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		/* RT_DATA carries the frame-class mask; 0 disables. */
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}
563
/* Globally enable chip interrupts (mask in upper 16 bits, value below). */
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}
568
/* Globally disable chip interrupts (mask set, value bits cleared). */
static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
573
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented everytime we queue a worker and decremented everytime
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
		ql_write32(qdev, INTR_EN,
			   qdev->intr_context[intr].intr_en_mask);
	else {
		/* Legacy/MSI mode shares one vector; serialize the
		 * count/enable sequence when legacy checking is on.
		 */
		if (qdev->legacy_check)
			spin_lock(&qdev->legacy_lock);
		if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
			QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
				intr);
			ql_write32(qdev, INTR_EN,
				   qdev->intr_context[intr].intr_en_mask);
		} else {
			QPRINTK(qdev, INTR, ERR,
				"Skip enable, other queue(s) are active.\n");
		}
		if (qdev->legacy_check)
			spin_unlock(&qdev->legacy_lock);
	}
}
601
/* Disable a completion interrupt in non-MSI-X mode and bump the
 * outstanding-worker count.  With MSI-X enabled this is a no-op
 * (hardware auto-masks per vector).  Returns the STS register value
 * read to flush the disable, or 0 when nothing was written.
 */
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
		goto exit;
	else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
		/* Only write the disable mask on the first outstanding
		 * worker; later workers just increment the count. */
		ql_write32(qdev, INTR_EN,
			   qdev->intr_context[intr].intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&qdev->intr_context[intr].irq_cnt);
exit:
	return var;
}
617
/* Enable every completion interrupt vector on the adapter. */
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does a atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}
631
/* Read one 32-bit word from flash at the given word offset via the
 * FLASH_ADDR/FLASH_DATA register pair.  Caller must hold the flash
 * hardware semaphore.  Returns 0 on success, negative errno otherwise.
 */
int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, FLASH_DATA);
exit:
	return status;
}
652
653static int ql_get_flash_params(struct ql_adapter *qdev)
654{
655 int i;
656 int status;
657 u32 *p = (u32 *)&qdev->flash;
658
659 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
660 return -ETIMEDOUT;
661
662 for (i = 0; i < sizeof(qdev->flash) / sizeof(u32); i++, p++) {
663 status = ql_read_flash_word(qdev, i, p);
664 if (status) {
665 QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
666 goto exit;
667 }
668
669 }
670exit:
671 ql_sem_unlock(qdev, SEM_FLASH_MASK);
672 return status;
673}
674
/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 * Caller is expected to hold the XGMAC semaphore (see ql_port_initialize).
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
693
/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 * Caller is expected to hold the XGMAC semaphore (see ql_port_initialize).
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
718
719/* This is used for reading the 64-bit statistics regs. */
720int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
721{
722 int status = 0;
723 u32 hi = 0;
724 u32 lo = 0;
725
726 status = ql_read_xgmac_reg(qdev, reg, &lo);
727 if (status)
728 goto exit;
729
730 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
731 if (status)
732 goto exit;
733
734 *data = (u64) lo | ((u64) hi << 32);
735
736exit:
737 return status;
738}
739
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 * Returns 0 on success, negative errno on register-access failure
 * or timeout.
 */
static int ql_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo.  0x2580 (9600 decimal) is the max frame size. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
820
821/* Get the next large buffer. */
822struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
823{
824 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
825 rx_ring->lbq_curr_idx++;
826 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
827 rx_ring->lbq_curr_idx = 0;
828 rx_ring->lbq_free_cnt++;
829 return lbq_desc;
830}
831
832/* Get the next small buffer. */
833struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
834{
835 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
836 rx_ring->sbq_curr_idx++;
837 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
838 rx_ring->sbq_curr_idx = 0;
839 rx_ring->sbq_free_cnt++;
840 return sbq_desc;
841}
842
843/* Update an rx ring index. */
844static void ql_update_cq(struct rx_ring *rx_ring)
845{
846 rx_ring->cnsmr_idx++;
847 rx_ring->curr_entry++;
848 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
849 rx_ring->cnsmr_idx = 0;
850 rx_ring->curr_entry = rx_ring->cq_base;
851 }
852}
853
/* Tell the chip how far we've consumed the completion queue. */
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
858
859/* Process (refill) a large buffer queue. */
860static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
861{
862 int clean_idx = rx_ring->lbq_clean_idx;
863 struct bq_desc *lbq_desc;
864 struct bq_element *bq;
865 u64 map;
866 int i;
867
868 while (rx_ring->lbq_free_cnt > 16) {
869 for (i = 0; i < 16; i++) {
870 QPRINTK(qdev, RX_STATUS, DEBUG,
871 "lbq: try cleaning clean_idx = %d.\n",
872 clean_idx);
873 lbq_desc = &rx_ring->lbq[clean_idx];
874 bq = lbq_desc->bq;
875 if (lbq_desc->p.lbq_page == NULL) {
876 QPRINTK(qdev, RX_STATUS, DEBUG,
877 "lbq: getting new page for index %d.\n",
878 lbq_desc->index);
879 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
880 if (lbq_desc->p.lbq_page == NULL) {
881 QPRINTK(qdev, RX_STATUS, ERR,
882 "Couldn't get a page.\n");
883 return;
884 }
885 map = pci_map_page(qdev->pdev,
886 lbq_desc->p.lbq_page,
887 0, PAGE_SIZE,
888 PCI_DMA_FROMDEVICE);
889 if (pci_dma_mapping_error(qdev->pdev, map)) {
890 QPRINTK(qdev, RX_STATUS, ERR,
891 "PCI mapping failed.\n");
892 return;
893 }
894 pci_unmap_addr_set(lbq_desc, mapaddr, map);
895 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
896 bq->addr_lo = /*lbq_desc->addr_lo = */
897 cpu_to_le32(map);
898 bq->addr_hi = /*lbq_desc->addr_hi = */
899 cpu_to_le32(map >> 32);
900 }
901 clean_idx++;
902 if (clean_idx == rx_ring->lbq_len)
903 clean_idx = 0;
904 }
905
906 rx_ring->lbq_clean_idx = clean_idx;
907 rx_ring->lbq_prod_idx += 16;
908 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
909 rx_ring->lbq_prod_idx = 0;
910 QPRINTK(qdev, RX_STATUS, DEBUG,
911 "lbq: updating prod idx = %d.\n",
912 rx_ring->lbq_prod_idx);
913 ql_write_db_reg(rx_ring->lbq_prod_idx,
914 rx_ring->lbq_prod_idx_db_reg);
915 rx_ring->lbq_free_cnt -= 16;
916 }
917}
918
919/* Process (refill) a small buffer queue. */
920static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
921{
922 int clean_idx = rx_ring->sbq_clean_idx;
923 struct bq_desc *sbq_desc;
924 struct bq_element *bq;
925 u64 map;
926 int i;
927
928 while (rx_ring->sbq_free_cnt > 16) {
929 for (i = 0; i < 16; i++) {
930 sbq_desc = &rx_ring->sbq[clean_idx];
931 QPRINTK(qdev, RX_STATUS, DEBUG,
932 "sbq: try cleaning clean_idx = %d.\n",
933 clean_idx);
934 bq = sbq_desc->bq;
935 if (sbq_desc->p.skb == NULL) {
936 QPRINTK(qdev, RX_STATUS, DEBUG,
937 "sbq: getting new skb for index %d.\n",
938 sbq_desc->index);
939 sbq_desc->p.skb =
940 netdev_alloc_skb(qdev->ndev,
941 rx_ring->sbq_buf_size);
942 if (sbq_desc->p.skb == NULL) {
943 QPRINTK(qdev, PROBE, ERR,
944 "Couldn't get an skb.\n");
945 rx_ring->sbq_clean_idx = clean_idx;
946 return;
947 }
948 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
949 map = pci_map_single(qdev->pdev,
950 sbq_desc->p.skb->data,
951 rx_ring->sbq_buf_size /
952 2, PCI_DMA_FROMDEVICE);
953 pci_unmap_addr_set(sbq_desc, mapaddr, map);
954 pci_unmap_len_set(sbq_desc, maplen,
955 rx_ring->sbq_buf_size / 2);
956 bq->addr_lo = cpu_to_le32(map);
957 bq->addr_hi = cpu_to_le32(map >> 32);
958 }
959
960 clean_idx++;
961 if (clean_idx == rx_ring->sbq_len)
962 clean_idx = 0;
963 }
964 rx_ring->sbq_clean_idx = clean_idx;
965 rx_ring->sbq_prod_idx += 16;
966 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
967 rx_ring->sbq_prod_idx = 0;
968 QPRINTK(qdev, RX_STATUS, DEBUG,
969 "sbq: updating prod idx = %d.\n",
970 rx_ring->sbq_prod_idx);
971 ql_write_db_reg(rx_ring->sbq_prod_idx,
972 rx_ring->sbq_prod_idx_db_reg);
973
974 rx_ring->sbq_free_cnt -= 16;
975 }
976}
977
/* Refill both receive buffer queues (small first, then large). */
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
984
/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 * 'mapped' is the number of segments recorded in tx_ring_desc->map.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If its the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there is more than 6 frags,
			 * then its an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			/* All other segments are page-backed skb frags. */
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}
1025
/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 * Fills the 8 tx_buf_desc slots of the IOCB; when more than 7 frags
 * are present, the 8th slot points to an external OAL sglist that
 * holds the remaining frags.  Every mapping is recorded in
 * tx_ring_desc->map so ql_unmap_send() can undo it.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			/* Subsequent tbd writes land in the OAL itself. */
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the skb->data mapping was the only one that succeeded,
	 * map_idx will be 1 and only that area is unmapped.  Otherwise
	 * we pass in the number of segments that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
1155
1156void ql_realign_skb(struct sk_buff *skb, int len)
1157{
1158 void *temp_addr = skb->data;
1159
1160 /* Undo the skb_reserve(skb,32) we did before
1161 * giving to hardware, and realign data on
1162 * a 2-byte boundary.
1163 */
1164 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1165 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1166 skb_copy_to_linear_data(skb, temp_addr,
1167 (unsigned int)len);
1168}
1169
1170/*
1171 * This function builds an skb for the given inbound
1172 * completion. It will be rewritten for readability in the near
1173 * future, but for not it works well.
1174 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct bq_desc *lbq_desc;
	struct bq_desc *sbq_desc;
	struct sk_buff *skb = NULL;
	/* Payload and split-header lengths as reported by the chip. */
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);

	/*
	 * Handle the header buffer if present.
	 * HV = header buffer valid, HS = header split into a small buffer.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 * Take ownership of the small-buffer skb; the descriptor's
		 * skb pointer is cleared so the refill path allocates a
		 * fresh one.
		 */
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No Data buffer in this packet.\n");
		return skb;
	}

	/* DS = data fit in a single small buffer. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, data of %d bytes in small, combine them.\n", length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 * Only a sync (not unmap) is needed here because the
			 * data small buffer is copied, not consumed.
			 */
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			pci_dma_sync_single_for_cpu(qdev->pdev,
						    pci_unmap_addr
						    (sbq_desc, mapaddr),
						    pci_unmap_len
						    (sbq_desc, maplen),
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, length),
			       sbq_desc->p.skb->data, length);
			pci_dma_sync_single_for_device(qdev->pdev,
						       pci_unmap_addr
						       (sbq_desc,
							mapaddr),
						       pci_unmap_len
						       (sbq_desc,
							maplen),
						       PCI_DMA_FROMDEVICE);
		} else {
			/* No header split: the whole frame is in one small
			 * buffer, which becomes the skb we return.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes in a single small buffer.\n", length);
			sbq_desc = ql_get_curr_sbuf(rx_ring);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(sbq_desc,
							mapaddr),
					 pci_unmap_len(sbq_desc,
						       maplen),
					 PCI_DMA_FROMDEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* DL = data landed in a single large (page) buffer. */
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Header in small, %d bytes in large. Chain large to small!\n", length);
			/*
			 * The data is in a single large buffer. We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Chaining page to skb.\n");
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			lbq_desc->p.lbq_page = NULL;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (skb == NULL) {
				QPRINTK(qdev, PROBE, DEBUG,
					"No skb available, drop the packet.\n");
				return NULL;
			}
			skb_reserve(skb, NET_IP_ALIGN);
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page,
					   0, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			length -= length;	/* all bytes consumed */
			lbq_desc->p.lbq_page = NULL;
			/* Pull the L2 header into the linear area so the
			 * stack can parse it.
			 */
			__pskb_pull_tail(skb,
				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				VLAN_ETH_HLEN : ETH_HLEN);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer. We loop
		 * thru and chain them to the our small header
		 * buffer's skb.
		 * frags: There are 18 max frags and our small
		 * buffer will hold 32 of them. The thing is,
		 * we'll use 3 max for our 9000 byte jumbo
		 * frames. If the MTU goes up we could
		 *  eventually be in trouble.
		 */
		int size, offset, i = 0;
		struct bq_element *bq, bq_array[8];
		sbq_desc = ql_get_curr_sbuf(rx_ring);
		pci_unmap_single(qdev->pdev,
				 pci_unmap_addr(sbq_desc, mapaddr),
				 pci_unmap_len(sbq_desc, maplen),
				 PCI_DMA_FROMDEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is an non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer. We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"%d bytes of headers & data in chain of large.\n", length);
			skb = sbq_desc->p.skb;
			bq = &bq_array[0];
			memcpy(bq, skb->data, sizeof(bq_array));
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			/* Header split case: the sg list can be read in
			 * place from the small buffer's skb data.
			 */
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Headers in small, %d bytes of data in chain of large.\n", length);
			bq = (struct bq_element *)sbq_desc->p.skb->data;
		}
		while (length > 0) {
			lbq_desc = ql_get_curr_lbuf(rx_ring);
			/* Sanity check: the address in the sg list must match
			 * the next large-buffer descriptor we dequeue.
			 */
			if ((bq->addr_lo & ~BQ_MASK) != lbq_desc->bq->addr_lo) {
				QPRINTK(qdev, RX_STATUS, ERR,
					"Panic!!! bad large buffer address, expected 0x%.08x, got 0x%.08x.\n",
					lbq_desc->bq->addr_lo, bq->addr_lo);
				return NULL;
			}
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(lbq_desc,
						      mapaddr),
				       pci_unmap_len(lbq_desc,
						     maplen),
				       PCI_DMA_FROMDEVICE);
			size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
			offset = 0;

			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Adding page %d to skb for %d bytes.\n",
				i, size);
			skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page,
					   offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			lbq_desc->p.lbq_page = NULL;
			bq++;
			i++;
		}
		/* Pull the L2 header into the linear area. */
		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
				 VLAN_ETH_HLEN : ETH_HLEN);
	}
	return skb;
}
1383
/* Process an inbound completion from an rx ring.
 * Builds an skb for the completion, updates stats and checksum state,
 * then hands the packet to the stack (VLAN-accelerated or plain).
 */
static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"No skb available, drop packet.\n");
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	/* Debug-only: report which multicast match type fired. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
	}
	/* IE/TE = IP or TCP checksum error flagged by hardware; fall back
	 * to CHECKSUM_NONE so the stack re-verifies (and drops if bad).
	 */
	if (ib_mac_rsp->flags1 & (IB_MAC_IOCB_RSP_IE | IB_MAC_IOCB_RSP_TE)) {
		QPRINTK(qdev, RX_STATUS, ERR,
			"Bad checksum for this %s packet.\n",
			((ib_mac_rsp->
			  flags2 & IB_MAC_IOCB_RSP_T) ? "TCP" : "UDP"));
		skb->ip_summed = CHECKSUM_NONE;
	} else if (qdev->rx_csum &&
		   ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ||
		    ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		     !(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU)))) {
		/* Hardware verified TCP, or UDP with a non-zero checksum. */
		QPRINTK(qdev, RX_STATUS, DEBUG, "RX checksum done!\n");
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a VLAN packet upstream.\n");
		vlan_hwaccel_rx(skb, qdev->vlgrp,
				le16_to_cpu(ib_mac_rsp->vlan_id));
	} else {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"Passing a normal packet upstream.\n");
		netif_rx(skb);
	}
	ndev->last_rx = jiffies;
}
1443
1444/* Process an outbound completion from an rx ring. */
1445static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1446 struct ob_mac_iocb_rsp *mac_rsp)
1447{
1448 struct tx_ring *tx_ring;
1449 struct tx_ring_desc *tx_ring_desc;
1450
1451 QL_DUMP_OB_MAC_RSP(mac_rsp);
1452 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1453 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1454 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1455 qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
1456 qdev->stats.tx_packets++;
1457 dev_kfree_skb(tx_ring_desc->skb);
1458 tx_ring_desc->skb = NULL;
1459
1460 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
1461 OB_MAC_IOCB_RSP_S |
1462 OB_MAC_IOCB_RSP_L |
1463 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
1464 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
1465 QPRINTK(qdev, TX_DONE, WARNING,
1466 "Total descriptor length did not match transfer length.\n");
1467 }
1468 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
1469 QPRINTK(qdev, TX_DONE, WARNING,
1470 "Frame too short to be legal, not sent.\n");
1471 }
1472 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
1473 QPRINTK(qdev, TX_DONE, WARNING,
1474 "Frame too long, but sent anyway.\n");
1475 }
1476 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
1477 QPRINTK(qdev, TX_DONE, WARNING,
1478 "PCI backplane error. Frame not sent.\n");
1479 }
1480 }
1481 atomic_inc(&tx_ring->tx_count);
1482}
1483
/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	/* Stop traffic and report link-down while firmware recovers. */
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	/* Schedule the MPI reset worker to run immediately (delay 0). */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}
1491
/* Queue a full ASIC reset.  Unlike an MPI reset, interrupts are also
 * disabled here because the chip state can no longer be trusted.
 */
void ql_queue_asic_error(struct ql_adapter *qdev)
{
	netif_stop_queue(qdev->ndev);
	netif_carrier_off(qdev->ndev);
	ql_disable_interrupts(qdev);
	/* Schedule the ASIC reset worker to run immediately (delay 0). */
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
1499
1500static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
1501 struct ib_ae_iocb_rsp *ib_ae_rsp)
1502{
1503 switch (ib_ae_rsp->event) {
1504 case MGMT_ERR_EVENT:
1505 QPRINTK(qdev, RX_ERR, ERR,
1506 "Management Processor Fatal Error.\n");
1507 ql_queue_fw_error(qdev);
1508 return;
1509
1510 case CAM_LOOKUP_ERR_EVENT:
1511 QPRINTK(qdev, LINK, ERR,
1512 "Multiple CAM hits lookup occurred.\n");
1513 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
1514 ql_queue_asic_error(qdev);
1515 return;
1516
1517 case SOFT_ECC_ERROR_EVENT:
1518 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
1519 ql_queue_asic_error(qdev);
1520 break;
1521
1522 case PCI_ERR_ANON_BUF_RD:
1523 QPRINTK(qdev, RX_ERR, ERR,
1524 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
1525 ib_ae_rsp->q_id);
1526 ql_queue_asic_error(qdev);
1527 break;
1528
1529 default:
1530 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
1531 ib_ae_rsp->event);
1532 ql_queue_asic_error(qdev);
1533 break;
1534 }
1535}
1536
/* Drain an rx ring dedicated to outbound (tx) completions.
 * Returns the number of completions processed.  After draining, the
 * tx queue is woken if it was stopped and the ring has freed up to at
 * least 25% capacity.
 */
static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	/* Producer index is DMA'd by the chip into shared memory. */
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		/* Order the read of the entry after the index check. */
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"Hit default case, not handled! dropping the packet, opcode = %x.\n",
				net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	/* Publish our new consumer index to the chip. */
	ql_write_cq_idx(rx_ring);
	if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) {
		struct tx_ring *tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
		if (atomic_read(&tx_ring->queue_stopped) &&
		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_queue(qdev->ndev);
	}

	return count;
}
1582
/* Drain up to @budget entries from an inbound completion ring,
 * dispatching each to the MAC-rx or async-event handler.  Replenishes
 * the buffer queues and publishes the consumer index before returning
 * the number of completions processed.
 */
static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	/* Producer index is DMA'd by the chip into shared memory. */
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		QPRINTK(qdev, RX_STATUS, DEBUG,
			"cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
			prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		/* Order the read of the entry after the index check. */
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			{
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"Hit default case, not handled! dropping the packet, opcode = %x.\n",
					net_rsp->opcode);
			}
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		/* Respect the NAPI/worker budget. */
		if (count == budget)
			break;
	}
	/* Refill small/large buffer queues consumed by the handlers. */
	ql_update_buffer_queues(qdev, rx_ring);
	ql_write_cq_idx(rx_ring);
	return count;
}
1627
/* NAPI poll routine for MSI-X inbound-completion rings.  Processes up
 * to @budget completions; if the ring drained early, completes NAPI
 * and re-enables this ring's completion interrupt.
 */
static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	int work_done = ql_clean_inbound_rx_ring(rx_ring, budget);

	QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
		rx_ring->cq_id);

	/* work_done < budget means the ring is empty: leave polling mode
	 * before re-arming the interrupt (standard NAPI ordering).
	 */
	if (work_done < budget) {
		__netif_rx_complete(qdev->ndev, napi);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}
1643
1644static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1645{
1646 struct ql_adapter *qdev = netdev_priv(ndev);
1647
1648 qdev->vlgrp = grp;
1649 if (grp) {
1650 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
1651 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
1652 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
1653 } else {
1654 QPRINTK(qdev, IFUP, DEBUG,
1655 "Turning off VLAN in NIC_RCV_CFG.\n");
1656 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
1657 }
1658}
1659
1660static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1661{
1662 struct ql_adapter *qdev = netdev_priv(ndev);
1663 u32 enable_bit = MAC_ADDR_E;
1664
1665 spin_lock(&qdev->hw_lock);
1666 if (ql_set_mac_addr_reg
1667 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1668 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
1669 }
1670 spin_unlock(&qdev->hw_lock);
1671}
1672
1673static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1674{
1675 struct ql_adapter *qdev = netdev_priv(ndev);
1676 u32 enable_bit = 0;
1677
1678 spin_lock(&qdev->hw_lock);
1679 if (ql_set_mac_addr_reg
1680 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
1681 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
1682 }
1683 spin_unlock(&qdev->hw_lock);
1684
1685}
1686
/* Worker thread to process a given rx_ring that is dedicated
 * to outbound completions.
 */
static void ql_tx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	/* Reap the tx completions, then re-arm this ring's interrupt. */
	ql_clean_outbound_rx_ring(rx_ring);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);

}
1698
/* Worker thread to process a given rx_ring that is dedicated
 * to inbound completions.
 */
static void ql_rx_clean(struct work_struct *work)
{
	struct rx_ring *rx_ring =
	    container_of(work, struct rx_ring, rx_work.work);
	/* Fixed budget of 64 completions per pass; interrupt is then
	 * re-enabled so remaining work triggers another pass.
	 */
	ql_clean_inbound_rx_ring(rx_ring, 64);
	ql_enable_completion_interrupt(rx_ring->qdev, rx_ring->irq);
}
1709
/* MSI-X Multiple Vector Interrupt Handler for outbound completions. */
static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	/* Defer processing to the ql_tx_clean() worker on the CPU this
	 * ring is bound to.
	 */
	queue_delayed_work_on(rx_ring->cpu, rx_ring->qdev->q_workqueue,
			      &rx_ring->rx_work, 0);
	return IRQ_HANDLED;
}
1718
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	/* Hand the ring to NAPI; ql_napi_poll_msix() does the work. */
	netif_rx_schedule(qdev->ndev, &rx_ring->napi);
	return IRQ_HANDLED;
}
1727
1728/* We check here to see if we're already handling a legacy
1729 * interrupt. If we are, then it must belong to another
1730 * chip with which we're sharing the interrupt line.
1731 */
1732int ql_legacy_check(struct ql_adapter *qdev)
1733{
1734 int err;
1735 spin_lock(&qdev->legacy_lock);
1736 err = atomic_read(&qdev->intr_context[0].irq_cnt);
1737 spin_unlock(&qdev->legacy_lock);
1738 return err;
1739}
1740
/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int i;
	int work_done = 0;

	/* legacy_check is only set for shared-line (INTx) operation;
	 * see ql_legacy_check().
	 */
	if (qdev->legacy_check && qdev->legacy_check(qdev)) {
		QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
		return IRQ_NONE;	/* Not our interrupt */
	}

	var = ql_read32(qdev, STS);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_queue_asic_error(qdev);
		QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		QPRINTK(qdev, INTR, ERR,
			"Resetting chip. Error Status Register = 0x%x\n", var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if (var & STS_PI) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
				      &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Check the default queue and wake handler if active.
	 */
	rx_ring = &qdev->rx_ring[0];
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) {
		QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n");
		ql_disable_completion_interrupt(qdev, intr_context->intr);
		queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue,
				      &rx_ring->rx_work, 0);
		work_done++;
	}

	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		/*
		 * Start the DPC for each active queue.
		 * Rings below rss_ring_first_cq_id are outbound-completion
		 * rings handled by workers; the rest are inbound and go
		 * through NAPI.
		 */
		for (i = 1; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
			    rx_ring->cnsmr_idx) {
				QPRINTK(qdev, INTR, INFO,
					"Waking handler for rx_ring[%d].\n", i);
				ql_disable_completion_interrupt(qdev,
								intr_context->
								intr);
				if (i < qdev->rss_ring_first_cq_id)
					queue_delayed_work_on(rx_ring->cpu,
							      qdev->q_workqueue,
							      &rx_ring->rx_work,
							      0);
				else
					netif_rx_schedule(qdev->ndev,
							  &rx_ring->napi);
				work_done++;
			}
		}
	}
	return work_done ? IRQ_HANDLED : IRQ_NONE;
}
1828
/* Set up the IOCB for a TSO (large-send) frame.
 * Returns 1 if the skb is GSO and the IOCB was converted to a TSO
 * IOCB, 0 if the frame is not GSO, or a negative errno if un-cloning
 * the header failed.
 */
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{

	if (skb_is_gso(skb)) {
		int err;
		/* We rewrite headers below, so make sure they're private. */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		/* Pack network and transport header offsets into one field. */
		mac_iocb_ptr->net_trans_offset =
		    cpu_to_le16(skb_network_offset(skb) |
				skb_transport_offset(skb)
				<< OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(skb->protocol == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);
			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			/* Seed the TCP checksum with the pseudo-header; the
			 * hardware fills in the rest per segment.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}
1870
/* Set up the IOCB for hardware checksum offload of a non-GSO IPv4
 * frame (CHECKSUM_PARTIAL path).  Seeds the L4 checksum field with
 * the pseudo-header sum so the chip can complete it.
 * NOTE(review): any protocol other than TCP is treated as UDP here —
 * callers appear to guarantee TCP/UDP only; confirm at the call site.
 */
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	u16 *check;
	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
	/* Pack network and transport header offsets into one field. */
	mac_iocb_ptr->net_trans_offset =
	    cpu_to_le16(skb_network_offset(skb) |
			skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	/* L4 payload length = total IP length minus IP header length. */
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				(tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
		    cpu_to_le16(skb_transport_offset(skb) +
				sizeof(struct udphdr));
	}
	/* Pseudo-header seed; hardware finishes the checksum. */
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}
1901
1902static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
1903{
1904 struct tx_ring_desc *tx_ring_desc;
1905 struct ob_mac_iocb_req *mac_iocb_ptr;
1906 struct ql_adapter *qdev = netdev_priv(ndev);
1907 int tso;
1908 struct tx_ring *tx_ring;
1909 u32 tx_ring_idx = (u32) QL_TXQ_IDX(qdev, skb);
1910
1911 tx_ring = &qdev->tx_ring[tx_ring_idx];
1912
1913 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
1914 QPRINTK(qdev, TX_QUEUED, INFO,
1915 "%s: shutting down tx queue %d du to lack of resources.\n",
1916 __func__, tx_ring_idx);
1917 netif_stop_queue(ndev);
1918 atomic_inc(&tx_ring->queue_stopped);
1919 return NETDEV_TX_BUSY;
1920 }
1921 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
1922 mac_iocb_ptr = tx_ring_desc->queue_entry;
1923 memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
1924 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
1925 QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
1926 return NETDEV_TX_BUSY;
1927 }
1928
1929 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
1930 mac_iocb_ptr->tid = tx_ring_desc->index;
1931 /* We use the upper 32-bits to store the tx queue for this IO.
1932 * When we get the completion we can use it to establish the context.
1933 */
1934 mac_iocb_ptr->txq_idx = tx_ring_idx;
1935 tx_ring_desc->skb = skb;
1936
1937 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
1938
1939 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
1940 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
1941 vlan_tx_tag_get(skb));
1942 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
1943 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
1944 }
1945 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1946 if (tso < 0) {
1947 dev_kfree_skb_any(skb);
1948 return NETDEV_TX_OK;
1949 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
1950 ql_hw_csum_setup(skb,
1951 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
1952 }
1953 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
1954 tx_ring->prod_idx++;
1955 if (tx_ring->prod_idx == tx_ring->wq_len)
1956 tx_ring->prod_idx = 0;
1957 wmb();
1958
1959 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
1960 ndev->trans_start = jiffies;
1961 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
1962 tx_ring->prod_idx, skb->len);
1963
1964 atomic_dec(&tx_ring->tx_count);
1965 return NETDEV_TX_OK;
1966}
1967
1968static void ql_free_shadow_space(struct ql_adapter *qdev)
1969{
1970 if (qdev->rx_ring_shadow_reg_area) {
1971 pci_free_consistent(qdev->pdev,
1972 PAGE_SIZE,
1973 qdev->rx_ring_shadow_reg_area,
1974 qdev->rx_ring_shadow_reg_dma);
1975 qdev->rx_ring_shadow_reg_area = NULL;
1976 }
1977 if (qdev->tx_ring_shadow_reg_area) {
1978 pci_free_consistent(qdev->pdev,
1979 PAGE_SIZE,
1980 qdev->tx_ring_shadow_reg_area,
1981 qdev->tx_ring_shadow_reg_dma);
1982 qdev->tx_ring_shadow_reg_area = NULL;
1983 }
1984}
1985
1986static int ql_alloc_shadow_space(struct ql_adapter *qdev)
1987{
1988 qdev->rx_ring_shadow_reg_area =
1989 pci_alloc_consistent(qdev->pdev,
1990 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
1991 if (qdev->rx_ring_shadow_reg_area == NULL) {
1992 QPRINTK(qdev, IFUP, ERR,
1993 "Allocation of RX shadow space failed.\n");
1994 return -ENOMEM;
1995 }
1996 qdev->tx_ring_shadow_reg_area =
1997 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
1998 &qdev->tx_ring_shadow_reg_dma);
1999 if (qdev->tx_ring_shadow_reg_area == NULL) {
2000 QPRINTK(qdev, IFUP, ERR,
2001 "Allocation of TX shadow space failed.\n");
2002 goto err_wqp_sh_area;
2003 }
2004 return 0;
2005
2006err_wqp_sh_area:
2007 pci_free_consistent(qdev->pdev,
2008 PAGE_SIZE,
2009 qdev->rx_ring_shadow_reg_area,
2010 qdev->rx_ring_shadow_reg_dma);
2011 return -ENOMEM;
2012}
2013
2014static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2015{
2016 struct tx_ring_desc *tx_ring_desc;
2017 int i;
2018 struct ob_mac_iocb_req *mac_iocb_ptr;
2019
2020 mac_iocb_ptr = tx_ring->wq_base;
2021 tx_ring_desc = tx_ring->q;
2022 for (i = 0; i < tx_ring->wq_len; i++) {
2023 tx_ring_desc->index = i;
2024 tx_ring_desc->skb = NULL;
2025 tx_ring_desc->queue_entry = mac_iocb_ptr;
2026 mac_iocb_ptr++;
2027 tx_ring_desc++;
2028 }
2029 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2030 atomic_set(&tx_ring->queue_stopped, 0);
2031}
2032
2033static void ql_free_tx_resources(struct ql_adapter *qdev,
2034 struct tx_ring *tx_ring)
2035{
2036 if (tx_ring->wq_base) {
2037 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2038 tx_ring->wq_base, tx_ring->wq_base_dma);
2039 tx_ring->wq_base = NULL;
2040 }
2041 kfree(tx_ring->q);
2042 tx_ring->q = NULL;
2043}
2044
2045static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2046 struct tx_ring *tx_ring)
2047{
2048 tx_ring->wq_base =
2049 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2050 &tx_ring->wq_base_dma);
2051
2052 if ((tx_ring->wq_base == NULL)
2053 || tx_ring->wq_base_dma & (tx_ring->wq_size - 1)) {
2054 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2055 return -ENOMEM;
2056 }
2057 tx_ring->q =
2058 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2059 if (tx_ring->q == NULL)
2060 goto err;
2061
2062 return 0;
2063err:
2064 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2065 tx_ring->wq_base, tx_ring->wq_base_dma);
2066 return -ENOMEM;
2067}
2068
2069void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2070{
2071 int i;
2072 struct bq_desc *lbq_desc;
2073
2074 for (i = 0; i < rx_ring->lbq_len; i++) {
2075 lbq_desc = &rx_ring->lbq[i];
2076 if (lbq_desc->p.lbq_page) {
2077 pci_unmap_page(qdev->pdev,
2078 pci_unmap_addr(lbq_desc, mapaddr),
2079 pci_unmap_len(lbq_desc, maplen),
2080 PCI_DMA_FROMDEVICE);
2081
2082 put_page(lbq_desc->p.lbq_page);
2083 lbq_desc->p.lbq_page = NULL;
2084 }
2085 lbq_desc->bq->addr_lo = 0;
2086 lbq_desc->bq->addr_hi = 0;
2087 }
2088}
2089
2090/*
2091 * Allocate and map a page for each element of the lbq.
2092 */
2093static int ql_alloc_lbq_buffers(struct ql_adapter *qdev,
2094 struct rx_ring *rx_ring)
2095{
2096 int i;
2097 struct bq_desc *lbq_desc;
2098 u64 map;
2099 struct bq_element *bq = rx_ring->lbq_base;
2100
2101 for (i = 0; i < rx_ring->lbq_len; i++) {
2102 lbq_desc = &rx_ring->lbq[i];
2103 memset(lbq_desc, 0, sizeof(lbq_desc));
2104 lbq_desc->bq = bq;
2105 lbq_desc->index = i;
2106 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
2107 if (unlikely(!lbq_desc->p.lbq_page)) {
2108 QPRINTK(qdev, IFUP, ERR, "failed alloc_page().\n");
2109 goto mem_error;
2110 } else {
2111 map = pci_map_page(qdev->pdev,
2112 lbq_desc->p.lbq_page,
2113 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
2114 if (pci_dma_mapping_error(qdev->pdev, map)) {
2115 QPRINTK(qdev, IFUP, ERR,
2116 "PCI mapping failed.\n");
2117 goto mem_error;
2118 }
2119 pci_unmap_addr_set(lbq_desc, mapaddr, map);
2120 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE);
2121 bq->addr_lo = cpu_to_le32(map);
2122 bq->addr_hi = cpu_to_le32(map >> 32);
2123 }
2124 bq++;
2125 }
2126 return 0;
2127mem_error:
2128 ql_free_lbq_buffers(qdev, rx_ring);
2129 return -ENOMEM;
2130}
2131
2132void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2133{
2134 int i;
2135 struct bq_desc *sbq_desc;
2136
2137 for (i = 0; i < rx_ring->sbq_len; i++) {
2138 sbq_desc = &rx_ring->sbq[i];
2139 if (sbq_desc == NULL) {
2140 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2141 return;
2142 }
2143 if (sbq_desc->p.skb) {
2144 pci_unmap_single(qdev->pdev,
2145 pci_unmap_addr(sbq_desc, mapaddr),
2146 pci_unmap_len(sbq_desc, maplen),
2147 PCI_DMA_FROMDEVICE);
2148 dev_kfree_skb(sbq_desc->p.skb);
2149 sbq_desc->p.skb = NULL;
2150 }
2151 if (sbq_desc->bq == NULL) {
2152 QPRINTK(qdev, IFUP, ERR, "sbq_desc->bq %d is NULL.\n",
2153 i);
2154 return;
2155 }
2156 sbq_desc->bq->addr_lo = 0;
2157 sbq_desc->bq->addr_hi = 0;
2158 }
2159}
2160
2161/* Allocate and map an skb for each element of the sbq. */
2162static int ql_alloc_sbq_buffers(struct ql_adapter *qdev,
2163 struct rx_ring *rx_ring)
2164{
2165 int i;
2166 struct bq_desc *sbq_desc;
2167 struct sk_buff *skb;
2168 u64 map;
2169 struct bq_element *bq = rx_ring->sbq_base;
2170
2171 for (i = 0; i < rx_ring->sbq_len; i++) {
2172 sbq_desc = &rx_ring->sbq[i];
2173 memset(sbq_desc, 0, sizeof(sbq_desc));
2174 sbq_desc->index = i;
2175 sbq_desc->bq = bq;
2176 skb = netdev_alloc_skb(qdev->ndev, rx_ring->sbq_buf_size);
2177 if (unlikely(!skb)) {
2178 /* Better luck next round */
2179 QPRINTK(qdev, IFUP, ERR,
2180 "small buff alloc failed for %d bytes at index %d.\n",
2181 rx_ring->sbq_buf_size, i);
2182 goto mem_err;
2183 }
2184 skb_reserve(skb, QLGE_SB_PAD);
2185 sbq_desc->p.skb = skb;
2186 /*
2187 * Map only half the buffer. Because the
2188 * other half may get some data copied to it
2189 * when the completion arrives.
2190 */
2191 map = pci_map_single(qdev->pdev,
2192 skb->data,
2193 rx_ring->sbq_buf_size / 2,
2194 PCI_DMA_FROMDEVICE);
2195 if (pci_dma_mapping_error(qdev->pdev, map)) {
2196 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
2197 goto mem_err;
2198 }
2199 pci_unmap_addr_set(sbq_desc, mapaddr, map);
2200 pci_unmap_len_set(sbq_desc, maplen, rx_ring->sbq_buf_size / 2);
2201 bq->addr_lo = /*sbq_desc->addr_lo = */
2202 cpu_to_le32(map);
2203 bq->addr_hi = /*sbq_desc->addr_hi = */
2204 cpu_to_le32(map >> 32);
2205 bq++;
2206 }
2207 return 0;
2208mem_err:
2209 ql_free_sbq_buffers(qdev, rx_ring);
2210 return -ENOMEM;
2211}
2212
2213static void ql_free_rx_resources(struct ql_adapter *qdev,
2214 struct rx_ring *rx_ring)
2215{
2216 if (rx_ring->sbq_len)
2217 ql_free_sbq_buffers(qdev, rx_ring);
2218 if (rx_ring->lbq_len)
2219 ql_free_lbq_buffers(qdev, rx_ring);
2220
2221 /* Free the small buffer queue. */
2222 if (rx_ring->sbq_base) {
2223 pci_free_consistent(qdev->pdev,
2224 rx_ring->sbq_size,
2225 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2226 rx_ring->sbq_base = NULL;
2227 }
2228
2229 /* Free the small buffer queue control blocks. */
2230 kfree(rx_ring->sbq);
2231 rx_ring->sbq = NULL;
2232
2233 /* Free the large buffer queue. */
2234 if (rx_ring->lbq_base) {
2235 pci_free_consistent(qdev->pdev,
2236 rx_ring->lbq_size,
2237 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2238 rx_ring->lbq_base = NULL;
2239 }
2240
2241 /* Free the large buffer queue control blocks. */
2242 kfree(rx_ring->lbq);
2243 rx_ring->lbq = NULL;
2244
2245 /* Free the rx queue. */
2246 if (rx_ring->cq_base) {
2247 pci_free_consistent(qdev->pdev,
2248 rx_ring->cq_size,
2249 rx_ring->cq_base, rx_ring->cq_base_dma);
2250 rx_ring->cq_base = NULL;
2251 }
2252}
2253
2254/* Allocate queues and buffers for this completions queue based
2255 * on the values in the parameter structure. */
2256static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2257 struct rx_ring *rx_ring)
2258{
2259
2260 /*
2261 * Allocate the completion queue for this rx_ring.
2262 */
2263 rx_ring->cq_base =
2264 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2265 &rx_ring->cq_base_dma);
2266
2267 if (rx_ring->cq_base == NULL) {
2268 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2269 return -ENOMEM;
2270 }
2271
2272 if (rx_ring->sbq_len) {
2273 /*
2274 * Allocate small buffer queue.
2275 */
2276 rx_ring->sbq_base =
2277 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2278 &rx_ring->sbq_base_dma);
2279
2280 if (rx_ring->sbq_base == NULL) {
2281 QPRINTK(qdev, IFUP, ERR,
2282 "Small buffer queue allocation failed.\n");
2283 goto err_mem;
2284 }
2285
2286 /*
2287 * Allocate small buffer queue control blocks.
2288 */
2289 rx_ring->sbq =
2290 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2291 GFP_KERNEL);
2292 if (rx_ring->sbq == NULL) {
2293 QPRINTK(qdev, IFUP, ERR,
2294 "Small buffer queue control block allocation failed.\n");
2295 goto err_mem;
2296 }
2297
2298 if (ql_alloc_sbq_buffers(qdev, rx_ring)) {
2299 QPRINTK(qdev, IFUP, ERR,
2300 "Small buffer allocation failed.\n");
2301 goto err_mem;
2302 }
2303 }
2304
2305 if (rx_ring->lbq_len) {
2306 /*
2307 * Allocate large buffer queue.
2308 */
2309 rx_ring->lbq_base =
2310 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2311 &rx_ring->lbq_base_dma);
2312
2313 if (rx_ring->lbq_base == NULL) {
2314 QPRINTK(qdev, IFUP, ERR,
2315 "Large buffer queue allocation failed.\n");
2316 goto err_mem;
2317 }
2318 /*
2319 * Allocate large buffer queue control blocks.
2320 */
2321 rx_ring->lbq =
2322 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2323 GFP_KERNEL);
2324 if (rx_ring->lbq == NULL) {
2325 QPRINTK(qdev, IFUP, ERR,
2326 "Large buffer queue control block allocation failed.\n");
2327 goto err_mem;
2328 }
2329
2330 /*
2331 * Allocate the buffers.
2332 */
2333 if (ql_alloc_lbq_buffers(qdev, rx_ring)) {
2334 QPRINTK(qdev, IFUP, ERR,
2335 "Large buffer allocation failed.\n");
2336 goto err_mem;
2337 }
2338 }
2339
2340 return 0;
2341
2342err_mem:
2343 ql_free_rx_resources(qdev, rx_ring);
2344 return -ENOMEM;
2345}
2346
2347static void ql_tx_ring_clean(struct ql_adapter *qdev)
2348{
2349 struct tx_ring *tx_ring;
2350 struct tx_ring_desc *tx_ring_desc;
2351 int i, j;
2352
2353 /*
2354 * Loop through all queues and free
2355 * any resources.
2356 */
2357 for (j = 0; j < qdev->tx_ring_count; j++) {
2358 tx_ring = &qdev->tx_ring[j];
2359 for (i = 0; i < tx_ring->wq_len; i++) {
2360 tx_ring_desc = &tx_ring->q[i];
2361 if (tx_ring_desc && tx_ring_desc->skb) {
2362 QPRINTK(qdev, IFDOWN, ERR,
2363 "Freeing lost SKB %p, from queue %d, index %d.\n",
2364 tx_ring_desc->skb, j,
2365 tx_ring_desc->index);
2366 ql_unmap_send(qdev, tx_ring_desc,
2367 tx_ring_desc->map_cnt);
2368 dev_kfree_skb(tx_ring_desc->skb);
2369 tx_ring_desc->skb = NULL;
2370 }
2371 }
2372 }
2373}
2374
2375static void ql_free_ring_cb(struct ql_adapter *qdev)
2376{
2377 kfree(qdev->ring_mem);
2378}
2379
2380static int ql_alloc_ring_cb(struct ql_adapter *qdev)
2381{
2382 /* Allocate space for tx/rx ring control blocks. */
2383 qdev->ring_mem_size =
2384 (qdev->tx_ring_count * sizeof(struct tx_ring)) +
2385 (qdev->rx_ring_count * sizeof(struct rx_ring));
2386 qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL);
2387 if (qdev->ring_mem == NULL) {
2388 return -ENOMEM;
2389 } else {
2390 qdev->rx_ring = qdev->ring_mem;
2391 qdev->tx_ring = qdev->ring_mem +
2392 (qdev->rx_ring_count * sizeof(struct rx_ring));
2393 }
2394 return 0;
2395}
2396
2397static void ql_free_mem_resources(struct ql_adapter *qdev)
2398{
2399 int i;
2400
2401 for (i = 0; i < qdev->tx_ring_count; i++)
2402 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2403 for (i = 0; i < qdev->rx_ring_count; i++)
2404 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2405 ql_free_shadow_space(qdev);
2406}
2407
2408static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2409{
2410 int i;
2411
2412 /* Allocate space for our shadow registers and such. */
2413 if (ql_alloc_shadow_space(qdev))
2414 return -ENOMEM;
2415
2416 for (i = 0; i < qdev->rx_ring_count; i++) {
2417 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2418 QPRINTK(qdev, IFUP, ERR,
2419 "RX resource allocation failed.\n");
2420 goto err_mem;
2421 }
2422 }
2423 /* Allocate tx queue resources */
2424 for (i = 0; i < qdev->tx_ring_count; i++) {
2425 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2426 QPRINTK(qdev, IFUP, ERR,
2427 "TX resource allocation failed.\n");
2428 goto err_mem;
2429 }
2430 }
2431 return 0;
2432
2433err_mem:
2434 ql_free_mem_resources(qdev);
2435 return -ENOMEM;
2436}
2437
/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 *
 * Carves out this ring's slice of the shadow-register and doorbell
 * areas, fills in the cqicb (completion queue base, producer-index
 * shadow, optional large/small buffer queues, interrupt coalescing),
 * hooks the completion-processing mechanism appropriate to the ring
 * type, downloads the cqicb with ql_write_cfg(), and finally rings
 * the buffer-queue doorbells so the chip sees the initial buffers.
 *
 * Returns 0 on success or the ql_write_cfg() error.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	/* Each cq_id owns four u64 slots in the shadow register area. */
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
	    (rx_ring->cq_id * sizeof(u64) * 4);
	/* Rx ring doorbells start at page 128 of the doorbell area. */
	void __iomem *doorbell_area =
	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u16 bq_len;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq_base_indirect = shadow_reg;
	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->sbq_base_indirect = shadow_reg;
	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	cqicb->len = cpu_to_le16(rx_ring->cq_len | LEN_V | LEN_CPP_CONT);

	cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma);
	cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32);

	cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma);
	cqicb->prod_idx_addr_hi =
	    cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load queue base address */
	    FLAGS_LV |		/* Load MSI-X vector */
	    FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->lbq_len) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		/* NOTE(review): this u64 store is not byte-swapped,
		 * unlike the cpu_to_le32() stores below -- confirm
		 * whether cpu_to_le64() is required on big-endian hosts.
		 */
		*((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma;
		cqicb->lbq_addr_lo =
		    cpu_to_le32(rx_ring->lbq_base_indirect_dma);
		cqicb->lbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32);
		cqicb->lbq_buf_size = cpu_to_le32(rx_ring->lbq_buf_size);
		bq_len = (u16) rx_ring->lbq_len;
		cqicb->lbq_len = cpu_to_le16(bq_len);
		/* Start with all but 16 entries posted to the chip. */
		rx_ring->lbq_prod_idx = rx_ring->lbq_len - 16;
		rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_clean_idx = rx_ring->lbq_prod_idx;
		rx_ring->lbq_free_cnt = 16;
	}
	if (rx_ring->sbq_len) {
		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		/* NOTE(review): same endianness question as the lbq
		 * store above.
		 */
		*((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma;
		cqicb->sbq_addr_lo =
		    cpu_to_le32(rx_ring->sbq_base_indirect_dma);
		cqicb->sbq_addr_hi =
		    cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32);
		/* Only half of each small buffer is DMA-mapped (see
		 * ql_alloc_sbq_buffers()); round to an 8-byte multiple.
		 */
		cqicb->sbq_buf_size =
		    cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8);
		bq_len = (u16) rx_ring->sbq_len;
		cqicb->sbq_len = cpu_to_le16(bq_len);
		rx_ring->sbq_prod_idx = rx_ring->sbq_len - 16;
		rx_ring->sbq_curr_idx = 0;
		rx_ring->sbq_clean_idx = rx_ring->sbq_prod_idx;
		rx_ring->sbq_free_cnt = 16;
	}
	switch (rx_ring->type) {
	case TX_Q:
		/* If there's only one interrupt, then we use
		 * worker threads to process the outbound
		 * completion handling rx_rings. We do this so
		 * they can be run on multiple CPUs. There is
		 * room to play with this more where we would only
		 * run in a worker if there are more than x number
		 * of outbound completions on the queue and more
		 * than one queue active. Some threshold that
		 * would indicate a benefit in spite of the cost
		 * of a context switch.
		 * If there's more than one interrupt, then the
		 * outbound completions are processed in the ISR.
		 */
		if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		else {
			/* With all debug warnings on we see a WARN_ON message
			 * when we free the skb in the interrupt context.
			 */
			/* NOTE(review): both branches install ql_tx_clean,
			 * so this if/else is currently redundant.
			 */
			INIT_DELAYED_WORK(&rx_ring->rx_work, ql_tx_clean);
		}
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
		break;
	case DEFAULT_Q:
		INIT_DELAYED_WORK(&rx_ring->rx_work, ql_rx_clean);
		/* No coalescing on the default queue. */
		cqicb->irq_delay = 0;
		cqicb->pkt_delay = 0;
		break;
	case RX_Q:
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
		break;
	default:
		QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
			rx_ring->type);
	}
	QPRINTK(qdev, IFUP, INFO, "Initializing rx work queue.\n");
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
		return err;
	}
	QPRINTK(qdev, IFUP, INFO, "Successfully loaded CQICB.\n");
	/*
	 * Advance the producer index for the buffer queues.
	 */
	wmb();
	if (rx_ring->lbq_len)
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	if (rx_ring->sbq_len)
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	return err;
}
2593
2594static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2595{
2596 struct wqicb *wqicb = (struct wqicb *)tx_ring;
2597 void __iomem *doorbell_area =
2598 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
2599 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
2600 (tx_ring->wq_id * sizeof(u64));
2601 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
2602 (tx_ring->wq_id * sizeof(u64));
2603 int err = 0;
2604
2605 /*
2606 * Assign doorbell registers for this tx_ring.
2607 */
2608 /* TX PCI doorbell mem area for tx producer index */
2609 tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
2610 tx_ring->prod_idx = 0;
2611 /* TX PCI doorbell mem area + 0x04 */
2612 tx_ring->valid_db_reg = doorbell_area + 0x04;
2613
2614 /*
2615 * Assign shadow registers for this tx_ring.
2616 */
2617 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
2618 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
2619
2620 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
2621 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
2622 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
2623 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
2624 wqicb->rid = 0;
2625 wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma);
2626 wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32);
2627
2628 wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma);
2629 wqicb->cnsmr_idx_addr_hi =
2630 cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32);
2631
2632 ql_init_tx_ring(qdev, tx_ring);
2633
2634 err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ,
2635 (u16) tx_ring->wq_id);
2636 if (err) {
2637 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
2638 return err;
2639 }
2640 QPRINTK(qdev, IFUP, INFO, "Successfully loaded WQICB.\n");
2641 return err;
2642}
2643
2644static void ql_disable_msix(struct ql_adapter *qdev)
2645{
2646 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2647 pci_disable_msix(qdev->pdev);
2648 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
2649 kfree(qdev->msi_x_entry);
2650 qdev->msi_x_entry = NULL;
2651 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
2652 pci_disable_msi(qdev->pdev);
2653 clear_bit(QL_MSI_ENABLED, &qdev->flags);
2654 }
2655}
2656
2657static void ql_enable_msix(struct ql_adapter *qdev)
2658{
2659 int i;
2660
2661 qdev->intr_count = 1;
2662 /* Get the MSIX vectors. */
2663 if (irq_type == MSIX_IRQ) {
2664 /* Try to alloc space for the msix struct,
2665 * if it fails then go to MSI/legacy.
2666 */
2667 qdev->msi_x_entry = kcalloc(qdev->rx_ring_count,
2668 sizeof(struct msix_entry),
2669 GFP_KERNEL);
2670 if (!qdev->msi_x_entry) {
2671 irq_type = MSI_IRQ;
2672 goto msi;
2673 }
2674
2675 for (i = 0; i < qdev->rx_ring_count; i++)
2676 qdev->msi_x_entry[i].entry = i;
2677
2678 if (!pci_enable_msix
2679 (qdev->pdev, qdev->msi_x_entry, qdev->rx_ring_count)) {
2680 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2681 qdev->intr_count = qdev->rx_ring_count;
2682 QPRINTK(qdev, IFUP, INFO,
2683 "MSI-X Enabled, got %d vectors.\n",
2684 qdev->intr_count);
2685 return;
2686 } else {
2687 kfree(qdev->msi_x_entry);
2688 qdev->msi_x_entry = NULL;
2689 QPRINTK(qdev, IFUP, WARNING,
2690 "MSI-X Enable failed, trying MSI.\n");
2691 irq_type = MSI_IRQ;
2692 }
2693 }
2694msi:
2695 if (irq_type == MSI_IRQ) {
2696 if (!pci_enable_msi(qdev->pdev)) {
2697 set_bit(QL_MSI_ENABLED, &qdev->flags);
2698 QPRINTK(qdev, IFUP, INFO,
2699 "Running with MSI interrupts.\n");
2700 return;
2701 }
2702 }
2703 irq_type = LEG_IRQ;
2704 spin_lock_init(&qdev->legacy_lock);
2705 qdev->legacy_check = ql_legacy_check;
2706 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2707}
2708
/*
 * Here we build the intr_context structures based on
 * our rx_ring count and intr vector count.
 * The intr_context structure is used to hook each vector
 * to possibly different handlers.
 *
 * Under MSI-X every rx_ring gets its own vector and context, each
 * with precomputed enable/disable/read register values and a handler
 * chosen by ring role (default/tx-completion/rx).  Otherwise a
 * single shared context routes everything through qlge_isr().
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	/* Pick the interrupt mode first; it decides the layout below. */
	ql_enable_msix(qdev);

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has it's
		 * own intr_context since we have separate
		 * vectors for each queue.
		 * This only true when MSI-X is enabled.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/*
			 * We set up each vectors enable/disable/read bits so
			 * there's no bit/mask calculations in the critical path.
			 */
			intr_context->intr_en_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
			    | i;
			intr_context->intr_dis_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
			    INTR_EN_IHD | i;
			intr_context->intr_read_mask =
			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
			    i;

			if (i == 0) {
				/*
				 * Default queue handles bcast/mcast plus
				 * async events. Needs buffers.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-default-queue",
					qdev->ndev->name);
			} else if (i < qdev->rss_ring_first_cq_id) {
				/*
				 * Outbound queue is for outbound completions only.
				 */
				intr_context->handler = qlge_msix_tx_isr;
				sprintf(intr_context->name, "%s-txq-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rxq-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vectors enable/disable/read bits so
		 * there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
		    INTR_EN_TYPE_DISABLE;
		intr_context->intr_read_mask =
		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}
2800
2801static void ql_free_irq(struct ql_adapter *qdev)
2802{
2803 int i;
2804 struct intr_context *intr_context = &qdev->intr_context[0];
2805
2806 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2807 if (intr_context->hooked) {
2808 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2809 free_irq(qdev->msi_x_entry[i].vector,
2810 &qdev->rx_ring[i]);
2811 QPRINTK(qdev, IFDOWN, ERR,
2812 "freeing msix interrupt %d.\n", i);
2813 } else {
2814 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
2815 QPRINTK(qdev, IFDOWN, ERR,
2816 "freeing msi interrupt %d.\n", i);
2817 }
2818 }
2819 }
2820 ql_disable_msix(qdev);
2821}
2822
2823static int ql_request_irq(struct ql_adapter *qdev)
2824{
2825 int i;
2826 int status = 0;
2827 struct pci_dev *pdev = qdev->pdev;
2828 struct intr_context *intr_context = &qdev->intr_context[0];
2829
2830 ql_resolve_queues_to_irqs(qdev);
2831
2832 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
2833 atomic_set(&intr_context->irq_cnt, 0);
2834 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
2835 status = request_irq(qdev->msi_x_entry[i].vector,
2836 intr_context->handler,
2837 0,
2838 intr_context->name,
2839 &qdev->rx_ring[i]);
2840 if (status) {
2841 QPRINTK(qdev, IFUP, ERR,
2842 "Failed request for MSIX interrupt %d.\n",
2843 i);
2844 goto err_irq;
2845 } else {
2846 QPRINTK(qdev, IFUP, INFO,
2847 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2848 i,
2849 qdev->rx_ring[i].type ==
2850 DEFAULT_Q ? "DEFAULT_Q" : "",
2851 qdev->rx_ring[i].type ==
2852 TX_Q ? "TX_Q" : "",
2853 qdev->rx_ring[i].type ==
2854 RX_Q ? "RX_Q" : "", intr_context->name);
2855 }
2856 } else {
2857 QPRINTK(qdev, IFUP, DEBUG,
2858 "trying msi or legacy interrupts.\n");
2859 QPRINTK(qdev, IFUP, DEBUG,
2860 "%s: irq = %d.\n", __func__, pdev->irq);
2861 QPRINTK(qdev, IFUP, DEBUG,
2862 "%s: context->name = %s.\n", __func__,
2863 intr_context->name);
2864 QPRINTK(qdev, IFUP, DEBUG,
2865 "%s: dev_id = 0x%p.\n", __func__,
2866 &qdev->rx_ring[0]);
2867 status =
2868 request_irq(pdev->irq, qlge_isr,
2869 test_bit(QL_MSI_ENABLED,
2870 &qdev->
2871 flags) ? 0 : IRQF_SHARED,
2872 intr_context->name, &qdev->rx_ring[0]);
2873 if (status)
2874 goto err_irq;
2875
2876 QPRINTK(qdev, IFUP, ERR,
2877 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
2878 i,
2879 qdev->rx_ring[0].type ==
2880 DEFAULT_Q ? "DEFAULT_Q" : "",
2881 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
2882 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
2883 intr_context->name);
2884 }
2885 intr_context->hooked = 1;
2886 }
2887 return status;
2888err_irq:
2889 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
2890 ql_free_irq(qdev);
2891 return status;
2892}
2893
2894static int ql_start_rss(struct ql_adapter *qdev)
2895{
2896 struct ricb *ricb = &qdev->ricb;
2897 int status = 0;
2898 int i;
2899 u8 *hash_id = (u8 *) ricb->hash_cq_id;
2900
2901 memset((void *)ricb, 0, sizeof(ricb));
2902
2903 ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
2904 ricb->flags =
2905 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
2906 RSS_RT6);
2907 ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
2908
2909 /*
2910 * Fill out the Indirection Table.
2911 */
2912 for (i = 0; i < 32; i++)
2913 hash_id[i] = i & 1;
2914
2915 /*
2916 * Random values for the IPv6 and IPv4 Hash Keys.
2917 */
2918 get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
2919 get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
2920
2921 QPRINTK(qdev, IFUP, INFO, "Initializing RSS.\n");
2922
2923 status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
2924 if (status) {
2925 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
2926 return status;
2927 }
2928 QPRINTK(qdev, IFUP, INFO, "Successfully loaded RICB.\n");
2929 return status;
2930}
2931
2932/* Initialize the frame-to-queue routing. */
2933static int ql_route_initialize(struct ql_adapter *qdev)
2934{
2935 int status = 0;
2936 int i;
2937
2938 /* Clear all the entries in the routing table. */
2939 for (i = 0; i < 16; i++) {
2940 status = ql_set_routing_reg(qdev, i, 0, 0);
2941 if (status) {
2942 QPRINTK(qdev, IFUP, ERR,
2943 "Failed to init routing register for CAM packets.\n");
2944 return status;
2945 }
2946 }
2947
2948 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
2949 if (status) {
2950 QPRINTK(qdev, IFUP, ERR,
2951 "Failed to init routing register for error packets.\n");
2952 return status;
2953 }
2954 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
2955 if (status) {
2956 QPRINTK(qdev, IFUP, ERR,
2957 "Failed to init routing register for broadcast packets.\n");
2958 return status;
2959 }
2960 /* If we have more than one inbound queue, then turn on RSS in the
2961 * routing block.
2962 */
2963 if (qdev->rss_ring_count > 1) {
2964 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
2965 RT_IDX_RSS_MATCH, 1);
2966 if (status) {
2967 QPRINTK(qdev, IFUP, ERR,
2968 "Failed to init routing register for MATCH RSS packets.\n");
2969 return status;
2970 }
2971 }
2972
2973 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
2974 RT_IDX_CAM_HIT, 1);
2975 if (status) {
2976 QPRINTK(qdev, IFUP, ERR,
2977 "Failed to init routing register for CAM packets.\n");
2978 return status;
2979 }
2980 return status;
2981}
2982
/*
 * Bring the hardware to an operational state: program the global
 * configuration registers, start every rx and tx ring, optionally
 * enable RSS, initialize the port and MAC address, set up frame
 * routing, and finally enable NAPI on the RSS rings.
 *
 * Called with qdev->hw_lock held (see ql_adapter_up()).  Returns 0
 * on success or the first failing sub-step's error.
 */
static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	/* The upper 16 bits of these config registers act as a
	 * write-enable mask for the corresponding lower bits.
	 */
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
	    FSC_EC | FSC_VM_PAGE_4K | FSC_SH;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
		   min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			QPRINTK(qdev, IFUP, ERR,
				"Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Bring the port/link block up. */
	status = ql_port_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
		return status;
	}

	/* Program the permanent MAC address into the CAM. */
	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
				     MAC_ADDR_TYPE_CAM_MAC, qdev->func);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = qdev->rss_ring_first_cq_id; i < qdev->rx_ring_count; i++) {
		QPRINTK(qdev, IFUP, INFO, "Enabling NAPI for rx_ring[%d].\n",
			i);
		napi_enable(&qdev->rx_ring[i].napi);
	}

	return status;
}
3075
3076/* Issue soft reset to chip. */
3077static int ql_adapter_reset(struct ql_adapter *qdev)
3078{
3079 u32 value;
3080 int max_wait_time;
3081 int status = 0;
3082 int resetCnt = 0;
3083
3084#define MAX_RESET_CNT 1
3085issueReset:
3086 resetCnt++;
3087 QPRINTK(qdev, IFDOWN, DEBUG, "Issue soft reset to chip.\n");
3088 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3089 /* Wait for reset to complete. */
3090 max_wait_time = 3;
3091 QPRINTK(qdev, IFDOWN, DEBUG, "Wait %d seconds for reset to complete.\n",
3092 max_wait_time);
3093 do {
3094 value = ql_read32(qdev, RST_FO);
3095 if ((value & RST_FO_FR) == 0)
3096 break;
3097
3098 ssleep(1);
3099 } while ((--max_wait_time));
3100 if (value & RST_FO_FR) {
3101 QPRINTK(qdev, IFDOWN, ERR,
3102 "Stuck in SoftReset: FSC_SR:0x%08x\n", value);
3103 if (resetCnt < MAX_RESET_CNT)
3104 goto issueReset;
3105 }
3106 if (max_wait_time == 0) {
3107 status = -ETIMEDOUT;
3108 QPRINTK(qdev, IFDOWN, ERR,
3109 "ETIMEOUT!!! errored out of resetting the chip!\n");
3110 }
3111
3112 return status;
3113}
3114
3115static void ql_display_dev_info(struct net_device *ndev)
3116{
3117 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3118
3119 QPRINTK(qdev, PROBE, INFO,
3120 "Function #%d, NIC Roll %d, NIC Rev = %d, "
3121 "XG Roll = %d, XG Rev = %d.\n",
3122 qdev->func,
3123 qdev->chip_rev_id & 0x0000000f,
3124 qdev->chip_rev_id >> 4 & 0x0000000f,
3125 qdev->chip_rev_id >> 8 & 0x0000000f,
3126 qdev->chip_rev_id >> 12 & 0x0000000f);
3127 QPRINTK(qdev, PROBE, INFO,
3128 "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3129 ndev->dev_addr[0], ndev->dev_addr[1],
3130 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3131 ndev->dev_addr[5]);
3132}
3133
3134static int ql_adapter_down(struct ql_adapter *qdev)
3135{
3136 struct net_device *ndev = qdev->ndev;
3137 int i, status = 0;
3138 struct rx_ring *rx_ring;
3139
3140 netif_stop_queue(ndev);
3141 netif_carrier_off(ndev);
3142
3143 cancel_delayed_work_sync(&qdev->asic_reset_work);
3144 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3145 cancel_delayed_work_sync(&qdev->mpi_work);
3146
3147 /* The default queue at index 0 is always processed in
3148 * a workqueue.
3149 */
3150 cancel_delayed_work_sync(&qdev->rx_ring[0].rx_work);
3151
3152 /* The rest of the rx_rings are processed in
3153 * a workqueue only if it's a single interrupt
3154 * environment (MSI/Legacy).
3155 */
3156 for (i = 1; i > qdev->rx_ring_count; i++) {
3157 rx_ring = &qdev->rx_ring[i];
3158 /* Only the RSS rings use NAPI on multi irq
3159 * environment. Outbound completion processing
3160 * is done in interrupt context.
3161 */
3162 if (i >= qdev->rss_ring_first_cq_id) {
3163 napi_disable(&rx_ring->napi);
3164 } else {
3165 cancel_delayed_work_sync(&rx_ring->rx_work);
3166 }
3167 }
3168
3169 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3170
3171 ql_disable_interrupts(qdev);
3172
3173 ql_tx_ring_clean(qdev);
3174
3175 spin_lock(&qdev->hw_lock);
3176 status = ql_adapter_reset(qdev);
3177 if (status)
3178 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3179 qdev->func);
3180 spin_unlock(&qdev->hw_lock);
3181 return status;
3182}
3183
3184static int ql_adapter_up(struct ql_adapter *qdev)
3185{
3186 int err = 0;
3187
3188 spin_lock(&qdev->hw_lock);
3189 err = ql_adapter_initialize(qdev);
3190 if (err) {
3191 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
3192 spin_unlock(&qdev->hw_lock);
3193 goto err_init;
3194 }
3195 spin_unlock(&qdev->hw_lock);
3196 set_bit(QL_ADAPTER_UP, &qdev->flags);
3197 ql_enable_interrupts(qdev);
3198 ql_enable_all_completion_interrupts(qdev);
3199 if ((ql_read32(qdev, STS) & qdev->port_init)) {
3200 netif_carrier_on(qdev->ndev);
3201 netif_start_queue(qdev->ndev);
3202 }
3203
3204 return 0;
3205err_init:
3206 ql_adapter_reset(qdev);
3207 return err;
3208}
3209
3210static int ql_cycle_adapter(struct ql_adapter *qdev)
3211{
3212 int status;
3213
3214 status = ql_adapter_down(qdev);
3215 if (status)
3216 goto error;
3217
3218 status = ql_adapter_up(qdev);
3219 if (status)
3220 goto error;
3221
3222 return status;
3223error:
3224 QPRINTK(qdev, IFUP, ALERT,
3225 "Driver up/down cycle failed, closing device\n");
3226 rtnl_lock();
3227 dev_close(qdev->ndev);
3228 rtnl_unlock();
3229 return status;
3230}
3231
/* Undo ql_get_adapter_resources(): free DMA memory, then the IRQ(s). */
static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}
3237
3238static int ql_get_adapter_resources(struct ql_adapter *qdev)
3239{
3240 int status = 0;
3241
3242 if (ql_alloc_mem_resources(qdev)) {
3243 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3244 return -ENOMEM;
3245 }
3246 status = ql_request_irq(qdev);
3247 if (status)
3248 goto err_irq;
3249 return status;
3250err_irq:
3251 ql_free_mem_resources(qdev);
3252 return status;
3253}
3254
3255static int qlge_close(struct net_device *ndev)
3256{
3257 struct ql_adapter *qdev = netdev_priv(ndev);
3258
3259 /*
3260 * Wait for device to recover from a reset.
3261 * (Rarely happens, but possible.)
3262 */
3263 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3264 msleep(1);
3265 ql_adapter_down(qdev);
3266 ql_release_adapter_resources(qdev);
3267 ql_free_ring_cb(qdev);
3268 return 0;
3269}
3270
3271static int ql_configure_rings(struct ql_adapter *qdev)
3272{
3273 int i;
3274 struct rx_ring *rx_ring;
3275 struct tx_ring *tx_ring;
3276 int cpu_cnt = num_online_cpus();
3277
3278 /*
3279 * For each processor present we allocate one
3280 * rx_ring for outbound completions, and one
3281 * rx_ring for inbound completions. Plus there is
3282 * always the one default queue. For the CPU
3283 * counts we end up with the following rx_rings:
3284 * rx_ring count =
3285 * one default queue +
3286 * (CPU count * outbound completion rx_ring) +
3287 * (CPU count * inbound (RSS) completion rx_ring)
3288 * To keep it simple we limit the total number of
3289 * queues to < 32, so we truncate CPU to 8.
3290 * This limitation can be removed when requested.
3291 */
3292
3293 if (cpu_cnt > 8)
3294 cpu_cnt = 8;
3295
3296 /*
3297 * rx_ring[0] is always the default queue.
3298 */
3299 /* Allocate outbound completion ring for each CPU. */
3300 qdev->tx_ring_count = cpu_cnt;
3301 /* Allocate inbound completion (RSS) ring for each CPU. */
3302 qdev->rss_ring_count = cpu_cnt;
3303 /* cq_id for the first inbound ring handler. */
3304 qdev->rss_ring_first_cq_id = cpu_cnt + 1;
3305 /*
3306 * qdev->rx_ring_count:
3307 * Total number of rx_rings. This includes the one
3308 * default queue, a number of outbound completion
3309 * handler rx_rings, and the number of inbound
3310 * completion handler rx_rings.
3311 */
3312 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1;
3313
3314 if (ql_alloc_ring_cb(qdev))
3315 return -ENOMEM;
3316
3317 for (i = 0; i < qdev->tx_ring_count; i++) {
3318 tx_ring = &qdev->tx_ring[i];
3319 memset((void *)tx_ring, 0, sizeof(tx_ring));
3320 tx_ring->qdev = qdev;
3321 tx_ring->wq_id = i;
3322 tx_ring->wq_len = qdev->tx_ring_size;
3323 tx_ring->wq_size =
3324 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3325
3326 /*
3327 * The completion queue ID for the tx rings start
3328 * immediately after the default Q ID, which is zero.
3329 */
3330 tx_ring->cq_id = i + 1;
3331 }
3332
3333 for (i = 0; i < qdev->rx_ring_count; i++) {
3334 rx_ring = &qdev->rx_ring[i];
3335 memset((void *)rx_ring, 0, sizeof(rx_ring));
3336 rx_ring->qdev = qdev;
3337 rx_ring->cq_id = i;
3338 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
3339 if (i == 0) { /* Default queue at index 0. */
3340 /*
3341 * Default queue handles bcast/mcast plus
3342 * async events. Needs buffers.
3343 */
3344 rx_ring->cq_len = qdev->rx_ring_size;
3345 rx_ring->cq_size =
3346 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3347 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3348 rx_ring->lbq_size =
3349 rx_ring->lbq_len * sizeof(struct bq_element);
3350 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3351 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3352 rx_ring->sbq_size =
3353 rx_ring->sbq_len * sizeof(struct bq_element);
3354 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3355 rx_ring->type = DEFAULT_Q;
3356 } else if (i < qdev->rss_ring_first_cq_id) {
3357 /*
3358 * Outbound queue handles outbound completions only.
3359 */
3360 /* outbound cq is same size as tx_ring it services. */
3361 rx_ring->cq_len = qdev->tx_ring_size;
3362 rx_ring->cq_size =
3363 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3364 rx_ring->lbq_len = 0;
3365 rx_ring->lbq_size = 0;
3366 rx_ring->lbq_buf_size = 0;
3367 rx_ring->sbq_len = 0;
3368 rx_ring->sbq_size = 0;
3369 rx_ring->sbq_buf_size = 0;
3370 rx_ring->type = TX_Q;
3371 } else { /* Inbound completions (RSS) queues */
3372 /*
3373 * Inbound queues handle unicast frames only.
3374 */
3375 rx_ring->cq_len = qdev->rx_ring_size;
3376 rx_ring->cq_size =
3377 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3378 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3379 rx_ring->lbq_size =
3380 rx_ring->lbq_len * sizeof(struct bq_element);
3381 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE;
3382 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3383 rx_ring->sbq_size =
3384 rx_ring->sbq_len * sizeof(struct bq_element);
3385 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2;
3386 rx_ring->type = RX_Q;
3387 }
3388 }
3389 return 0;
3390}
3391
/* net_device open hook: size/allocate the rings, acquire DMA memory
 * and interrupts, then bring the adapter up.  On any failure after
 * ring configuration, all acquired resources are released.
 */
static int qlge_open(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (!err)
		err = ql_adapter_up(qdev);

	if (err) {
		ql_release_adapter_resources(qdev);
		ql_free_ring_cb(qdev);
	}
	return err;
}
3416
3417static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3418{
3419 struct ql_adapter *qdev = netdev_priv(ndev);
3420
3421 if (ndev->mtu == 1500 && new_mtu == 9000) {
3422 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3423 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3424 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3425 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
3426 (ndev->mtu == 9000 && new_mtu == 9000)) {
3427 return 0;
3428 } else
3429 return -EINVAL;
3430 ndev->mtu = new_mtu;
3431 return 0;
3432}
3433
/* net_device get_stats hook: return the driver-maintained counters. */
static struct net_device_stats *qlge_get_stats(struct net_device
					       *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	return &qdev->stats;
}
3440
3441static void qlge_set_multicast_list(struct net_device *ndev)
3442{
3443 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3444 struct dev_mc_list *mc_ptr;
3445 int i;
3446
3447 spin_lock(&qdev->hw_lock);
3448 /*
3449 * Set or clear promiscuous mode if a
3450 * transition is taking place.
3451 */
3452 if (ndev->flags & IFF_PROMISC) {
3453 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3454 if (ql_set_routing_reg
3455 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
3456 QPRINTK(qdev, HW, ERR,
3457 "Failed to set promiscous mode.\n");
3458 } else {
3459 set_bit(QL_PROMISCUOUS, &qdev->flags);
3460 }
3461 }
3462 } else {
3463 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
3464 if (ql_set_routing_reg
3465 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
3466 QPRINTK(qdev, HW, ERR,
3467 "Failed to clear promiscous mode.\n");
3468 } else {
3469 clear_bit(QL_PROMISCUOUS, &qdev->flags);
3470 }
3471 }
3472 }
3473
3474 /*
3475 * Set or clear all multicast mode if a
3476 * transition is taking place.
3477 */
3478 if ((ndev->flags & IFF_ALLMULTI) ||
3479 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
3480 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
3481 if (ql_set_routing_reg
3482 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
3483 QPRINTK(qdev, HW, ERR,
3484 "Failed to set all-multi mode.\n");
3485 } else {
3486 set_bit(QL_ALLMULTI, &qdev->flags);
3487 }
3488 }
3489 } else {
3490 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
3491 if (ql_set_routing_reg
3492 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
3493 QPRINTK(qdev, HW, ERR,
3494 "Failed to clear all-multi mode.\n");
3495 } else {
3496 clear_bit(QL_ALLMULTI, &qdev->flags);
3497 }
3498 }
3499 }
3500
3501 if (ndev->mc_count) {
3502 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
3503 i++, mc_ptr = mc_ptr->next)
3504 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
3505 MAC_ADDR_TYPE_MULTI_MAC, i)) {
3506 QPRINTK(qdev, HW, ERR,
3507 "Failed to loadmulticast address.\n");
3508 goto exit;
3509 }
3510 if (ql_set_routing_reg
3511 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
3512 QPRINTK(qdev, HW, ERR,
3513 "Failed to set multicast match mode.\n");
3514 } else {
3515 set_bit(QL_ALLMULTI, &qdev->flags);
3516 }
3517 }
3518exit:
3519 spin_unlock(&qdev->hw_lock);
3520}
3521
3522static int qlge_set_mac_address(struct net_device *ndev, void *p)
3523{
3524 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3525 struct sockaddr *addr = p;
3526
3527 if (netif_running(ndev))
3528 return -EBUSY;
3529
3530 if (!is_valid_ether_addr(addr->sa_data))
3531 return -EADDRNOTAVAIL;
3532 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3533
3534 spin_lock(&qdev->hw_lock);
3535 if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3536 MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3537 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3538 return -1;
3539 }
3540 spin_unlock(&qdev->hw_lock);
3541
3542 return 0;
3543}
3544
/* net_device tx_timeout hook: recover by scheduling an ASIC reset. */
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
3550
/* Deferred work handler for asic_reset_work: down/up-cycle the
 * adapter to recover from a hang (e.g. a tx timeout).
 */
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	ql_cycle_adapter(qdev);
}
3557
3558static void ql_get_board_info(struct ql_adapter *qdev)
3559{
3560 qdev->func =
3561 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
3562 if (qdev->func) {
3563 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
3564 qdev->port_link_up = STS_PL1;
3565 qdev->port_init = STS_PI1;
3566 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
3567 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
3568 } else {
3569 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
3570 qdev->port_link_up = STS_PL0;
3571 qdev->port_init = STS_PI0;
3572 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
3573 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
3574 }
3575 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
3576}
3577
3578static void ql_release_all(struct pci_dev *pdev)
3579{
3580 struct net_device *ndev = pci_get_drvdata(pdev);
3581 struct ql_adapter *qdev = netdev_priv(ndev);
3582
3583 if (qdev->workqueue) {
3584 destroy_workqueue(qdev->workqueue);
3585 qdev->workqueue = NULL;
3586 }
3587 if (qdev->q_workqueue) {
3588 destroy_workqueue(qdev->q_workqueue);
3589 qdev->q_workqueue = NULL;
3590 }
3591 if (qdev->reg_base)
3592 iounmap((void *)qdev->reg_base);
3593 if (qdev->doorbell_area)
3594 iounmap(qdev->doorbell_area);
3595 pci_release_regions(pdev);
3596 pci_set_drvdata(pdev, NULL);
3597}
3598
3599static int __devinit ql_init_device(struct pci_dev *pdev,
3600 struct net_device *ndev, int cards_found)
3601{
3602 struct ql_adapter *qdev = netdev_priv(ndev);
3603 int pos, err = 0;
3604 u16 val16;
3605
3606 memset((void *)qdev, 0, sizeof(qdev));
3607 err = pci_enable_device(pdev);
3608 if (err) {
3609 dev_err(&pdev->dev, "PCI device enable failed.\n");
3610 return err;
3611 }
3612
3613 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3614 if (pos <= 0) {
3615 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
3616 "aborting.\n");
3617 goto err_out;
3618 } else {
3619 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3620 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3621 val16 |= (PCI_EXP_DEVCTL_CERE |
3622 PCI_EXP_DEVCTL_NFERE |
3623 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3624 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3625 }
3626
3627 err = pci_request_regions(pdev, DRV_NAME);
3628 if (err) {
3629 dev_err(&pdev->dev, "PCI region request failed.\n");
3630 goto err_out;
3631 }
3632
3633 pci_set_master(pdev);
3634 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3635 set_bit(QL_DMA64, &qdev->flags);
3636 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3637 } else {
3638 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3639 if (!err)
3640 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3641 }
3642
3643 if (err) {
3644 dev_err(&pdev->dev, "No usable DMA configuration.\n");
3645 goto err_out;
3646 }
3647
3648 pci_set_drvdata(pdev, ndev);
3649 qdev->reg_base =
3650 ioremap_nocache(pci_resource_start(pdev, 1),
3651 pci_resource_len(pdev, 1));
3652 if (!qdev->reg_base) {
3653 dev_err(&pdev->dev, "Register mapping failed.\n");
3654 err = -ENOMEM;
3655 goto err_out;
3656 }
3657
3658 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
3659 qdev->doorbell_area =
3660 ioremap_nocache(pci_resource_start(pdev, 3),
3661 pci_resource_len(pdev, 3));
3662 if (!qdev->doorbell_area) {
3663 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
3664 err = -ENOMEM;
3665 goto err_out;
3666 }
3667
3668 ql_get_board_info(qdev);
3669 qdev->ndev = ndev;
3670 qdev->pdev = pdev;
3671 qdev->msg_enable = netif_msg_init(debug, default_msg);
3672 spin_lock_init(&qdev->hw_lock);
3673 spin_lock_init(&qdev->stats_lock);
3674
3675 /* make sure the EEPROM is good */
3676 err = ql_get_flash_params(qdev);
3677 if (err) {
3678 dev_err(&pdev->dev, "Invalid FLASH.\n");
3679 goto err_out;
3680 }
3681
3682 if (!is_valid_ether_addr(qdev->flash.mac_addr))
3683 goto err_out;
3684
3685 memcpy(ndev->dev_addr, qdev->flash.mac_addr, ndev->addr_len);
3686 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3687
3688 /* Set up the default ring sizes. */
3689 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
3690 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
3691
3692 /* Set up the coalescing parameters. */
3693 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
3694 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
3695 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3696 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
3697
3698 /*
3699 * Set up the operating parameters.
3700 */
3701 qdev->rx_csum = 1;
3702
3703 qdev->q_workqueue = create_workqueue(ndev->name);
3704 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3705 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
3706 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
3707 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
3708
3709 if (!cards_found) {
3710 dev_info(&pdev->dev, "%s\n", DRV_STRING);
3711 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
3712 DRV_NAME, DRV_VERSION);
3713 }
3714 return 0;
3715err_out:
3716 ql_release_all(pdev);
3717 pci_disable_device(pdev);
3718 return err;
3719}
3720
3721static int __devinit qlge_probe(struct pci_dev *pdev,
3722 const struct pci_device_id *pci_entry)
3723{
3724 struct net_device *ndev = NULL;
3725 struct ql_adapter *qdev = NULL;
3726 static int cards_found = 0;
3727 int err = 0;
3728
3729 ndev = alloc_etherdev(sizeof(struct ql_adapter));
3730 if (!ndev)
3731 return -ENOMEM;
3732
3733 err = ql_init_device(pdev, ndev, cards_found);
3734 if (err < 0) {
3735 free_netdev(ndev);
3736 return err;
3737 }
3738
3739 qdev = netdev_priv(ndev);
3740 SET_NETDEV_DEV(ndev, &pdev->dev);
3741 ndev->features = (0
3742 | NETIF_F_IP_CSUM
3743 | NETIF_F_SG
3744 | NETIF_F_TSO
3745 | NETIF_F_TSO6
3746 | NETIF_F_TSO_ECN
3747 | NETIF_F_HW_VLAN_TX
3748 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
3749
3750 if (test_bit(QL_DMA64, &qdev->flags))
3751 ndev->features |= NETIF_F_HIGHDMA;
3752
3753 /*
3754 * Set up net_device structure.
3755 */
3756 ndev->tx_queue_len = qdev->tx_ring_size;
3757 ndev->irq = pdev->irq;
3758 ndev->open = qlge_open;
3759 ndev->stop = qlge_close;
3760 ndev->hard_start_xmit = qlge_send;
3761 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
3762 ndev->change_mtu = qlge_change_mtu;
3763 ndev->get_stats = qlge_get_stats;
3764 ndev->set_multicast_list = qlge_set_multicast_list;
3765 ndev->set_mac_address = qlge_set_mac_address;
3766 ndev->tx_timeout = qlge_tx_timeout;
3767 ndev->watchdog_timeo = 10 * HZ;
3768 ndev->vlan_rx_register = ql_vlan_rx_register;
3769 ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
3770 ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
3771 err = register_netdev(ndev);
3772 if (err) {
3773 dev_err(&pdev->dev, "net device registration failed.\n");
3774 ql_release_all(pdev);
3775 pci_disable_device(pdev);
3776 return err;
3777 }
3778 netif_carrier_off(ndev);
3779 netif_stop_queue(ndev);
3780 ql_display_dev_info(ndev);
3781 cards_found++;
3782 return 0;
3783}
3784
/* PCI remove: unregister the netdev, release all driver resources,
 * disable the PCI device and free the net_device.
 */
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
3793
3794/*
3795 * This callback is called by the PCI subsystem whenever
3796 * a PCI bus error is detected.
3797 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* Quiesce the adapter if it was running when the error hit. */
	if (netif_running(ndev))
		ql_adapter_down(qdev);

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
3812
3813/*
3814 * This callback is called after the PCI buss has been reset.
3815 * Basically, this tries to restart the card from scratch.
3816 * This is a shortened version of the device probe/discovery code,
3817 * it resembles the first-half of the () routine.
3818 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	ql_adapter_reset(qdev);

	/* Make sure the EEPROM is good */
	/* NOTE(review): this copies dev_addr into perm_addr rather than
	 * re-reading the flash — presumably just revalidating the
	 * cached address; confirm this is the intended recovery step.
	 */
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	if (!is_valid_ether_addr(ndev->perm_addr)) {
		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
3846
3847static void qlge_io_resume(struct pci_dev *pdev)
3848{
3849 struct net_device *ndev = pci_get_drvdata(pdev);
3850 struct ql_adapter *qdev = netdev_priv(ndev);
3851
3852 pci_set_master(pdev);
3853
3854 if (netif_running(ndev)) {
3855 if (ql_adapter_up(qdev)) {
3856 QPRINTK(qdev, IFUP, ERR,
3857 "Device initialization failed after reset.\n");
3858 return;
3859 }
3860 }
3861
3862 netif_device_attach(ndev);
3863}
3864
3865static struct pci_error_handlers qlge_err_handler = {
3866 .error_detected = qlge_io_error_detected,
3867 .slot_reset = qlge_io_slot_reset,
3868 .resume = qlge_io_resume,
3869};
3870
3871static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
3872{
3873 struct net_device *ndev = pci_get_drvdata(pdev);
3874 struct ql_adapter *qdev = netdev_priv(ndev);
3875 int err;
3876
3877 netif_device_detach(ndev);
3878
3879 if (netif_running(ndev)) {
3880 err = ql_adapter_down(qdev);
3881 if (!err)
3882 return err;
3883 }
3884
3885 err = pci_save_state(pdev);
3886 if (err)
3887 return err;
3888
3889 pci_disable_device(pdev);
3890
3891 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3892
3893 return 0;
3894}
3895
3896#ifdef CONFIG_PM
3897static int qlge_resume(struct pci_dev *pdev)
3898{
3899 struct net_device *ndev = pci_get_drvdata(pdev);
3900 struct ql_adapter *qdev = netdev_priv(ndev);
3901 int err;
3902
3903 pci_set_power_state(pdev, PCI_D0);
3904 pci_restore_state(pdev);
3905 err = pci_enable_device(pdev);
3906 if (err) {
3907 QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
3908 return err;
3909 }
3910 pci_set_master(pdev);
3911
3912 pci_enable_wake(pdev, PCI_D3hot, 0);
3913 pci_enable_wake(pdev, PCI_D3cold, 0);
3914
3915 if (netif_running(ndev)) {
3916 err = ql_adapter_up(qdev);
3917 if (err)
3918 return err;
3919 }
3920
3921 netif_device_attach(ndev);
3922
3923 return 0;
3924}
3925#endif /* CONFIG_PM */
3926
/* System shutdown: treat as a suspend to power the device down. */
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
3931
/* PCI driver glue: probe/remove, power management, shutdown and AER
 * error-recovery hooks for the qlge device.
 */
static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
3944
/* Module entry point: register the PCI driver. */
static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}
3949
/* Module exit point: unregister the PCI driver. */
static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
new file mode 100644
index 000000000000..24fe344bcf1f
--- /dev/null
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -0,0 +1,150 @@
1#include "qlge.h"
2
3static int ql_read_mbox_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
4{
5 int status;
6 /* wait for reg to come ready */
7 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
8 if (status)
9 goto exit;
10 /* set up for reg read */
11 ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
12 /* wait for reg to come ready */
13 status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
14 if (status)
15 goto exit;
16 /* get the data */
17 *data = ql_read32(qdev, PROC_DATA);
18exit:
19 return status;
20}
21
22int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
23{
24 int i, status;
25
26 status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
27 if (status)
28 return -EBUSY;
29 for (i = 0; i < mbcp->out_count; i++) {
30 status =
31 ql_read_mbox_reg(qdev, qdev->mailbox_out + i,
32 &mbcp->mbox_out[i]);
33 if (status) {
34 QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
35 break;
36 }
37 }
38 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
39 return status;
40}
41
42static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
43{
44 mbcp->out_count = 2;
45
46 if (ql_get_mb_sts(qdev, mbcp))
47 goto exit;
48
49 qdev->link_status = mbcp->mbox_out[1];
50 QPRINTK(qdev, DRV, ERR, "Link Up.\n");
51 QPRINTK(qdev, DRV, INFO, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
52 if (!netif_carrier_ok(qdev->ndev)) {
53 QPRINTK(qdev, LINK, INFO, "Link is Up.\n");
54 netif_carrier_on(qdev->ndev);
55 netif_wake_queue(qdev->ndev);
56 }
57exit:
58 /* Clear the MPI firmware status. */
59 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
60}
61
62static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
63{
64 mbcp->out_count = 3;
65
66 if (ql_get_mb_sts(qdev, mbcp)) {
67 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
68 goto exit;
69 }
70
71 if (netif_carrier_ok(qdev->ndev)) {
72 QPRINTK(qdev, LINK, INFO, "Link is Down.\n");
73 netif_carrier_off(qdev->ndev);
74 netif_stop_queue(qdev->ndev);
75 }
76 QPRINTK(qdev, DRV, ERR, "Link Down.\n");
77 QPRINTK(qdev, DRV, ERR, "Link Status = 0x%.08x.\n", mbcp->mbox_out[1]);
78exit:
79 /* Clear the MPI firmware status. */
80 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
81}
82
83static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
84{
85 mbcp->out_count = 2;
86
87 if (ql_get_mb_sts(qdev, mbcp)) {
88 QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
89 goto exit;
90 }
91 QPRINTK(qdev, DRV, ERR, "Firmware initialized!\n");
92 QPRINTK(qdev, DRV, ERR, "Firmware status = 0x%.08x.\n",
93 mbcp->mbox_out[0]);
94 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
95 mbcp->mbox_out[1]);
96exit:
97 /* Clear the MPI firmware status. */
98 ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
99}
100
/* Deferred MPI event handler: while the STS_PI (processor interrupt)
 * bit is set, read the outbound mailbox and dispatch each async event
 * notification, then re-enable the completion interrupt.
 */
void ql_mpi_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_work.work);
	struct mbox_params mbc;
	struct mbox_params *mbcp = &mbc;
	mbcp->out_count = 1;

	while (ql_read32(qdev, STS) & STS_PI) {
		if (ql_get_mb_sts(qdev, mbcp)) {
			QPRINTK(qdev, DRV, ERR,
				"Could not read MPI, resetting ASIC!\n");
			ql_queue_asic_error(qdev);
		}

		switch (mbcp->mbox_out[0]) {
		case AEN_LINK_UP:
			ql_link_up(qdev, mbcp);
			break;
		case AEN_LINK_DOWN:
			ql_link_down(qdev, mbcp);
			break;
		case AEN_FW_INIT_DONE:
			ql_init_fw_done(qdev, mbcp);
			break;
		case MB_CMD_STS_GOOD:
			break;
		case AEN_FW_INIT_FAIL:
		case AEN_SYS_ERR:
		case MB_CMD_STS_ERR:
			ql_queue_fw_error(qdev);
			/* fall through: still clear the MPI status below */
		default:
			/* Clear the MPI firmware status. */
			ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
			break;
		}
	}
	ql_enable_completion_interrupt(qdev, 0);
}
140
/* Deferred work: reset the MPI RISC processor by pulsing the CSR
 * reset command bit.
 */
void ql_mpi_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, mpi_reset_work.work);
	QPRINTK(qdev, DRV, ERR,
		"Enter, qdev = %p..\n", qdev);
	ql_write32(qdev, CSR, CSR_CMD_SET_RST);
	/* Hold reset asserted briefly before releasing it. */
	msleep(50);
	ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5d86281d9363..34fe7ef8e5ed 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -265,7 +265,7 @@ static void r6040_free_txbufs(struct net_device *dev)
265 le32_to_cpu(lp->tx_insert_ptr->buf), 265 le32_to_cpu(lp->tx_insert_ptr->buf),
266 MAX_BUF_SIZE, PCI_DMA_TODEVICE); 266 MAX_BUF_SIZE, PCI_DMA_TODEVICE);
267 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr); 267 dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
268 lp->rx_insert_ptr->skb_ptr = NULL; 268 lp->tx_insert_ptr->skb_ptr = NULL;
269 } 269 }
270 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp; 270 lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp;
271 } 271 }
@@ -370,7 +370,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
370 /* Reset internal state machine */ 370 /* Reset internal state machine */
371 iowrite16(2, ioaddr + MAC_SM); 371 iowrite16(2, ioaddr + MAC_SM);
372 iowrite16(0, ioaddr + MAC_SM); 372 iowrite16(0, ioaddr + MAC_SM);
373 udelay(5000); 373 mdelay(5);
374 374
375 /* MAC Bus Control Register */ 375 /* MAC Bus Control Register */
376 iowrite16(MBCR_DEFAULT, ioaddr + MBCR); 376 iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -806,7 +806,7 @@ static void r6040_mac_address(struct net_device *dev)
806 iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */ 806 iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
807 iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */ 807 iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
808 iowrite16(0, ioaddr + MAC_SM); 808 iowrite16(0, ioaddr + MAC_SM);
809 udelay(5000); 809 mdelay(5);
810 810
811 /* Restore MAC Address */ 811 /* Restore MAC Address */
812 adrp = (u16 *) dev->dev_addr; 812 adrp = (u16 *) dev->dev_addr;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0f6f9747d255..fb899c675f47 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -36,7 +36,7 @@
36#define assert(expr) \ 36#define assert(expr) \
37 if (!(expr)) { \ 37 if (!(expr)) { \
38 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 38 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
39 #expr,__FILE__,__FUNCTION__,__LINE__); \ 39 #expr,__FILE__,__func__,__LINE__); \
40 } 40 }
41#define dprintk(fmt, args...) \ 41#define dprintk(fmt, args...) \
42 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) 42 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
@@ -61,6 +61,7 @@ static const int multicast_filter_limit = 32;
61/* MAC address length */ 61/* MAC address length */
62#define MAC_ADDR_LEN 6 62#define MAC_ADDR_LEN 6
63 63
64#define MAX_READ_REQUEST_SHIFT 12
64#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */ 65#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
65#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 66#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
66#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 67#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
@@ -95,6 +96,10 @@ enum mac_version {
95 RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB 96 RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
96 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd 97 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
97 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe 98 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
99 RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
100 RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
101 RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
102 RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
98 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb 103 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
99 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 104 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
100 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 105 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
@@ -121,6 +126,10 @@ static const struct {
121 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB 126 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
122 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd 127 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
123 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe 128 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
129 _R("RTL8102e", RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
130 _R("RTL8102e", RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
131 _R("RTL8102e", RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
132 _R("RTL8101e", RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
124 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E 133 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
125 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E 134 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
126 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 135 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
@@ -196,9 +205,6 @@ enum rtl_registers {
196 Config5 = 0x56, 205 Config5 = 0x56,
197 MultiIntr = 0x5c, 206 MultiIntr = 0x5c,
198 PHYAR = 0x60, 207 PHYAR = 0x60,
199 TBICSR = 0x64,
200 TBI_ANAR = 0x68,
201 TBI_LPAR = 0x6a,
202 PHYstatus = 0x6c, 208 PHYstatus = 0x6c,
203 RxMaxSize = 0xda, 209 RxMaxSize = 0xda,
204 CPlusCmd = 0xe0, 210 CPlusCmd = 0xe0,
@@ -212,6 +218,32 @@ enum rtl_registers {
212 FuncForceEvent = 0xfc, 218 FuncForceEvent = 0xfc,
213}; 219};
214 220
221enum rtl8110_registers {
222 TBICSR = 0x64,
223 TBI_ANAR = 0x68,
224 TBI_LPAR = 0x6a,
225};
226
227enum rtl8168_8101_registers {
228 CSIDR = 0x64,
229 CSIAR = 0x68,
230#define CSIAR_FLAG 0x80000000
231#define CSIAR_WRITE_CMD 0x80000000
232#define CSIAR_BYTE_ENABLE 0x0f
233#define CSIAR_BYTE_ENABLE_SHIFT 12
234#define CSIAR_ADDR_MASK 0x0fff
235
236 EPHYAR = 0x80,
237#define EPHYAR_FLAG 0x80000000
238#define EPHYAR_WRITE_CMD 0x80000000
239#define EPHYAR_REG_MASK 0x1f
240#define EPHYAR_REG_SHIFT 16
241#define EPHYAR_DATA_MASK 0xffff
242 DBG_REG = 0xd1,
243#define FIX_NAK_1 (1 << 4)
244#define FIX_NAK_2 (1 << 3)
245};
246
215enum rtl_register_content { 247enum rtl_register_content {
216 /* InterruptStatusBits */ 248 /* InterruptStatusBits */
217 SYSErr = 0x8000, 249 SYSErr = 0x8000,
@@ -265,7 +297,13 @@ enum rtl_register_content {
265 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ 297 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
266 298
267 /* Config1 register p.24 */ 299 /* Config1 register p.24 */
300 LEDS1 = (1 << 7),
301 LEDS0 = (1 << 6),
268 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */ 302 MSIEnable = (1 << 5), /* Enable Message Signaled Interrupt */
303 Speed_down = (1 << 4),
304 MEMMAP = (1 << 3),
305 IOMAP = (1 << 2),
306 VPD = (1 << 1),
269 PMEnable = (1 << 0), /* Power Management Enable */ 307 PMEnable = (1 << 0), /* Power Management Enable */
270 308
271 /* Config2 register p. 25 */ 309 /* Config2 register p. 25 */
@@ -275,6 +313,7 @@ enum rtl_register_content {
275 /* Config3 register p.25 */ 313 /* Config3 register p.25 */
276 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ 314 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
277 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ 315 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
316 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
278 317
279 /* Config5 register p.27 */ 318 /* Config5 register p.27 */
280 BWF = (1 << 6), /* Accept Broadcast wakeup frame */ 319 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
@@ -292,7 +331,16 @@ enum rtl_register_content {
292 TBINwComplete = 0x01000000, 331 TBINwComplete = 0x01000000,
293 332
294 /* CPlusCmd p.31 */ 333 /* CPlusCmd p.31 */
295 PktCntrDisable = (1 << 7), // 8168 334 EnableBist = (1 << 15), // 8168 8101
335 Mac_dbgo_oe = (1 << 14), // 8168 8101
336 Normal_mode = (1 << 13), // unused
337 Force_half_dup = (1 << 12), // 8168 8101
338 Force_rxflow_en = (1 << 11), // 8168 8101
339 Force_txflow_en = (1 << 10), // 8168 8101
340 Cxpl_dbg_sel = (1 << 9), // 8168 8101
341 ASF = (1 << 8), // 8168 8101
342 PktCntrDisable = (1 << 7), // 8168 8101
343 Mac_dbgo_sel = 0x001c, // 8168
296 RxVlan = (1 << 6), 344 RxVlan = (1 << 6),
297 RxChkSum = (1 << 5), 345 RxChkSum = (1 << 5),
298 PCIDAC = (1 << 4), 346 PCIDAC = (1 << 4),
@@ -370,8 +418,9 @@ struct ring_info {
370}; 418};
371 419
372enum features { 420enum features {
373 RTL_FEATURE_WOL = (1 << 0), 421 RTL_FEATURE_WOL = (1 << 0),
374 RTL_FEATURE_MSI = (1 << 1), 422 RTL_FEATURE_MSI = (1 << 1),
423 RTL_FEATURE_GMII = (1 << 2),
375}; 424};
376 425
377struct rtl8169_private { 426struct rtl8169_private {
@@ -406,13 +455,16 @@ struct rtl8169_private {
406 struct vlan_group *vlgrp; 455 struct vlan_group *vlgrp;
407#endif 456#endif
408 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); 457 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
409 void (*get_settings)(struct net_device *, struct ethtool_cmd *); 458 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
410 void (*phy_reset_enable)(void __iomem *); 459 void (*phy_reset_enable)(void __iomem *);
411 void (*hw_start)(struct net_device *); 460 void (*hw_start)(struct net_device *);
412 unsigned int (*phy_reset_pending)(void __iomem *); 461 unsigned int (*phy_reset_pending)(void __iomem *);
413 unsigned int (*link_ok)(void __iomem *); 462 unsigned int (*link_ok)(void __iomem *);
463 int pcie_cap;
414 struct delayed_work task; 464 struct delayed_work task;
415 unsigned features; 465 unsigned features;
466
467 struct mii_if_info mii;
416}; 468};
417 469
418MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 470MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -482,6 +534,94 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
482 return value; 534 return value;
483} 535}
484 536
537static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value)
538{
539 mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value);
540}
541
542static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
543 int val)
544{
545 struct rtl8169_private *tp = netdev_priv(dev);
546 void __iomem *ioaddr = tp->mmio_addr;
547
548 mdio_write(ioaddr, location, val);
549}
550
551static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
552{
553 struct rtl8169_private *tp = netdev_priv(dev);
554 void __iomem *ioaddr = tp->mmio_addr;
555
556 return mdio_read(ioaddr, location);
557}
558
559static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
560{
561 unsigned int i;
562
563 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
564 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
565
566 for (i = 0; i < 100; i++) {
567 if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
568 break;
569 udelay(10);
570 }
571}
572
573static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
574{
575 u16 value = 0xffff;
576 unsigned int i;
577
578 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
579
580 for (i = 0; i < 100; i++) {
581 if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
582 value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
583 break;
584 }
585 udelay(10);
586 }
587
588 return value;
589}
590
591static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
592{
593 unsigned int i;
594
595 RTL_W32(CSIDR, value);
596 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
597 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
598
599 for (i = 0; i < 100; i++) {
600 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
601 break;
602 udelay(10);
603 }
604}
605
606static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
607{
608 u32 value = ~0x00;
609 unsigned int i;
610
611 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
612 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
613
614 for (i = 0; i < 100; i++) {
615 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
616 value = RTL_R32(CSIDR);
617 break;
618 }
619 udelay(10);
620 }
621
622 return value;
623}
624
485static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) 625static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
486{ 626{
487 RTL_W16(IntrMask, 0x0000); 627 RTL_W16(IntrMask, 0x0000);
@@ -705,8 +845,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
705 } 845 }
706 } 846 }
707 847
708 /* The 8100e/8101e do Fast Ethernet only. */ 848 /* The 8100e/8101e/8102e do Fast Ethernet only. */
709 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 849 if ((tp->mac_version == RTL_GIGA_MAC_VER_07) ||
850 (tp->mac_version == RTL_GIGA_MAC_VER_08) ||
851 (tp->mac_version == RTL_GIGA_MAC_VER_09) ||
852 (tp->mac_version == RTL_GIGA_MAC_VER_10) ||
853 (tp->mac_version == RTL_GIGA_MAC_VER_13) ||
710 (tp->mac_version == RTL_GIGA_MAC_VER_14) || 854 (tp->mac_version == RTL_GIGA_MAC_VER_14) ||
711 (tp->mac_version == RTL_GIGA_MAC_VER_15) || 855 (tp->mac_version == RTL_GIGA_MAC_VER_15) ||
712 (tp->mac_version == RTL_GIGA_MAC_VER_16)) { 856 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
@@ -850,7 +994,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
850 994
851#endif 995#endif
852 996
853static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) 997static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
854{ 998{
855 struct rtl8169_private *tp = netdev_priv(dev); 999 struct rtl8169_private *tp = netdev_priv(dev);
856 void __iomem *ioaddr = tp->mmio_addr; 1000 void __iomem *ioaddr = tp->mmio_addr;
@@ -867,65 +1011,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
867 1011
868 cmd->speed = SPEED_1000; 1012 cmd->speed = SPEED_1000;
869 cmd->duplex = DUPLEX_FULL; /* Always set */ 1013 cmd->duplex = DUPLEX_FULL; /* Always set */
1014
1015 return 0;
870} 1016}
871 1017
872static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) 1018static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
873{ 1019{
874 struct rtl8169_private *tp = netdev_priv(dev); 1020 struct rtl8169_private *tp = netdev_priv(dev);
875 void __iomem *ioaddr = tp->mmio_addr; 1021
876 u8 status; 1022 return mii_ethtool_gset(&tp->mii, cmd);
877
878 cmd->supported = SUPPORTED_10baseT_Half |
879 SUPPORTED_10baseT_Full |
880 SUPPORTED_100baseT_Half |
881 SUPPORTED_100baseT_Full |
882 SUPPORTED_1000baseT_Full |
883 SUPPORTED_Autoneg |
884 SUPPORTED_TP;
885
886 cmd->autoneg = 1;
887 cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
888
889 if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
890 cmd->advertising |= ADVERTISED_10baseT_Half;
891 if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
892 cmd->advertising |= ADVERTISED_10baseT_Full;
893 if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
894 cmd->advertising |= ADVERTISED_100baseT_Half;
895 if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
896 cmd->advertising |= ADVERTISED_100baseT_Full;
897 if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
898 cmd->advertising |= ADVERTISED_1000baseT_Full;
899
900 status = RTL_R8(PHYstatus);
901
902 if (status & _1000bpsF)
903 cmd->speed = SPEED_1000;
904 else if (status & _100bps)
905 cmd->speed = SPEED_100;
906 else if (status & _10bps)
907 cmd->speed = SPEED_10;
908
909 if (status & TxFlowCtrl)
910 cmd->advertising |= ADVERTISED_Asym_Pause;
911 if (status & RxFlowCtrl)
912 cmd->advertising |= ADVERTISED_Pause;
913
914 cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
915 DUPLEX_FULL : DUPLEX_HALF;
916} 1023}
917 1024
918static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1025static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
919{ 1026{
920 struct rtl8169_private *tp = netdev_priv(dev); 1027 struct rtl8169_private *tp = netdev_priv(dev);
921 unsigned long flags; 1028 unsigned long flags;
1029 int rc;
922 1030
923 spin_lock_irqsave(&tp->lock, flags); 1031 spin_lock_irqsave(&tp->lock, flags);
924 1032
925 tp->get_settings(dev, cmd); 1033 rc = tp->get_settings(dev, cmd);
926 1034
927 spin_unlock_irqrestore(&tp->lock, flags); 1035 spin_unlock_irqrestore(&tp->lock, flags);
928 return 0; 1036 return rc;
929} 1037}
930 1038
931static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1039static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1116,8 +1224,17 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1116 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1224 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1117 1225
1118 /* 8101 family. */ 1226 /* 8101 family. */
1227 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
1228 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
1229 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
1230 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
1231 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
1232 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
1119 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, 1233 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
1234 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
1120 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, 1235 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
1236 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
1237 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
1121 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, 1238 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
1122 /* FIXME: where did these entries come from ? -- FR */ 1239 /* FIXME: where did these entries come from ? -- FR */
1123 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, 1240 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
@@ -1279,6 +1396,22 @@ static void rtl8168cx_hw_phy_config(void __iomem *ioaddr)
1279 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1396 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1280} 1397}
1281 1398
1399static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
1400{
1401 struct phy_reg phy_reg_init[] = {
1402 { 0x1f, 0x0003 },
1403 { 0x08, 0x441d },
1404 { 0x01, 0x9100 },
1405 { 0x1f, 0x0000 }
1406 };
1407
1408 mdio_write(ioaddr, 0x1f, 0x0000);
1409 mdio_patch(ioaddr, 0x11, 1 << 12);
1410 mdio_patch(ioaddr, 0x19, 1 << 13);
1411
1412 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1413}
1414
1282static void rtl_hw_phy_config(struct net_device *dev) 1415static void rtl_hw_phy_config(struct net_device *dev)
1283{ 1416{
1284 struct rtl8169_private *tp = netdev_priv(dev); 1417 struct rtl8169_private *tp = netdev_priv(dev);
@@ -1296,6 +1429,11 @@ static void rtl_hw_phy_config(struct net_device *dev)
1296 case RTL_GIGA_MAC_VER_04: 1429 case RTL_GIGA_MAC_VER_04:
1297 rtl8169sb_hw_phy_config(ioaddr); 1430 rtl8169sb_hw_phy_config(ioaddr);
1298 break; 1431 break;
1432 case RTL_GIGA_MAC_VER_07:
1433 case RTL_GIGA_MAC_VER_08:
1434 case RTL_GIGA_MAC_VER_09:
1435 rtl8102e_hw_phy_config(ioaddr);
1436 break;
1299 case RTL_GIGA_MAC_VER_18: 1437 case RTL_GIGA_MAC_VER_18:
1300 rtl8168cp_hw_phy_config(ioaddr); 1438 rtl8168cp_hw_phy_config(ioaddr);
1301 break; 1439 break;
@@ -1513,7 +1651,7 @@ static const struct rtl_cfg_info {
1513 unsigned int align; 1651 unsigned int align;
1514 u16 intr_event; 1652 u16 intr_event;
1515 u16 napi_event; 1653 u16 napi_event;
1516 unsigned msi; 1654 unsigned features;
1517} rtl_cfg_infos [] = { 1655} rtl_cfg_infos [] = {
1518 [RTL_CFG_0] = { 1656 [RTL_CFG_0] = {
1519 .hw_start = rtl_hw_start_8169, 1657 .hw_start = rtl_hw_start_8169,
@@ -1522,7 +1660,7 @@ static const struct rtl_cfg_info {
1522 .intr_event = SYSErr | LinkChg | RxOverflow | 1660 .intr_event = SYSErr | LinkChg | RxOverflow |
1523 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1661 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1524 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, 1662 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1525 .msi = 0 1663 .features = RTL_FEATURE_GMII
1526 }, 1664 },
1527 [RTL_CFG_1] = { 1665 [RTL_CFG_1] = {
1528 .hw_start = rtl_hw_start_8168, 1666 .hw_start = rtl_hw_start_8168,
@@ -1531,7 +1669,7 @@ static const struct rtl_cfg_info {
1531 .intr_event = SYSErr | LinkChg | RxOverflow | 1669 .intr_event = SYSErr | LinkChg | RxOverflow |
1532 TxErr | TxOK | RxOK | RxErr, 1670 TxErr | TxOK | RxOK | RxErr,
1533 .napi_event = TxErr | TxOK | RxOK | RxOverflow, 1671 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
1534 .msi = RTL_FEATURE_MSI 1672 .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
1535 }, 1673 },
1536 [RTL_CFG_2] = { 1674 [RTL_CFG_2] = {
1537 .hw_start = rtl_hw_start_8101, 1675 .hw_start = rtl_hw_start_8101,
@@ -1540,7 +1678,7 @@ static const struct rtl_cfg_info {
1540 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout | 1678 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
1541 RxFIFOOver | TxErr | TxOK | RxOK | RxErr, 1679 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1542 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow, 1680 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
1543 .msi = RTL_FEATURE_MSI 1681 .features = RTL_FEATURE_MSI
1544 } 1682 }
1545}; 1683};
1546 1684
@@ -1552,7 +1690,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
1552 u8 cfg2; 1690 u8 cfg2;
1553 1691
1554 cfg2 = RTL_R8(Config2) & ~MSIEnable; 1692 cfg2 = RTL_R8(Config2) & ~MSIEnable;
1555 if (cfg->msi) { 1693 if (cfg->features & RTL_FEATURE_MSI) {
1556 if (pci_enable_msi(pdev)) { 1694 if (pci_enable_msi(pdev)) {
1557 dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); 1695 dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
1558 } else { 1696 } else {
@@ -1578,6 +1716,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1578 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; 1716 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
1579 const unsigned int region = cfg->region; 1717 const unsigned int region = cfg->region;
1580 struct rtl8169_private *tp; 1718 struct rtl8169_private *tp;
1719 struct mii_if_info *mii;
1581 struct net_device *dev; 1720 struct net_device *dev;
1582 void __iomem *ioaddr; 1721 void __iomem *ioaddr;
1583 unsigned int i; 1722 unsigned int i;
@@ -1602,6 +1741,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1602 tp->pci_dev = pdev; 1741 tp->pci_dev = pdev;
1603 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 1742 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
1604 1743
1744 mii = &tp->mii;
1745 mii->dev = dev;
1746 mii->mdio_read = rtl_mdio_read;
1747 mii->mdio_write = rtl_mdio_write;
1748 mii->phy_id_mask = 0x1f;
1749 mii->reg_num_mask = 0x1f;
1750 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
1751
1605 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 1752 /* enable device (incl. PCI PM wakeup and hotplug setup) */
1606 rc = pci_enable_device(pdev); 1753 rc = pci_enable_device(pdev);
1607 if (rc < 0) { 1754 if (rc < 0) {
@@ -1670,6 +1817,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1670 goto err_out_free_res_4; 1817 goto err_out_free_res_4;
1671 } 1818 }
1672 1819
1820 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1821 if (!tp->pcie_cap && netif_msg_probe(tp))
1822 dev_info(&pdev->dev, "no PCI Express capability\n");
1823
1673 /* Unneeded ? Don't mess with Mrs. Murphy. */ 1824 /* Unneeded ? Don't mess with Mrs. Murphy. */
1674 rtl8169_irq_mask_and_ack(ioaddr); 1825 rtl8169_irq_mask_and_ack(ioaddr);
1675 1826
@@ -2061,12 +2212,51 @@ static void rtl_hw_start_8169(struct net_device *dev)
2061 RTL_W16(IntrMask, tp->intr_event); 2212 RTL_W16(IntrMask, tp->intr_event);
2062} 2213}
2063 2214
2215static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
2216{
2217 struct net_device *dev = pci_get_drvdata(pdev);
2218 struct rtl8169_private *tp = netdev_priv(dev);
2219 int cap = tp->pcie_cap;
2220
2221 if (cap) {
2222 u16 ctl;
2223
2224 pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
2225 ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
2226 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
2227 }
2228}
2229
2230static void rtl_csi_access_enable(void __iomem *ioaddr)
2231{
2232 u32 csi;
2233
2234 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
2235 rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000);
2236}
2237
2238struct ephy_info {
2239 unsigned int offset;
2240 u16 mask;
2241 u16 bits;
2242};
2243
2244static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len)
2245{
2246 u16 w;
2247
2248 while (len-- > 0) {
2249 w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
2250 rtl_ephy_write(ioaddr, e->offset, w);
2251 e++;
2252 }
2253}
2254
2064static void rtl_hw_start_8168(struct net_device *dev) 2255static void rtl_hw_start_8168(struct net_device *dev)
2065{ 2256{
2066 struct rtl8169_private *tp = netdev_priv(dev); 2257 struct rtl8169_private *tp = netdev_priv(dev);
2067 void __iomem *ioaddr = tp->mmio_addr; 2258 void __iomem *ioaddr = tp->mmio_addr;
2068 struct pci_dev *pdev = tp->pci_dev; 2259 struct pci_dev *pdev = tp->pci_dev;
2069 u8 ctl;
2070 2260
2071 RTL_W8(Cfg9346, Cfg9346_Unlock); 2261 RTL_W8(Cfg9346, Cfg9346_Unlock);
2072 2262
@@ -2080,10 +2270,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
2080 2270
2081 RTL_W16(CPlusCmd, tp->cp_cmd); 2271 RTL_W16(CPlusCmd, tp->cp_cmd);
2082 2272
2083 /* Tx performance tweak. */ 2273 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
2084 pci_read_config_byte(pdev, 0x69, &ctl);
2085 ctl = (ctl & ~0x70) | 0x50;
2086 pci_write_config_byte(pdev, 0x69, ctl);
2087 2274
2088 RTL_W16(IntrMitigate, 0x5151); 2275 RTL_W16(IntrMitigate, 0x5151);
2089 2276
@@ -2099,8 +2286,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
2099 2286
2100 RTL_R8(IntrMask); 2287 RTL_R8(IntrMask);
2101 2288
2102 RTL_W32(RxMissed, 0);
2103
2104 rtl_set_rx_mode(dev); 2289 rtl_set_rx_mode(dev);
2105 2290
2106 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 2291 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2110,6 +2295,70 @@ static void rtl_hw_start_8168(struct net_device *dev)
2110 RTL_W16(IntrMask, tp->intr_event); 2295 RTL_W16(IntrMask, tp->intr_event);
2111} 2296}
2112 2297
2298#define R810X_CPCMD_QUIRK_MASK (\
2299 EnableBist | \
2300 Mac_dbgo_oe | \
2301 Force_half_dup | \
2302 Force_half_dup | \
2303 Force_txflow_en | \
2304 Cxpl_dbg_sel | \
2305 ASF | \
2306 PktCntrDisable | \
2307 PCIDAC | \
2308 PCIMulRW)
2309
2310static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
2311{
2312 static struct ephy_info e_info_8102e_1[] = {
2313 { 0x01, 0, 0x6e65 },
2314 { 0x02, 0, 0x091f },
2315 { 0x03, 0, 0xc2f9 },
2316 { 0x06, 0, 0xafb5 },
2317 { 0x07, 0, 0x0e00 },
2318 { 0x19, 0, 0xec80 },
2319 { 0x01, 0, 0x2e65 },
2320 { 0x01, 0, 0x6e65 }
2321 };
2322 u8 cfg1;
2323
2324 rtl_csi_access_enable(ioaddr);
2325
2326 RTL_W8(DBG_REG, FIX_NAK_1);
2327
2328 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
2329
2330 RTL_W8(Config1,
2331 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
2332 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
2333
2334 cfg1 = RTL_R8(Config1);
2335 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
2336 RTL_W8(Config1, cfg1 & ~LEDS0);
2337
2338 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
2339
2340 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
2341}
2342
2343static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
2344{
2345 rtl_csi_access_enable(ioaddr);
2346
2347 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
2348
2349 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
2350 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
2351
2352 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
2353}
2354
2355static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
2356{
2357 rtl_hw_start_8102e_2(ioaddr, pdev);
2358
2359 rtl_ephy_write(ioaddr, 0x03, 0xc2f9);
2360}
2361
2113static void rtl_hw_start_8101(struct net_device *dev) 2362static void rtl_hw_start_8101(struct net_device *dev)
2114{ 2363{
2115 struct rtl8169_private *tp = netdev_priv(dev); 2364 struct rtl8169_private *tp = netdev_priv(dev);
@@ -2118,8 +2367,26 @@ static void rtl_hw_start_8101(struct net_device *dev)
2118 2367
2119 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) || 2368 if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
2120 (tp->mac_version == RTL_GIGA_MAC_VER_16)) { 2369 (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
2121 pci_write_config_word(pdev, 0x68, 0x00); 2370 int cap = tp->pcie_cap;
2122 pci_write_config_word(pdev, 0x69, 0x08); 2371
2372 if (cap) {
2373 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL,
2374 PCI_EXP_DEVCTL_NOSNOOP_EN);
2375 }
2376 }
2377
2378 switch (tp->mac_version) {
2379 case RTL_GIGA_MAC_VER_07:
2380 rtl_hw_start_8102e_1(ioaddr, pdev);
2381 break;
2382
2383 case RTL_GIGA_MAC_VER_08:
2384 rtl_hw_start_8102e_3(ioaddr, pdev);
2385 break;
2386
2387 case RTL_GIGA_MAC_VER_09:
2388 rtl_hw_start_8102e_2(ioaddr, pdev);
2389 break;
2123 } 2390 }
2124 2391
2125 RTL_W8(Cfg9346, Cfg9346_Unlock); 2392 RTL_W8(Cfg9346, Cfg9346_Unlock);
@@ -2143,8 +2410,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
2143 2410
2144 RTL_R8(IntrMask); 2411 RTL_R8(IntrMask);
2145 2412
2146 RTL_W32(RxMissed, 0);
2147
2148 rtl_set_rx_mode(dev); 2413 rtl_set_rx_mode(dev);
2149 2414
2150 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 2415 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -2922,6 +3187,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
2922 return work_done; 3187 return work_done;
2923} 3188}
2924 3189
3190static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
3191{
3192 struct rtl8169_private *tp = netdev_priv(dev);
3193
3194 if (tp->mac_version > RTL_GIGA_MAC_VER_06)
3195 return;
3196
3197 dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
3198 RTL_W32(RxMissed, 0);
3199}
3200
2925static void rtl8169_down(struct net_device *dev) 3201static void rtl8169_down(struct net_device *dev)
2926{ 3202{
2927 struct rtl8169_private *tp = netdev_priv(dev); 3203 struct rtl8169_private *tp = netdev_priv(dev);
@@ -2939,9 +3215,7 @@ core_down:
2939 3215
2940 rtl8169_asic_down(ioaddr); 3216 rtl8169_asic_down(ioaddr);
2941 3217
2942 /* Update the error counts. */ 3218 rtl8169_rx_missed(dev, ioaddr);
2943 dev->stats.rx_missed_errors += RTL_R32(RxMissed);
2944 RTL_W32(RxMissed, 0);
2945 3219
2946 spin_unlock_irq(&tp->lock); 3220 spin_unlock_irq(&tp->lock);
2947 3221
@@ -3063,8 +3337,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
3063 3337
3064 if (netif_running(dev)) { 3338 if (netif_running(dev)) {
3065 spin_lock_irqsave(&tp->lock, flags); 3339 spin_lock_irqsave(&tp->lock, flags);
3066 dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3340 rtl8169_rx_missed(dev, ioaddr);
3067 RTL_W32(RxMissed, 0);
3068 spin_unlock_irqrestore(&tp->lock, flags); 3341 spin_unlock_irqrestore(&tp->lock, flags);
3069 } 3342 }
3070 3343
@@ -3089,8 +3362,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
3089 3362
3090 rtl8169_asic_down(ioaddr); 3363 rtl8169_asic_down(ioaddr);
3091 3364
3092 dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3365 rtl8169_rx_missed(dev, ioaddr);
3093 RTL_W32(RxMissed, 0);
3094 3366
3095 spin_unlock_irq(&tp->lock); 3367 spin_unlock_irq(&tp->lock);
3096 3368
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index a2b073097e5c..6a1375f9cbb8 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -371,9 +371,6 @@ static void s2io_vlan_rx_register(struct net_device *dev,
371 flags[i]); 371 flags[i]);
372} 372}
373 373
374/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
375static int vlan_strip_flag;
376
377/* Unregister the vlan */ 374/* Unregister the vlan */
378static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 375static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379{ 376{
@@ -2303,7 +2300,7 @@ static int start_nic(struct s2io_nic *nic)
2303 val64 = readq(&bar0->rx_pa_cfg); 2300 val64 = readq(&bar0->rx_pa_cfg);
2304 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 2301 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2305 writeq(val64, &bar0->rx_pa_cfg); 2302 writeq(val64, &bar0->rx_pa_cfg);
2306 vlan_strip_flag = 0; 2303 nic->vlan_strip_flag = 0;
2307 } 2304 }
2308 2305
2309 /* 2306 /*
@@ -3136,7 +3133,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3136 if (skb == NULL) { 3133 if (skb == NULL) {
3137 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); 3134 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3138 DBG_PRINT(ERR_DBG, "%s: Null skb ", 3135 DBG_PRINT(ERR_DBG, "%s: Null skb ",
3139 __FUNCTION__); 3136 __func__);
3140 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); 3137 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3141 return; 3138 return;
3142 } 3139 }
@@ -3496,7 +3493,7 @@ static void s2io_reset(struct s2io_nic * sp)
3496 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; 3493 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3497 3494
3498 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n", 3495 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3499 __FUNCTION__, sp->dev->name); 3496 __func__, sp->dev->name);
3500 3497
3501 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3498 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3502 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); 3499 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
@@ -3518,7 +3515,7 @@ static void s2io_reset(struct s2io_nic * sp)
3518 } 3515 }
3519 3516
3520 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) { 3517 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3521 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__); 3518 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__);
3522 } 3519 }
3523 3520
3524 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); 3521 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
@@ -3768,7 +3765,7 @@ static void restore_xmsi_data(struct s2io_nic *nic)
3768 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); 3765 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3769 writeq(val64, &bar0->xmsi_access); 3766 writeq(val64, &bar0->xmsi_access);
3770 if (wait_for_msix_trans(nic, msix_index)) { 3767 if (wait_for_msix_trans(nic, msix_index)) {
3771 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3768 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3772 continue; 3769 continue;
3773 } 3770 }
3774 } 3771 }
@@ -3789,7 +3786,7 @@ static void store_xmsi_data(struct s2io_nic *nic)
3789 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); 3786 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
3790 writeq(val64, &bar0->xmsi_access); 3787 writeq(val64, &bar0->xmsi_access);
3791 if (wait_for_msix_trans(nic, msix_index)) { 3788 if (wait_for_msix_trans(nic, msix_index)) {
3792 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3789 DBG_PRINT(ERR_DBG, "failed in %s\n", __func__);
3793 continue; 3790 continue;
3794 } 3791 }
3795 addr = readq(&bar0->xmsi_address); 3792 addr = readq(&bar0->xmsi_address);
@@ -3812,7 +3809,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3812 GFP_KERNEL); 3809 GFP_KERNEL);
3813 if (!nic->entries) { 3810 if (!nic->entries) {
3814 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ 3811 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3815 __FUNCTION__); 3812 __func__);
3816 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3813 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3817 return -ENOMEM; 3814 return -ENOMEM;
3818 } 3815 }
@@ -3826,7 +3823,7 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3826 GFP_KERNEL); 3823 GFP_KERNEL);
3827 if (!nic->s2io_entries) { 3824 if (!nic->s2io_entries) {
3828 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3825 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3829 __FUNCTION__); 3826 __func__);
3830 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3827 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3831 kfree(nic->entries); 3828 kfree(nic->entries);
3832 nic->mac_control.stats_info->sw_stat.mem_freed 3829 nic->mac_control.stats_info->sw_stat.mem_freed
@@ -5010,7 +5007,7 @@ static void s2io_set_multicast(struct net_device *dev)
5010 val64 = readq(&bar0->rx_pa_cfg); 5007 val64 = readq(&bar0->rx_pa_cfg);
5011 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 5008 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
5012 writeq(val64, &bar0->rx_pa_cfg); 5009 writeq(val64, &bar0->rx_pa_cfg);
5013 vlan_strip_flag = 0; 5010 sp->vlan_strip_flag = 0;
5014 } 5011 }
5015 5012
5016 val64 = readq(&bar0->mac_cfg); 5013 val64 = readq(&bar0->mac_cfg);
@@ -5032,7 +5029,7 @@ static void s2io_set_multicast(struct net_device *dev)
5032 val64 = readq(&bar0->rx_pa_cfg); 5029 val64 = readq(&bar0->rx_pa_cfg);
5033 val64 |= RX_PA_CFG_STRIP_VLAN_TAG; 5030 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5034 writeq(val64, &bar0->rx_pa_cfg); 5031 writeq(val64, &bar0->rx_pa_cfg);
5035 vlan_strip_flag = 1; 5032 sp->vlan_strip_flag = 1;
5036 } 5033 }
5037 5034
5038 val64 = readq(&bar0->mac_cfg); 5035 val64 = readq(&bar0->mac_cfg);
@@ -6746,7 +6743,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6746 ret = s2io_card_up(sp); 6743 ret = s2io_card_up(sp);
6747 if (ret) { 6744 if (ret) {
6748 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 6745 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6749 __FUNCTION__); 6746 __func__);
6750 return ret; 6747 return ret;
6751 } 6748 }
6752 s2io_wake_all_tx_queue(sp); 6749 s2io_wake_all_tx_queue(sp);
@@ -7530,7 +7527,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7530 default: 7527 default:
7531 DBG_PRINT(ERR_DBG, 7528 DBG_PRINT(ERR_DBG,
7532 "%s: Samadhana!!\n", 7529 "%s: Samadhana!!\n",
7533 __FUNCTION__); 7530 __func__);
7534 BUG(); 7531 BUG();
7535 } 7532 }
7536 } 7533 }
@@ -7781,7 +7778,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7781 return -ENOMEM; 7778 return -ENOMEM;
7782 } 7779 }
7783 if ((ret = pci_request_regions(pdev, s2io_driver_name))) { 7780 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7784 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret); 7781 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret);
7785 pci_disable_device(pdev); 7782 pci_disable_device(pdev);
7786 return -ENODEV; 7783 return -ENODEV;
7787 } 7784 }
@@ -7998,7 +7995,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7998 if (sp->device_type & XFRAME_II_DEVICE) { 7995 if (sp->device_type & XFRAME_II_DEVICE) {
7999 mode = s2io_verify_pci_mode(sp); 7996 mode = s2io_verify_pci_mode(sp);
8000 if (mode < 0) { 7997 if (mode < 0) {
8001 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__); 7998 DBG_PRINT(ERR_DBG, "%s: ", __func__);
8002 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n"); 7999 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8003 ret = -EBADSLT; 8000 ret = -EBADSLT;
8004 goto set_swap_failed; 8001 goto set_swap_failed;
@@ -8175,8 +8172,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8175 break; 8172 break;
8176 } 8173 }
8177 if (sp->config.multiq) { 8174 if (sp->config.multiq) {
8178 for (i = 0; i < sp->config.tx_fifo_num; i++) 8175 for (i = 0; i < sp->config.tx_fifo_num; i++)
8179 mac_control->fifos[i].multiq = config->multiq; 8176 mac_control->fifos[i].multiq = config->multiq;
8180 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n", 8177 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8181 dev->name); 8178 dev->name);
8182 } else 8179 } else
@@ -8206,6 +8203,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8206 /* Initialize device name */ 8203 /* Initialize device name */
8207 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 8204 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8208 8205
8206 if (vlan_tag_strip)
8207 sp->vlan_strip_flag = 1;
8208 else
8209 sp->vlan_strip_flag = 0;
8210
8209 /* 8211 /*
8210 * Make Link state as off at this point, when the Link change 8212 * Make Link state as off at this point, when the Link change
8211 * interrupt comes the state will be automatically changed to 8213 * interrupt comes the state will be automatically changed to
@@ -8299,7 +8301,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8299 8301
8300 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { 8302 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8301 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n", 8303 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8302 __FUNCTION__); 8304 __func__);
8303 return -1; 8305 return -1;
8304 } 8306 }
8305 8307
@@ -8311,7 +8313,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8311 * If vlan stripping is disabled and the frame is VLAN tagged, 8313 * If vlan stripping is disabled and the frame is VLAN tagged,
8312 * shift the offset by the VLAN header size bytes. 8314 * shift the offset by the VLAN header size bytes.
8313 */ 8315 */
8314 if ((!vlan_strip_flag) && 8316 if ((!sp->vlan_strip_flag) &&
8315 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) 8317 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8316 ip_off += HEADER_VLAN_SIZE; 8318 ip_off += HEADER_VLAN_SIZE;
8317 } else { 8319 } else {
@@ -8330,7 +8332,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8330static int check_for_socket_match(struct lro *lro, struct iphdr *ip, 8332static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8331 struct tcphdr *tcp) 8333 struct tcphdr *tcp)
8332{ 8334{
8333 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8335 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8334 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) || 8336 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8335 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest)) 8337 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8336 return -1; 8338 return -1;
@@ -8345,7 +8347,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8345static void initiate_new_session(struct lro *lro, u8 *l2h, 8347static void initiate_new_session(struct lro *lro, u8 *l2h,
8346 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag) 8348 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8347{ 8349{
8348 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8350 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8349 lro->l2h = l2h; 8351 lro->l2h = l2h;
8350 lro->iph = ip; 8352 lro->iph = ip;
8351 lro->tcph = tcp; 8353 lro->tcph = tcp;
@@ -8375,7 +8377,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8375 struct tcphdr *tcp = lro->tcph; 8377 struct tcphdr *tcp = lro->tcph;
8376 __sum16 nchk; 8378 __sum16 nchk;
8377 struct stat_block *statinfo = sp->mac_control.stats_info; 8379 struct stat_block *statinfo = sp->mac_control.stats_info;
8378 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8380 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8379 8381
8380 /* Update L3 header */ 8382 /* Update L3 header */
8381 ip->tot_len = htons(lro->total_len); 8383 ip->tot_len = htons(lro->total_len);
@@ -8403,7 +8405,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8403static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, 8405static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8404 struct tcphdr *tcp, u32 l4_pyld) 8406 struct tcphdr *tcp, u32 l4_pyld)
8405{ 8407{
8406 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8408 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8407 lro->total_len += l4_pyld; 8409 lro->total_len += l4_pyld;
8408 lro->frags_len += l4_pyld; 8410 lro->frags_len += l4_pyld;
8409 lro->tcp_next_seq += l4_pyld; 8411 lro->tcp_next_seq += l4_pyld;
@@ -8427,7 +8429,7 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8427{ 8429{
8428 u8 *ptr; 8430 u8 *ptr;
8429 8431
8430 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8432 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__);
8431 8433
8432 if (!tcp_pyld_len) { 8434 if (!tcp_pyld_len) {
8433 /* Runt frame or a pure ack */ 8435 /* Runt frame or a pure ack */
@@ -8509,7 +8511,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8509 8511
8510 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) { 8512 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8511 DBG_PRINT(INFO_DBG, "%s:Out of order. expected " 8513 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8512 "0x%x, actual 0x%x\n", __FUNCTION__, 8514 "0x%x, actual 0x%x\n", __func__,
8513 (*lro)->tcp_next_seq, 8515 (*lro)->tcp_next_seq,
8514 ntohl(tcph->seq)); 8516 ntohl(tcph->seq));
8515 8517
@@ -8549,7 +8551,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8549 8551
8550 if (ret == 0) { /* sessions exceeded */ 8552 if (ret == 0) { /* sessions exceeded */
8551 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n", 8553 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
8552 __FUNCTION__); 8554 __func__);
8553 *lro = NULL; 8555 *lro = NULL;
8554 return ret; 8556 return ret;
8555 } 8557 }
@@ -8571,7 +8573,7 @@ s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
8571 break; 8573 break;
8572 default: 8574 default:
8573 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n", 8575 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8574 __FUNCTION__); 8576 __func__);
8575 break; 8577 break;
8576 } 8578 }
8577 8579
@@ -8592,7 +8594,7 @@ static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8592 8594
8593 skb->protocol = eth_type_trans(skb, dev); 8595 skb->protocol = eth_type_trans(skb, dev);
8594 if (sp->vlgrp && vlan_tag 8596 if (sp->vlgrp && vlan_tag
8595 && (vlan_strip_flag)) { 8597 && (sp->vlan_strip_flag)) {
8596 /* Queueing the vlan frame to the upper layer */ 8598 /* Queueing the vlan frame to the upper layer */
8597 if (sp->config.napi) 8599 if (sp->config.napi)
8598 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag); 8600 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 6722a2f7d091..55cb943f23f8 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -962,6 +962,7 @@ struct s2io_nic {
962 int task_flag; 962 int task_flag;
963 unsigned long long start_time; 963 unsigned long long start_time;
964 struct vlan_group *vlgrp; 964 struct vlan_group *vlgrp;
965 int vlan_strip_flag;
965#define MSIX_FLG 0xA5 966#define MSIX_FLG 0xA5
966 int num_entries; 967 int num_entries;
967 struct msix_entry *entries; 968 struct msix_entry *entries;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index fe41e4ec21ec..ce10cfa1ee53 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2069,9 +2069,10 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
2069static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) 2069static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2070{ 2070{
2071 struct sbmac_softc *sc = netdev_priv(dev); 2071 struct sbmac_softc *sc = netdev_priv(dev);
2072 unsigned long flags;
2072 2073
2073 /* lock eth irq */ 2074 /* lock eth irq */
2074 spin_lock_irq (&sc->sbm_lock); 2075 spin_lock_irqsave(&sc->sbm_lock, flags);
2075 2076
2076 /* 2077 /*
2077 * Put the buffer on the transmit ring. If we 2078 * Put the buffer on the transmit ring. If we
@@ -2081,14 +2082,14 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2081 if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { 2082 if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
2082 /* XXX save skb that we could not send */ 2083 /* XXX save skb that we could not send */
2083 netif_stop_queue(dev); 2084 netif_stop_queue(dev);
2084 spin_unlock_irq(&sc->sbm_lock); 2085 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2085 2086
2086 return 1; 2087 return 1;
2087 } 2088 }
2088 2089
2089 dev->trans_start = jiffies; 2090 dev->trans_start = jiffies;
2090 2091
2091 spin_unlock_irq (&sc->sbm_lock); 2092 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2092 2093
2093 return 0; 2094 return 0;
2094} 2095}
@@ -2568,14 +2569,15 @@ static void sbmac_mii_poll(struct net_device *dev)
2568static void sbmac_tx_timeout (struct net_device *dev) 2569static void sbmac_tx_timeout (struct net_device *dev)
2569{ 2570{
2570 struct sbmac_softc *sc = netdev_priv(dev); 2571 struct sbmac_softc *sc = netdev_priv(dev);
2572 unsigned long flags;
2571 2573
2572 spin_lock_irq (&sc->sbm_lock); 2574 spin_lock_irqsave(&sc->sbm_lock, flags);
2573 2575
2574 2576
2575 dev->trans_start = jiffies; 2577 dev->trans_start = jiffies;
2576 dev->stats.tx_errors++; 2578 dev->stats.tx_errors++;
2577 2579
2578 spin_unlock_irq (&sc->sbm_lock); 2580 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2579 2581
2580 printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); 2582 printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
2581} 2583}
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index 2c79d27404e0..d95c21828014 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -52,9 +52,9 @@
52 * 52 *
53 * The maximum width mask that can be generated is 64 bits. 53 * The maximum width mask that can be generated is 64 bits.
54 */ 54 */
55#define EFX_MASK64(field) \ 55#define EFX_MASK64(width) \
56 (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \ 56 ((width) == 64 ? ~((u64) 0) : \
57 (((((u64) 1) << EFX_WIDTH(field))) - 1)) 57 (((((u64) 1) << (width))) - 1))
58 58
59/* Mask equal in width to the specified field. 59/* Mask equal in width to the specified field.
60 * 60 *
@@ -63,9 +63,9 @@
63 * The maximum width mask that can be generated is 32 bits. Use 63 * The maximum width mask that can be generated is 32 bits. Use
64 * EFX_MASK64 for higher width fields. 64 * EFX_MASK64 for higher width fields.
65 */ 65 */
66#define EFX_MASK32(field) \ 66#define EFX_MASK32(width) \
67 (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \ 67 ((width) == 32 ? ~((u32) 0) : \
68 (((((u32) 1) << EFX_WIDTH(field))) - 1)) 68 (((((u32) 1) << (width))) - 1))
69 69
70/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ 70/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
71typedef union efx_dword { 71typedef union efx_dword {
@@ -138,44 +138,49 @@ typedef union efx_oword {
138 EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) 138 EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
139 139
140#define EFX_EXTRACT_OWORD64(oword, low, high) \ 140#define EFX_EXTRACT_OWORD64(oword, low, high) \
141 (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ 141 ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
142 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) 142 EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
143 EFX_MASK64(high + 1 - low))
143 144
144#define EFX_EXTRACT_QWORD64(qword, low, high) \ 145#define EFX_EXTRACT_QWORD64(qword, low, high) \
145 EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) 146 (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
147 EFX_MASK64(high + 1 - low))
146 148
147#define EFX_EXTRACT_OWORD32(oword, low, high) \ 149#define EFX_EXTRACT_OWORD32(oword, low, high) \
148 (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ 150 ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
149 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ 151 EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
150 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ 152 EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
151 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) 153 EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
154 EFX_MASK32(high + 1 - low))
152 155
153#define EFX_EXTRACT_QWORD32(qword, low, high) \ 156#define EFX_EXTRACT_QWORD32(qword, low, high) \
154 (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ 157 ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
155 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) 158 EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
159 EFX_MASK32(high + 1 - low))
156 160
157#define EFX_EXTRACT_DWORD(dword, low, high) \ 161#define EFX_EXTRACT_DWORD(dword, low, high) \
158 EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) 162 (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
163 EFX_MASK32(high + 1 - low))
159 164
160#define EFX_OWORD_FIELD64(oword, field) \ 165#define EFX_OWORD_FIELD64(oword, field) \
161 (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 166 EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
162 & EFX_MASK64(field)) 167 EFX_HIGH_BIT(field))
163 168
164#define EFX_QWORD_FIELD64(qword, field) \ 169#define EFX_QWORD_FIELD64(qword, field) \
165 (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 170 EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
166 & EFX_MASK64(field)) 171 EFX_HIGH_BIT(field))
167 172
168#define EFX_OWORD_FIELD32(oword, field) \ 173#define EFX_OWORD_FIELD32(oword, field) \
169 (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 174 EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
170 & EFX_MASK32(field)) 175 EFX_HIGH_BIT(field))
171 176
172#define EFX_QWORD_FIELD32(qword, field) \ 177#define EFX_QWORD_FIELD32(qword, field) \
173 (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 178 EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
174 & EFX_MASK32(field)) 179 EFX_HIGH_BIT(field))
175 180
176#define EFX_DWORD_FIELD(dword, field) \ 181#define EFX_DWORD_FIELD(dword, field) \
177 (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ 182 EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
178 & EFX_MASK32(field)) 183 EFX_HIGH_BIT(field))
179 184
180#define EFX_OWORD_IS_ZERO64(oword) \ 185#define EFX_OWORD_IS_ZERO64(oword) \
181 (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) 186 (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
@@ -411,69 +416,102 @@ typedef union efx_oword {
411 * for read-modify-write operations. 416 * for read-modify-write operations.
412 * 417 *
413 */ 418 */
414
415#define EFX_INVERT_OWORD(oword) do { \ 419#define EFX_INVERT_OWORD(oword) do { \
416 (oword).u64[0] = ~((oword).u64[0]); \ 420 (oword).u64[0] = ~((oword).u64[0]); \
417 (oword).u64[1] = ~((oword).u64[1]); \ 421 (oword).u64[1] = ~((oword).u64[1]); \
418 } while (0) 422 } while (0)
419 423
420#define EFX_INSERT_FIELD64(...) \ 424#define EFX_AND_OWORD(oword, from, mask) \
421 cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__)) 425 do { \
426 (oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
427 (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
428 } while (0)
429
430#define EFX_OR_OWORD(oword, from, mask) \
431 do { \
432 (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
433 (oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
434 } while (0)
422 435
423#define EFX_INSERT_FIELD32(...) \ 436#define EFX_INSERT64(min, max, low, high, value) \
424 cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__)) 437 cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
425 438
426#define EFX_INPLACE_MASK64(min, max, field) \ 439#define EFX_INSERT32(min, max, low, high, value) \
427 EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field)) 440 cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
428 441
429#define EFX_INPLACE_MASK32(min, max, field) \ 442#define EFX_INPLACE_MASK64(min, max, low, high) \
430 EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field)) 443 EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
431 444
432#define EFX_SET_OWORD_FIELD64(oword, field, value) do { \ 445#define EFX_INPLACE_MASK32(min, max, low, high) \
446 EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
447
448#define EFX_SET_OWORD64(oword, low, high, value) do { \
433 (oword).u64[0] = (((oword).u64[0] \ 449 (oword).u64[0] = (((oword).u64[0] \
434 & ~EFX_INPLACE_MASK64(0, 63, field)) \ 450 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
435 | EFX_INSERT_FIELD64(0, 63, field, value)); \ 451 | EFX_INSERT64(0, 63, low, high, value)); \
436 (oword).u64[1] = (((oword).u64[1] \ 452 (oword).u64[1] = (((oword).u64[1] \
437 & ~EFX_INPLACE_MASK64(64, 127, field)) \ 453 & ~EFX_INPLACE_MASK64(64, 127, low, high)) \
438 | EFX_INSERT_FIELD64(64, 127, field, value)); \ 454 | EFX_INSERT64(64, 127, low, high, value)); \
439 } while (0) 455 } while (0)
440 456
441#define EFX_SET_QWORD_FIELD64(qword, field, value) do { \ 457#define EFX_SET_QWORD64(qword, low, high, value) do { \
442 (qword).u64[0] = (((qword).u64[0] \ 458 (qword).u64[0] = (((qword).u64[0] \
443 & ~EFX_INPLACE_MASK64(0, 63, field)) \ 459 & ~EFX_INPLACE_MASK64(0, 63, low, high)) \
444 | EFX_INSERT_FIELD64(0, 63, field, value)); \ 460 | EFX_INSERT64(0, 63, low, high, value)); \
445 } while (0) 461 } while (0)
446 462
447#define EFX_SET_OWORD_FIELD32(oword, field, value) do { \ 463#define EFX_SET_OWORD32(oword, low, high, value) do { \
448 (oword).u32[0] = (((oword).u32[0] \ 464 (oword).u32[0] = (((oword).u32[0] \
449 & ~EFX_INPLACE_MASK32(0, 31, field)) \ 465 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
450 | EFX_INSERT_FIELD32(0, 31, field, value)); \ 466 | EFX_INSERT32(0, 31, low, high, value)); \
451 (oword).u32[1] = (((oword).u32[1] \ 467 (oword).u32[1] = (((oword).u32[1] \
452 & ~EFX_INPLACE_MASK32(32, 63, field)) \ 468 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
453 | EFX_INSERT_FIELD32(32, 63, field, value)); \ 469 | EFX_INSERT32(32, 63, low, high, value)); \
454 (oword).u32[2] = (((oword).u32[2] \ 470 (oword).u32[2] = (((oword).u32[2] \
455 & ~EFX_INPLACE_MASK32(64, 95, field)) \ 471 & ~EFX_INPLACE_MASK32(64, 95, low, high)) \
456 | EFX_INSERT_FIELD32(64, 95, field, value)); \ 472 | EFX_INSERT32(64, 95, low, high, value)); \
457 (oword).u32[3] = (((oword).u32[3] \ 473 (oword).u32[3] = (((oword).u32[3] \
458 & ~EFX_INPLACE_MASK32(96, 127, field)) \ 474 & ~EFX_INPLACE_MASK32(96, 127, low, high)) \
459 | EFX_INSERT_FIELD32(96, 127, field, value)); \ 475 | EFX_INSERT32(96, 127, low, high, value)); \
460 } while (0) 476 } while (0)
461 477
462#define EFX_SET_QWORD_FIELD32(qword, field, value) do { \ 478#define EFX_SET_QWORD32(qword, low, high, value) do { \
463 (qword).u32[0] = (((qword).u32[0] \ 479 (qword).u32[0] = (((qword).u32[0] \
464 & ~EFX_INPLACE_MASK32(0, 31, field)) \ 480 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
465 | EFX_INSERT_FIELD32(0, 31, field, value)); \ 481 | EFX_INSERT32(0, 31, low, high, value)); \
466 (qword).u32[1] = (((qword).u32[1] \ 482 (qword).u32[1] = (((qword).u32[1] \
467 & ~EFX_INPLACE_MASK32(32, 63, field)) \ 483 & ~EFX_INPLACE_MASK32(32, 63, low, high)) \
468 | EFX_INSERT_FIELD32(32, 63, field, value)); \ 484 | EFX_INSERT32(32, 63, low, high, value)); \
469 } while (0) 485 } while (0)
470 486
471#define EFX_SET_DWORD_FIELD(dword, field, value) do { \ 487#define EFX_SET_DWORD32(dword, low, high, value) do { \
472 (dword).u32[0] = (((dword).u32[0] \ 488 (dword).u32[0] = (((dword).u32[0] \
473 & ~EFX_INPLACE_MASK32(0, 31, field)) \ 489 & ~EFX_INPLACE_MASK32(0, 31, low, high)) \
474 | EFX_INSERT_FIELD32(0, 31, field, value)); \ 490 | EFX_INSERT32(0, 31, low, high, value)); \
475 } while (0) 491 } while (0)
476 492
493#define EFX_SET_OWORD_FIELD64(oword, field, value) \
494 EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
495 EFX_HIGH_BIT(field), value)
496
497#define EFX_SET_QWORD_FIELD64(qword, field, value) \
498 EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
499 EFX_HIGH_BIT(field), value)
500
501#define EFX_SET_OWORD_FIELD32(oword, field, value) \
502 EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
503 EFX_HIGH_BIT(field), value)
504
505#define EFX_SET_QWORD_FIELD32(qword, field, value) \
506 EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
507 EFX_HIGH_BIT(field), value)
508
509#define EFX_SET_DWORD_FIELD(dword, field, value) \
510 EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
511 EFX_HIGH_BIT(field), value)
512
513
514
477#if BITS_PER_LONG == 64 515#if BITS_PER_LONG == 64
478#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 516#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
479#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 517#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
@@ -502,4 +540,10 @@ typedef union efx_oword {
502#define EFX_DMA_TYPE_WIDTH(width) \ 540#define EFX_DMA_TYPE_WIDTH(width) \
503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) 541 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
504 542
543
544/* Static initialiser */
545#define EFX_OWORD32(a, b, c, d) \
546 { .u32 = { __constant_cpu_to_le32(a), __constant_cpu_to_le32(b), \
547 __constant_cpu_to_le32(c), __constant_cpu_to_le32(d) } }
548
505#endif /* EFX_BITFIELD_H */ 549#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index d3d3dd0a1170..99e602373269 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -31,23 +31,23 @@ static void blink_led_timer(unsigned long context)
31 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); 31 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
32} 32}
33 33
34static void board_blink(struct efx_nic *efx, int blink) 34static void board_blink(struct efx_nic *efx, bool blink)
35{ 35{
36 struct efx_blinker *blinker = &efx->board_info.blinker; 36 struct efx_blinker *blinker = &efx->board_info.blinker;
37 37
38 /* The rtnl mutex serialises all ethtool ioctls, so 38 /* The rtnl mutex serialises all ethtool ioctls, so
39 * nothing special needs doing here. */ 39 * nothing special needs doing here. */
40 if (blink) { 40 if (blink) {
41 blinker->resubmit = 1; 41 blinker->resubmit = true;
42 blinker->state = 0; 42 blinker->state = false;
43 setup_timer(&blinker->timer, blink_led_timer, 43 setup_timer(&blinker->timer, blink_led_timer,
44 (unsigned long)efx); 44 (unsigned long)efx);
45 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); 45 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
46 } else { 46 } else {
47 blinker->resubmit = 0; 47 blinker->resubmit = false;
48 if (blinker->timer.function) 48 if (blinker->timer.function)
49 del_timer_sync(&blinker->timer); 49 del_timer_sync(&blinker->timer);
50 efx->board_info.set_fault_led(efx, 0); 50 efx->board_info.set_fault_led(efx, false);
51 } 51 }
52} 52}
53 53
@@ -78,7 +78,7 @@ static int sfe4002_init_leds(struct efx_nic *efx)
78 return 0; 78 return 0;
79} 79}
80 80
81static void sfe4002_fault_led(struct efx_nic *efx, int state) 81static void sfe4002_fault_led(struct efx_nic *efx, bool state)
82{ 82{
83 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON : 83 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
84 QUAKE_LED_OFF); 84 QUAKE_LED_OFF);
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index e5e844359ce7..c6e01b64bfb4 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -21,7 +21,5 @@ enum efx_board_type {
21 21
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
23extern int sfe4001_init(struct efx_nic *efx); 23extern int sfe4001_init(struct efx_nic *efx);
24/* Are we putting the PHY into flash config mode */
25extern unsigned int sfe4001_phy_flash_cfg;
26 24
27#endif 25#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 45c72eebb3a7..06ea71c7e34e 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -28,7 +28,6 @@
28#include "efx.h" 28#include "efx.h"
29#include "mdio_10g.h" 29#include "mdio_10g.h"
30#include "falcon.h" 30#include "falcon.h"
31#include "workarounds.h"
32#include "mac.h" 31#include "mac.h"
33 32
34#define EFX_MAX_MTU (9 * 1024) 33#define EFX_MAX_MTU (9 * 1024)
@@ -52,7 +51,7 @@ static struct workqueue_struct *refill_workqueue;
52 * This sets the default for new devices. It can be controlled later 51 * This sets the default for new devices. It can be controlled later
53 * using ethtool. 52 * using ethtool.
54 */ 53 */
55static int lro = 1; 54static int lro = true;
56module_param(lro, int, 0644); 55module_param(lro, int, 0644);
57MODULE_PARM_DESC(lro, "Large receive offload acceleration"); 56MODULE_PARM_DESC(lro, "Large receive offload acceleration");
58 57
@@ -65,7 +64,7 @@ MODULE_PARM_DESC(lro, "Large receive offload acceleration");
65 * This is forced to 0 for MSI interrupt mode as the interrupt vector 64 * This is forced to 0 for MSI interrupt mode as the interrupt vector
66 * is not written 65 * is not written
67 */ 66 */
68static unsigned int separate_tx_and_rx_channels = 1; 67static unsigned int separate_tx_and_rx_channels = true;
69 68
70/* This is the weight assigned to each of the (per-channel) virtual 69/* This is the weight assigned to each of the (per-channel) virtual
71 * NAPI devices. 70 * NAPI devices.
@@ -81,7 +80,7 @@ unsigned int efx_monitor_interval = 1 * HZ;
81/* This controls whether or not the hardware monitor will trigger a 80/* This controls whether or not the hardware monitor will trigger a
82 * reset when it detects an error condition. 81 * reset when it detects an error condition.
83 */ 82 */
84static unsigned int monitor_reset = 1; 83static unsigned int monitor_reset = true;
85 84
86/* This controls whether or not the driver will initialise devices 85/* This controls whether or not the driver will initialise devices
87 * with invalid MAC addresses stored in the EEPROM or flash. If true, 86 * with invalid MAC addresses stored in the EEPROM or flash. If true,
@@ -141,8 +140,7 @@ static void efx_fini_channels(struct efx_nic *efx);
141 140
142#define EFX_ASSERT_RESET_SERIALISED(efx) \ 141#define EFX_ASSERT_RESET_SERIALISED(efx) \
143 do { \ 142 do { \
144 if ((efx->state == STATE_RUNNING) || \ 143 if (efx->state == STATE_RUNNING) \
145 (efx->state == STATE_RESETTING)) \
146 ASSERT_RTNL(); \ 144 ASSERT_RTNL(); \
147 } while (0) 145 } while (0)
148 146
@@ -159,16 +157,18 @@ static void efx_fini_channels(struct efx_nic *efx);
159 * never be concurrently called more than once on the same channel, 157 * never be concurrently called more than once on the same channel,
160 * though different channels may be being processed concurrently. 158 * though different channels may be being processed concurrently.
161 */ 159 */
162static inline int efx_process_channel(struct efx_channel *channel, int rx_quota) 160static int efx_process_channel(struct efx_channel *channel, int rx_quota)
163{ 161{
164 int rxdmaqs; 162 struct efx_nic *efx = channel->efx;
165 struct efx_rx_queue *rx_queue; 163 int rx_packets;
166 164
167 if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE || 165 if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
168 !channel->enabled)) 166 !channel->enabled))
169 return rx_quota; 167 return 0;
170 168
171 rxdmaqs = falcon_process_eventq(channel, &rx_quota); 169 rx_packets = falcon_process_eventq(channel, rx_quota);
170 if (rx_packets == 0)
171 return 0;
172 172
173 /* Deliver last RX packet. */ 173 /* Deliver last RX packet. */
174 if (channel->rx_pkt) { 174 if (channel->rx_pkt) {
@@ -180,16 +180,9 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
180 efx_flush_lro(channel); 180 efx_flush_lro(channel);
181 efx_rx_strategy(channel); 181 efx_rx_strategy(channel);
182 182
183 /* Refill descriptor rings as necessary */ 183 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
184 rx_queue = &channel->efx->rx_queue[0];
185 while (rxdmaqs) {
186 if (rxdmaqs & 0x01)
187 efx_fast_push_rx_descriptors(rx_queue);
188 rx_queue++;
189 rxdmaqs >>= 1;
190 }
191 184
192 return rx_quota; 185 return rx_packets;
193} 186}
194 187
195/* Mark channel as finished processing 188/* Mark channel as finished processing
@@ -203,7 +196,7 @@ static inline void efx_channel_processed(struct efx_channel *channel)
203 /* The interrupt handler for this channel may set work_pending 196 /* The interrupt handler for this channel may set work_pending
204 * as soon as we acknowledge the events we've seen. Make sure 197 * as soon as we acknowledge the events we've seen. Make sure
205 * it's cleared before then. */ 198 * it's cleared before then. */
206 channel->work_pending = 0; 199 channel->work_pending = false;
207 smp_wmb(); 200 smp_wmb();
208 201
209 falcon_eventq_read_ack(channel); 202 falcon_eventq_read_ack(channel);
@@ -219,14 +212,12 @@ static int efx_poll(struct napi_struct *napi, int budget)
219 struct efx_channel *channel = 212 struct efx_channel *channel =
220 container_of(napi, struct efx_channel, napi_str); 213 container_of(napi, struct efx_channel, napi_str);
221 struct net_device *napi_dev = channel->napi_dev; 214 struct net_device *napi_dev = channel->napi_dev;
222 int unused;
223 int rx_packets; 215 int rx_packets;
224 216
225 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", 217 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
226 channel->channel, raw_smp_processor_id()); 218 channel->channel, raw_smp_processor_id());
227 219
228 unused = efx_process_channel(channel, budget); 220 rx_packets = efx_process_channel(channel, budget);
229 rx_packets = (budget - unused);
230 221
231 if (rx_packets < budget) { 222 if (rx_packets < budget) {
232 /* There is no race here; although napi_disable() will 223 /* There is no race here; although napi_disable() will
@@ -260,7 +251,7 @@ void efx_process_channel_now(struct efx_channel *channel)
260 falcon_disable_interrupts(efx); 251 falcon_disable_interrupts(efx);
261 if (efx->legacy_irq) 252 if (efx->legacy_irq)
262 synchronize_irq(efx->legacy_irq); 253 synchronize_irq(efx->legacy_irq);
263 if (channel->has_interrupt && channel->irq) 254 if (channel->irq)
264 synchronize_irq(channel->irq); 255 synchronize_irq(channel->irq);
265 256
266 /* Wait for any NAPI processing to complete */ 257 /* Wait for any NAPI processing to complete */
@@ -290,13 +281,13 @@ static int efx_probe_eventq(struct efx_channel *channel)
290} 281}
291 282
292/* Prepare channel's event queue */ 283/* Prepare channel's event queue */
293static int efx_init_eventq(struct efx_channel *channel) 284static void efx_init_eventq(struct efx_channel *channel)
294{ 285{
295 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel); 286 EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
296 287
297 channel->eventq_read_ptr = 0; 288 channel->eventq_read_ptr = 0;
298 289
299 return falcon_init_eventq(channel); 290 falcon_init_eventq(channel);
300} 291}
301 292
302static void efx_fini_eventq(struct efx_channel *channel) 293static void efx_fini_eventq(struct efx_channel *channel)
@@ -362,12 +353,11 @@ static int efx_probe_channel(struct efx_channel *channel)
362 * to propagate configuration changes (mtu, checksum offload), or 353 * to propagate configuration changes (mtu, checksum offload), or
363 * to clear hardware error conditions 354 * to clear hardware error conditions
364 */ 355 */
365static int efx_init_channels(struct efx_nic *efx) 356static void efx_init_channels(struct efx_nic *efx)
366{ 357{
367 struct efx_tx_queue *tx_queue; 358 struct efx_tx_queue *tx_queue;
368 struct efx_rx_queue *rx_queue; 359 struct efx_rx_queue *rx_queue;
369 struct efx_channel *channel; 360 struct efx_channel *channel;
370 int rc = 0;
371 361
372 /* Calculate the rx buffer allocation parameters required to 362 /* Calculate the rx buffer allocation parameters required to
373 * support the current MTU, including padding for header 363 * support the current MTU, including padding for header
@@ -382,36 +372,20 @@ static int efx_init_channels(struct efx_nic *efx)
382 efx_for_each_channel(channel, efx) { 372 efx_for_each_channel(channel, efx) {
383 EFX_LOG(channel->efx, "init chan %d\n", channel->channel); 373 EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
384 374
385 rc = efx_init_eventq(channel); 375 efx_init_eventq(channel);
386 if (rc)
387 goto err;
388 376
389 efx_for_each_channel_tx_queue(tx_queue, channel) { 377 efx_for_each_channel_tx_queue(tx_queue, channel)
390 rc = efx_init_tx_queue(tx_queue); 378 efx_init_tx_queue(tx_queue);
391 if (rc)
392 goto err;
393 }
394 379
395 /* The rx buffer allocation strategy is MTU dependent */ 380 /* The rx buffer allocation strategy is MTU dependent */
396 efx_rx_strategy(channel); 381 efx_rx_strategy(channel);
397 382
398 efx_for_each_channel_rx_queue(rx_queue, channel) { 383 efx_for_each_channel_rx_queue(rx_queue, channel)
399 rc = efx_init_rx_queue(rx_queue); 384 efx_init_rx_queue(rx_queue);
400 if (rc)
401 goto err;
402 }
403 385
404 WARN_ON(channel->rx_pkt != NULL); 386 WARN_ON(channel->rx_pkt != NULL);
405 efx_rx_strategy(channel); 387 efx_rx_strategy(channel);
406 } 388 }
407
408 return 0;
409
410 err:
411 EFX_ERR(efx, "failed to initialise channel %d\n",
412 channel ? channel->channel : -1);
413 efx_fini_channels(efx);
414 return rc;
415} 389}
416 390
417/* This enables event queue processing and packet transmission. 391/* This enables event queue processing and packet transmission.
@@ -432,8 +406,8 @@ static void efx_start_channel(struct efx_channel *channel)
432 /* The interrupt handler for this channel may set work_pending 406 /* The interrupt handler for this channel may set work_pending
433 * as soon as we enable it. Make sure it's cleared before 407 * as soon as we enable it. Make sure it's cleared before
434 * then. Similarly, make sure it sees the enabled flag set. */ 408 * then. Similarly, make sure it sees the enabled flag set. */
435 channel->work_pending = 0; 409 channel->work_pending = false;
436 channel->enabled = 1; 410 channel->enabled = true;
437 smp_wmb(); 411 smp_wmb();
438 412
439 napi_enable(&channel->napi_str); 413 napi_enable(&channel->napi_str);
@@ -456,7 +430,7 @@ static void efx_stop_channel(struct efx_channel *channel)
456 430
457 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel); 431 EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
458 432
459 channel->enabled = 0; 433 channel->enabled = false;
460 napi_disable(&channel->napi_str); 434 napi_disable(&channel->napi_str);
461 435
462 /* Ensure that any worker threads have exited or will be no-ops */ 436 /* Ensure that any worker threads have exited or will be no-ops */
@@ -471,10 +445,17 @@ static void efx_fini_channels(struct efx_nic *efx)
471 struct efx_channel *channel; 445 struct efx_channel *channel;
472 struct efx_tx_queue *tx_queue; 446 struct efx_tx_queue *tx_queue;
473 struct efx_rx_queue *rx_queue; 447 struct efx_rx_queue *rx_queue;
448 int rc;
474 449
475 EFX_ASSERT_RESET_SERIALISED(efx); 450 EFX_ASSERT_RESET_SERIALISED(efx);
476 BUG_ON(efx->port_enabled); 451 BUG_ON(efx->port_enabled);
477 452
453 rc = falcon_flush_queues(efx);
454 if (rc)
455 EFX_ERR(efx, "failed to flush queues\n");
456 else
457 EFX_LOG(efx, "successfully flushed all queues\n");
458
478 efx_for_each_channel(channel, efx) { 459 efx_for_each_channel(channel, efx) {
479 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); 460 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
480 461
@@ -482,13 +463,6 @@ static void efx_fini_channels(struct efx_nic *efx)
482 efx_fini_rx_queue(rx_queue); 463 efx_fini_rx_queue(rx_queue);
483 efx_for_each_channel_tx_queue(tx_queue, channel) 464 efx_for_each_channel_tx_queue(tx_queue, channel)
484 efx_fini_tx_queue(tx_queue); 465 efx_fini_tx_queue(tx_queue);
485 }
486
487 /* Do the event queues last so that we can handle flush events
488 * for all DMA queues. */
489 efx_for_each_channel(channel, efx) {
490 EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
491
492 efx_fini_eventq(channel); 466 efx_fini_eventq(channel);
493 } 467 }
494} 468}
@@ -526,8 +500,6 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
526 */ 500 */
527static void efx_link_status_changed(struct efx_nic *efx) 501static void efx_link_status_changed(struct efx_nic *efx)
528{ 502{
529 int carrier_ok;
530
531 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 503 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
532 * that no events are triggered between unregister_netdev() and the 504 * that no events are triggered between unregister_netdev() and the
533 * driver unloading. A more general condition is that NETDEV_CHANGE 505 * driver unloading. A more general condition is that NETDEV_CHANGE
@@ -535,8 +507,12 @@ static void efx_link_status_changed(struct efx_nic *efx)
535 if (!netif_running(efx->net_dev)) 507 if (!netif_running(efx->net_dev))
536 return; 508 return;
537 509
538 carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0; 510 if (efx->port_inhibited) {
539 if (efx->link_up != carrier_ok) { 511 netif_carrier_off(efx->net_dev);
512 return;
513 }
514
515 if (efx->link_up != netif_carrier_ok(efx->net_dev)) {
540 efx->n_link_state_changes++; 516 efx->n_link_state_changes++;
541 517
542 if (efx->link_up) 518 if (efx->link_up)
@@ -577,13 +553,19 @@ static void efx_link_status_changed(struct efx_nic *efx)
577 553
578/* This call reinitialises the MAC to pick up new PHY settings. The 554/* This call reinitialises the MAC to pick up new PHY settings. The
579 * caller must hold the mac_lock */ 555 * caller must hold the mac_lock */
580static void __efx_reconfigure_port(struct efx_nic *efx) 556void __efx_reconfigure_port(struct efx_nic *efx)
581{ 557{
582 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 558 WARN_ON(!mutex_is_locked(&efx->mac_lock));
583 559
584 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n", 560 EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
585 raw_smp_processor_id()); 561 raw_smp_processor_id());
586 562
563 /* Serialise the promiscuous flag with efx_set_multicast_list. */
564 if (efx_dev_registered(efx)) {
565 netif_addr_lock_bh(efx->net_dev);
566 netif_addr_unlock_bh(efx->net_dev);
567 }
568
587 falcon_reconfigure_xmac(efx); 569 falcon_reconfigure_xmac(efx);
588 570
589 /* Inform kernel of loss/gain of carrier */ 571 /* Inform kernel of loss/gain of carrier */
@@ -661,7 +643,8 @@ static int efx_init_port(struct efx_nic *efx)
661 if (rc) 643 if (rc)
662 return rc; 644 return rc;
663 645
664 efx->port_initialized = 1; 646 efx->port_initialized = true;
647 efx->stats_enabled = true;
665 648
666 /* Reconfigure port to program MAC registers */ 649 /* Reconfigure port to program MAC registers */
667 falcon_reconfigure_xmac(efx); 650 falcon_reconfigure_xmac(efx);
@@ -678,7 +661,7 @@ static void efx_start_port(struct efx_nic *efx)
678 BUG_ON(efx->port_enabled); 661 BUG_ON(efx->port_enabled);
679 662
680 mutex_lock(&efx->mac_lock); 663 mutex_lock(&efx->mac_lock);
681 efx->port_enabled = 1; 664 efx->port_enabled = true;
682 __efx_reconfigure_port(efx); 665 __efx_reconfigure_port(efx);
683 mutex_unlock(&efx->mac_lock); 666 mutex_unlock(&efx->mac_lock);
684} 667}
@@ -692,7 +675,7 @@ static void efx_stop_port(struct efx_nic *efx)
692 EFX_LOG(efx, "stop port\n"); 675 EFX_LOG(efx, "stop port\n");
693 676
694 mutex_lock(&efx->mac_lock); 677 mutex_lock(&efx->mac_lock);
695 efx->port_enabled = 0; 678 efx->port_enabled = false;
696 mutex_unlock(&efx->mac_lock); 679 mutex_unlock(&efx->mac_lock);
697 680
698 /* Serialise against efx_set_multicast_list() */ 681 /* Serialise against efx_set_multicast_list() */
@@ -710,9 +693,9 @@ static void efx_fini_port(struct efx_nic *efx)
710 return; 693 return;
711 694
712 falcon_fini_xmac(efx); 695 falcon_fini_xmac(efx);
713 efx->port_initialized = 0; 696 efx->port_initialized = false;
714 697
715 efx->link_up = 0; 698 efx->link_up = false;
716 efx_link_status_changed(efx); 699 efx_link_status_changed(efx);
717} 700}
718 701
@@ -797,7 +780,7 @@ static int efx_init_io(struct efx_nic *efx)
797 return 0; 780 return 0;
798 781
799 fail4: 782 fail4:
800 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 783 pci_release_region(efx->pci_dev, efx->type->mem_bar);
801 fail3: 784 fail3:
802 efx->membase_phys = 0; 785 efx->membase_phys = 0;
803 fail2: 786 fail2:
@@ -823,53 +806,61 @@ static void efx_fini_io(struct efx_nic *efx)
823 pci_disable_device(efx->pci_dev); 806 pci_disable_device(efx->pci_dev);
824} 807}
825 808
826/* Probe the number and type of interrupts we are able to obtain. */ 809/* Get number of RX queues wanted. Return number of online CPU
810 * packages in the expectation that an IRQ balancer will spread
811 * interrupts across them. */
812static int efx_wanted_rx_queues(void)
813{
814 cpumask_t core_mask;
815 int count;
816 int cpu;
817
818 cpus_clear(core_mask);
819 count = 0;
820 for_each_online_cpu(cpu) {
821 if (!cpu_isset(cpu, core_mask)) {
822 ++count;
823 cpus_or(core_mask, core_mask,
824 topology_core_siblings(cpu));
825 }
826 }
827
828 return count;
829}
830
831/* Probe the number and type of interrupts we are able to obtain, and
832 * the resulting numbers of channels and RX queues.
833 */
827static void efx_probe_interrupts(struct efx_nic *efx) 834static void efx_probe_interrupts(struct efx_nic *efx)
828{ 835{
829 int max_channel = efx->type->phys_addr_channels - 1; 836 int max_channels =
830 struct msix_entry xentries[EFX_MAX_CHANNELS]; 837 min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
831 int rc, i; 838 int rc, i;
832 839
833 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 840 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
834 BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX)); 841 struct msix_entry xentries[EFX_MAX_CHANNELS];
835 842 int wanted_ints;
836 if (rss_cpus == 0) {
837 cpumask_t core_mask;
838 int cpu;
839
840 cpus_clear(core_mask);
841 efx->rss_queues = 0;
842 for_each_online_cpu(cpu) {
843 if (!cpu_isset(cpu, core_mask)) {
844 ++efx->rss_queues;
845 cpus_or(core_mask, core_mask,
846 topology_core_siblings(cpu));
847 }
848 }
849 } else {
850 efx->rss_queues = rss_cpus;
851 }
852 843
853 efx->rss_queues = min(efx->rss_queues, max_channel + 1); 844 /* We want one RX queue and interrupt per CPU package
854 efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS); 845 * (or as specified by the rss_cpus module parameter).
846 * We will need one channel per interrupt.
847 */
848 wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
849 efx->n_rx_queues = min(wanted_ints, max_channels);
855 850
856 /* Request maximum number of MSI interrupts, and fill out 851 for (i = 0; i < efx->n_rx_queues; i++)
857 * the channel interrupt information the allowed allocation */
858 for (i = 0; i < efx->rss_queues; i++)
859 xentries[i].entry = i; 852 xentries[i].entry = i;
860 rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues); 853 rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues);
861 if (rc > 0) { 854 if (rc > 0) {
862 EFX_BUG_ON_PARANOID(rc >= efx->rss_queues); 855 EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues);
863 efx->rss_queues = rc; 856 efx->n_rx_queues = rc;
864 rc = pci_enable_msix(efx->pci_dev, xentries, 857 rc = pci_enable_msix(efx->pci_dev, xentries,
865 efx->rss_queues); 858 efx->n_rx_queues);
866 } 859 }
867 860
868 if (rc == 0) { 861 if (rc == 0) {
869 for (i = 0; i < efx->rss_queues; i++) { 862 for (i = 0; i < efx->n_rx_queues; i++)
870 efx->channel[i].has_interrupt = 1;
871 efx->channel[i].irq = xentries[i].vector; 863 efx->channel[i].irq = xentries[i].vector;
872 }
873 } else { 864 } else {
874 /* Fall back to single channel MSI */ 865 /* Fall back to single channel MSI */
875 efx->interrupt_mode = EFX_INT_MODE_MSI; 866 efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -879,11 +870,10 @@ static void efx_probe_interrupts(struct efx_nic *efx)
879 870
880 /* Try single interrupt MSI */ 871 /* Try single interrupt MSI */
881 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 872 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
882 efx->rss_queues = 1; 873 efx->n_rx_queues = 1;
883 rc = pci_enable_msi(efx->pci_dev); 874 rc = pci_enable_msi(efx->pci_dev);
884 if (rc == 0) { 875 if (rc == 0) {
885 efx->channel[0].irq = efx->pci_dev->irq; 876 efx->channel[0].irq = efx->pci_dev->irq;
886 efx->channel[0].has_interrupt = 1;
887 } else { 877 } else {
888 EFX_ERR(efx, "could not enable MSI\n"); 878 EFX_ERR(efx, "could not enable MSI\n");
889 efx->interrupt_mode = EFX_INT_MODE_LEGACY; 879 efx->interrupt_mode = EFX_INT_MODE_LEGACY;
@@ -892,10 +882,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
892 882
893 /* Assume legacy interrupts */ 883 /* Assume legacy interrupts */
894 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 884 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
895 efx->rss_queues = 1; 885 efx->n_rx_queues = 1;
896 /* Every channel is interruptible */
897 for (i = 0; i < EFX_MAX_CHANNELS; i++)
898 efx->channel[i].has_interrupt = 1;
899 efx->legacy_irq = efx->pci_dev->irq; 886 efx->legacy_irq = efx->pci_dev->irq;
900 } 887 }
901} 888}
@@ -905,7 +892,7 @@ static void efx_remove_interrupts(struct efx_nic *efx)
905 struct efx_channel *channel; 892 struct efx_channel *channel;
906 893
907 /* Remove MSI/MSI-X interrupts */ 894 /* Remove MSI/MSI-X interrupts */
908 efx_for_each_channel_with_interrupt(channel, efx) 895 efx_for_each_channel(channel, efx)
909 channel->irq = 0; 896 channel->irq = 0;
910 pci_disable_msi(efx->pci_dev); 897 pci_disable_msi(efx->pci_dev);
911 pci_disable_msix(efx->pci_dev); 898 pci_disable_msix(efx->pci_dev);
@@ -914,45 +901,22 @@ static void efx_remove_interrupts(struct efx_nic *efx)
914 efx->legacy_irq = 0; 901 efx->legacy_irq = 0;
915} 902}
916 903
917/* Select number of used resources 904static void efx_set_channels(struct efx_nic *efx)
918 * Should be called after probe_interrupts()
919 */
920static void efx_select_used(struct efx_nic *efx)
921{ 905{
922 struct efx_tx_queue *tx_queue; 906 struct efx_tx_queue *tx_queue;
923 struct efx_rx_queue *rx_queue; 907 struct efx_rx_queue *rx_queue;
924 int i;
925 908
926 /* TX queues. One per port per channel with TX capability 909 efx_for_each_tx_queue(tx_queue, efx) {
927 * (more than one per port won't work on Linux, due to out 910 if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels)
928 * of order issues... but will be fine on Solaris) 911 tx_queue->channel = &efx->channel[1];
929 */ 912 else
930 tx_queue = &efx->tx_queue[0]; 913 tx_queue->channel = &efx->channel[0];
931 914 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
932 /* Perform this for each channel with TX capabilities. 915 }
933 * At the moment, we only support a single TX queue
934 */
935 tx_queue->used = 1;
936 if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
937 tx_queue->channel = &efx->channel[1];
938 else
939 tx_queue->channel = &efx->channel[0];
940 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
941 tx_queue++;
942
943 /* RX queues. Each has a dedicated channel. */
944 for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
945 rx_queue = &efx->rx_queue[i];
946 916
947 if (i < efx->rss_queues) { 917 efx_for_each_rx_queue(rx_queue, efx) {
948 rx_queue->used = 1; 918 rx_queue->channel = &efx->channel[rx_queue->queue];
949 /* If we allow multiple RX queues per channel 919 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
950 * we need to decide that here
951 */
952 rx_queue->channel = &efx->channel[rx_queue->queue];
953 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
954 rx_queue++;
955 }
956 } 920 }
957} 921}
958 922
@@ -971,8 +935,7 @@ static int efx_probe_nic(struct efx_nic *efx)
971 * in MSI-X interrupts. */ 935 * in MSI-X interrupts. */
972 efx_probe_interrupts(efx); 936 efx_probe_interrupts(efx);
973 937
974 /* Determine number of RX queues and TX queues */ 938 efx_set_channels(efx);
975 efx_select_used(efx);
976 939
977 /* Initialise the interrupt moderation settings */ 940 /* Initialise the interrupt moderation settings */
978 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec); 941 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
@@ -1058,7 +1021,8 @@ static void efx_start_all(struct efx_nic *efx)
1058 /* Mark the port as enabled so port reconfigurations can start, then 1021 /* Mark the port as enabled so port reconfigurations can start, then
1059 * restart the transmit interface early so the watchdog timer stops */ 1022 * restart the transmit interface early so the watchdog timer stops */
1060 efx_start_port(efx); 1023 efx_start_port(efx);
1061 efx_wake_queue(efx); 1024 if (efx_dev_registered(efx))
1025 efx_wake_queue(efx);
1062 1026
1063 efx_for_each_channel(channel, efx) 1027 efx_for_each_channel(channel, efx)
1064 efx_start_channel(channel); 1028 efx_start_channel(channel);
@@ -1109,7 +1073,7 @@ static void efx_stop_all(struct efx_nic *efx)
1109 falcon_disable_interrupts(efx); 1073 falcon_disable_interrupts(efx);
1110 if (efx->legacy_irq) 1074 if (efx->legacy_irq)
1111 synchronize_irq(efx->legacy_irq); 1075 synchronize_irq(efx->legacy_irq);
1112 efx_for_each_channel_with_interrupt(channel, efx) { 1076 efx_for_each_channel(channel, efx) {
1113 if (channel->irq) 1077 if (channel->irq)
1114 synchronize_irq(channel->irq); 1078 synchronize_irq(channel->irq);
1115 } 1079 }
@@ -1128,13 +1092,12 @@ static void efx_stop_all(struct efx_nic *efx)
1128 1092
1129 /* Isolate the MAC from the TX and RX engines, so that queue 1093 /* Isolate the MAC from the TX and RX engines, so that queue
1130 * flushes will complete in a timely fashion. */ 1094 * flushes will complete in a timely fashion. */
1131 falcon_deconfigure_mac_wrapper(efx);
1132 falcon_drain_tx_fifo(efx); 1095 falcon_drain_tx_fifo(efx);
1133 1096
1134 /* Stop the kernel transmit interface late, so the watchdog 1097 /* Stop the kernel transmit interface late, so the watchdog
1135 * timer isn't ticking over the flush */ 1098 * timer isn't ticking over the flush */
1136 efx_stop_queue(efx);
1137 if (efx_dev_registered(efx)) { 1099 if (efx_dev_registered(efx)) {
1100 efx_stop_queue(efx);
1138 netif_tx_lock_bh(efx->net_dev); 1101 netif_tx_lock_bh(efx->net_dev);
1139 netif_tx_unlock_bh(efx->net_dev); 1102 netif_tx_unlock_bh(efx->net_dev);
1140 } 1103 }
@@ -1151,24 +1114,16 @@ static void efx_remove_all(struct efx_nic *efx)
1151} 1114}
1152 1115
1153/* A convinience function to safely flush all the queues */ 1116/* A convinience function to safely flush all the queues */
1154int efx_flush_queues(struct efx_nic *efx) 1117void efx_flush_queues(struct efx_nic *efx)
1155{ 1118{
1156 int rc;
1157
1158 EFX_ASSERT_RESET_SERIALISED(efx); 1119 EFX_ASSERT_RESET_SERIALISED(efx);
1159 1120
1160 efx_stop_all(efx); 1121 efx_stop_all(efx);
1161 1122
1162 efx_fini_channels(efx); 1123 efx_fini_channels(efx);
1163 rc = efx_init_channels(efx); 1124 efx_init_channels(efx);
1164 if (rc) {
1165 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1166 return rc;
1167 }
1168 1125
1169 efx_start_all(efx); 1126 efx_start_all(efx);
1170
1171 return 0;
1172} 1127}
1173 1128
1174/************************************************************************** 1129/**************************************************************************
@@ -1249,7 +1204,7 @@ static void efx_monitor(struct work_struct *data)
1249 */ 1204 */
1250static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) 1205static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1251{ 1206{
1252 struct efx_nic *efx = net_dev->priv; 1207 struct efx_nic *efx = netdev_priv(net_dev);
1253 1208
1254 EFX_ASSERT_RESET_SERIALISED(efx); 1209 EFX_ASSERT_RESET_SERIALISED(efx);
1255 1210
@@ -1303,10 +1258,10 @@ static void efx_fini_napi(struct efx_nic *efx)
1303 */ 1258 */
1304static void efx_netpoll(struct net_device *net_dev) 1259static void efx_netpoll(struct net_device *net_dev)
1305{ 1260{
1306 struct efx_nic *efx = net_dev->priv; 1261 struct efx_nic *efx = netdev_priv(net_dev);
1307 struct efx_channel *channel; 1262 struct efx_channel *channel;
1308 1263
1309 efx_for_each_channel_with_interrupt(channel, efx) 1264 efx_for_each_channel(channel, efx)
1310 efx_schedule_channel(channel); 1265 efx_schedule_channel(channel);
1311} 1266}
1312 1267
@@ -1321,12 +1276,15 @@ static void efx_netpoll(struct net_device *net_dev)
1321/* Context: process, rtnl_lock() held. */ 1276/* Context: process, rtnl_lock() held. */
1322static int efx_net_open(struct net_device *net_dev) 1277static int efx_net_open(struct net_device *net_dev)
1323{ 1278{
1324 struct efx_nic *efx = net_dev->priv; 1279 struct efx_nic *efx = netdev_priv(net_dev);
1325 EFX_ASSERT_RESET_SERIALISED(efx); 1280 EFX_ASSERT_RESET_SERIALISED(efx);
1326 1281
1327 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, 1282 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1328 raw_smp_processor_id()); 1283 raw_smp_processor_id());
1329 1284
1285 if (efx->phy_mode & PHY_MODE_SPECIAL)
1286 return -EBUSY;
1287
1330 efx_start_all(efx); 1288 efx_start_all(efx);
1331 return 0; 1289 return 0;
1332} 1290}
@@ -1337,8 +1295,7 @@ static int efx_net_open(struct net_device *net_dev)
1337 */ 1295 */
1338static int efx_net_stop(struct net_device *net_dev) 1296static int efx_net_stop(struct net_device *net_dev)
1339{ 1297{
1340 struct efx_nic *efx = net_dev->priv; 1298 struct efx_nic *efx = netdev_priv(net_dev);
1341 int rc;
1342 1299
1343 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, 1300 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1344 raw_smp_processor_id()); 1301 raw_smp_processor_id());
@@ -1346,9 +1303,7 @@ static int efx_net_stop(struct net_device *net_dev)
1346 /* Stop the device and flush all the channels */ 1303 /* Stop the device and flush all the channels */
1347 efx_stop_all(efx); 1304 efx_stop_all(efx);
1348 efx_fini_channels(efx); 1305 efx_fini_channels(efx);
1349 rc = efx_init_channels(efx); 1306 efx_init_channels(efx);
1350 if (rc)
1351 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1352 1307
1353 return 0; 1308 return 0;
1354} 1309}
@@ -1356,7 +1311,7 @@ static int efx_net_stop(struct net_device *net_dev)
1356/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1311/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1357static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1312static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1358{ 1313{
1359 struct efx_nic *efx = net_dev->priv; 1314 struct efx_nic *efx = netdev_priv(net_dev);
1360 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1315 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1361 struct net_device_stats *stats = &net_dev->stats; 1316 struct net_device_stats *stats = &net_dev->stats;
1362 1317
@@ -1366,7 +1321,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1366 */ 1321 */
1367 if (!spin_trylock(&efx->stats_lock)) 1322 if (!spin_trylock(&efx->stats_lock))
1368 return stats; 1323 return stats;
1369 if (efx->state == STATE_RUNNING) { 1324 if (efx->stats_enabled) {
1370 falcon_update_stats_xmac(efx); 1325 falcon_update_stats_xmac(efx);
1371 falcon_update_nic_stats(efx); 1326 falcon_update_nic_stats(efx);
1372 } 1327 }
@@ -1403,7 +1358,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1403/* Context: netif_tx_lock held, BHs disabled. */ 1358/* Context: netif_tx_lock held, BHs disabled. */
1404static void efx_watchdog(struct net_device *net_dev) 1359static void efx_watchdog(struct net_device *net_dev)
1405{ 1360{
1406 struct efx_nic *efx = net_dev->priv; 1361 struct efx_nic *efx = netdev_priv(net_dev);
1407 1362
1408 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n", 1363 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
1409 atomic_read(&efx->netif_stop_count), efx->port_enabled, 1364 atomic_read(&efx->netif_stop_count), efx->port_enabled,
@@ -1417,7 +1372,7 @@ static void efx_watchdog(struct net_device *net_dev)
1417/* Context: process, rtnl_lock() held. */ 1372/* Context: process, rtnl_lock() held. */
1418static int efx_change_mtu(struct net_device *net_dev, int new_mtu) 1373static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1419{ 1374{
1420 struct efx_nic *efx = net_dev->priv; 1375 struct efx_nic *efx = netdev_priv(net_dev);
1421 int rc = 0; 1376 int rc = 0;
1422 1377
1423 EFX_ASSERT_RESET_SERIALISED(efx); 1378 EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1431,21 +1386,15 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
1431 1386
1432 efx_fini_channels(efx); 1387 efx_fini_channels(efx);
1433 net_dev->mtu = new_mtu; 1388 net_dev->mtu = new_mtu;
1434 rc = efx_init_channels(efx); 1389 efx_init_channels(efx);
1435 if (rc)
1436 goto fail;
1437 1390
1438 efx_start_all(efx); 1391 efx_start_all(efx);
1439 return rc; 1392 return rc;
1440
1441 fail:
1442 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1443 return rc;
1444} 1393}
1445 1394
1446static int efx_set_mac_address(struct net_device *net_dev, void *data) 1395static int efx_set_mac_address(struct net_device *net_dev, void *data)
1447{ 1396{
1448 struct efx_nic *efx = net_dev->priv; 1397 struct efx_nic *efx = netdev_priv(net_dev);
1449 struct sockaddr *addr = data; 1398 struct sockaddr *addr = data;
1450 char *new_addr = addr->sa_data; 1399 char *new_addr = addr->sa_data;
1451 1400
@@ -1466,26 +1415,19 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1466 return 0; 1415 return 0;
1467} 1416}
1468 1417
1469/* Context: netif_tx_lock held, BHs disabled. */ 1418/* Context: netif_addr_lock held, BHs disabled. */
1470static void efx_set_multicast_list(struct net_device *net_dev) 1419static void efx_set_multicast_list(struct net_device *net_dev)
1471{ 1420{
1472 struct efx_nic *efx = net_dev->priv; 1421 struct efx_nic *efx = netdev_priv(net_dev);
1473 struct dev_mc_list *mc_list = net_dev->mc_list; 1422 struct dev_mc_list *mc_list = net_dev->mc_list;
1474 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1423 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1475 int promiscuous; 1424 bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
1425 bool changed = (efx->promiscuous != promiscuous);
1476 u32 crc; 1426 u32 crc;
1477 int bit; 1427 int bit;
1478 int i; 1428 int i;
1479 1429
1480 /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */ 1430 efx->promiscuous = promiscuous;
1481 promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
1482 if (efx->promiscuous != promiscuous) {
1483 efx->promiscuous = promiscuous;
1484 /* Close the window between efx_stop_port() and efx_flush_all()
1485 * by only queuing work when the port is enabled. */
1486 if (efx->port_enabled)
1487 queue_work(efx->workqueue, &efx->reconfigure_work);
1488 }
1489 1431
1490 /* Build multicast hash table */ 1432 /* Build multicast hash table */
1491 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) { 1433 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
@@ -1500,6 +1442,13 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1500 } 1442 }
1501 } 1443 }
1502 1444
1445 if (!efx->port_enabled)
1446 /* Delay pushing settings until efx_start_port() */
1447 return;
1448
1449 if (changed)
1450 queue_work(efx->workqueue, &efx->reconfigure_work);
1451
1503 /* Create and activate new global multicast hash table */ 1452 /* Create and activate new global multicast hash table */
1504 falcon_set_multicast_hash(efx); 1453 falcon_set_multicast_hash(efx);
1505} 1454}
@@ -1510,7 +1459,7 @@ static int efx_netdev_event(struct notifier_block *this,
1510 struct net_device *net_dev = ptr; 1459 struct net_device *net_dev = ptr;
1511 1460
1512 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1461 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
1513 struct efx_nic *efx = net_dev->priv; 1462 struct efx_nic *efx = netdev_priv(net_dev);
1514 1463
1515 strcpy(efx->name, net_dev->name); 1464 strcpy(efx->name, net_dev->name);
1516 } 1465 }
@@ -1568,7 +1517,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1568 if (!efx->net_dev) 1517 if (!efx->net_dev)
1569 return; 1518 return;
1570 1519
1571 BUG_ON(efx->net_dev->priv != efx); 1520 BUG_ON(netdev_priv(efx->net_dev) != efx);
1572 1521
1573 /* Free up any skbs still remaining. This has to happen before 1522 /* Free up any skbs still remaining. This has to happen before
1574 * we try to unregister the netdev as running their destructors 1523 * we try to unregister the netdev as running their destructors
@@ -1588,49 +1537,60 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1588 * 1537 *
1589 **************************************************************************/ 1538 **************************************************************************/
1590 1539
1591/* The final hardware and software finalisation before reset. */ 1540/* Tears down the entire software state and most of the hardware state
1592static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) 1541 * before reset. */
1542void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1593{ 1543{
1594 int rc; 1544 int rc;
1595 1545
1596 EFX_ASSERT_RESET_SERIALISED(efx); 1546 EFX_ASSERT_RESET_SERIALISED(efx);
1597 1547
1548 /* The net_dev->get_stats handler is quite slow, and will fail
1549 * if a fetch is pending over reset. Serialise against it. */
1550 spin_lock(&efx->stats_lock);
1551 efx->stats_enabled = false;
1552 spin_unlock(&efx->stats_lock);
1553
1554 efx_stop_all(efx);
1555 mutex_lock(&efx->mac_lock);
1556
1598 rc = falcon_xmac_get_settings(efx, ecmd); 1557 rc = falcon_xmac_get_settings(efx, ecmd);
1599 if (rc) { 1558 if (rc)
1600 EFX_ERR(efx, "could not back up PHY settings\n"); 1559 EFX_ERR(efx, "could not back up PHY settings\n");
1601 goto fail;
1602 }
1603 1560
1604 efx_fini_channels(efx); 1561 efx_fini_channels(efx);
1605 return 0;
1606
1607 fail:
1608 return rc;
1609} 1562}
1610 1563
1611/* The first part of software initialisation after a hardware reset 1564/* This function will always ensure that the locks acquired in
1612 * This function does not handle serialisation with the kernel, it 1565 * efx_reset_down() are released. A failure return code indicates
1613 * assumes the caller has done this */ 1566 * that we were unable to reinitialise the hardware, and the
1614static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd) 1567 * driver should be disabled. If ok is false, then the rx and tx
1568 * engines are not restarted, pending a RESET_DISABLE. */
1569int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
1615{ 1570{
1616 int rc; 1571 int rc;
1617 1572
1618 rc = efx_init_channels(efx); 1573 EFX_ASSERT_RESET_SERIALISED(efx);
1619 if (rc)
1620 goto fail1;
1621 1574
1622 /* Restore MAC and PHY settings. */ 1575 rc = falcon_init_nic(efx);
1623 rc = falcon_xmac_set_settings(efx, ecmd);
1624 if (rc) { 1576 if (rc) {
1625 EFX_ERR(efx, "could not restore PHY settings\n"); 1577 EFX_ERR(efx, "failed to initialise NIC\n");
1626 goto fail2; 1578 ok = false;
1627 } 1579 }
1628 1580
1629 return 0; 1581 if (ok) {
1582 efx_init_channels(efx);
1630 1583
1631 fail2: 1584 if (falcon_xmac_set_settings(efx, ecmd))
1632 efx_fini_channels(efx); 1585 EFX_ERR(efx, "could not restore PHY settings\n");
1633 fail1: 1586 }
1587
1588 mutex_unlock(&efx->mac_lock);
1589
1590 if (ok) {
1591 efx_start_all(efx);
1592 efx->stats_enabled = true;
1593 }
1634 return rc; 1594 return rc;
1635} 1595}
1636 1596
@@ -1659,25 +1619,14 @@ static int efx_reset(struct efx_nic *efx)
1659 goto unlock_rtnl; 1619 goto unlock_rtnl;
1660 } 1620 }
1661 1621
1662 efx->state = STATE_RESETTING;
1663 EFX_INFO(efx, "resetting (%d)\n", method); 1622 EFX_INFO(efx, "resetting (%d)\n", method);
1664 1623
1665 /* The net_dev->get_stats handler is quite slow, and will fail 1624 efx_reset_down(efx, &ecmd);
1666 * if a fetch is pending over reset. Serialise against it. */
1667 spin_lock(&efx->stats_lock);
1668 spin_unlock(&efx->stats_lock);
1669
1670 efx_stop_all(efx);
1671 mutex_lock(&efx->mac_lock);
1672
1673 rc = efx_reset_down(efx, &ecmd);
1674 if (rc)
1675 goto fail1;
1676 1625
1677 rc = falcon_reset_hw(efx, method); 1626 rc = falcon_reset_hw(efx, method);
1678 if (rc) { 1627 if (rc) {
1679 EFX_ERR(efx, "failed to reset hardware\n"); 1628 EFX_ERR(efx, "failed to reset hardware\n");
1680 goto fail2; 1629 goto fail;
1681 } 1630 }
1682 1631
1683 /* Allow resets to be rescheduled. */ 1632 /* Allow resets to be rescheduled. */
@@ -1689,46 +1638,27 @@ static int efx_reset(struct efx_nic *efx)
1689 * can respond to requests. */ 1638 * can respond to requests. */
1690 pci_set_master(efx->pci_dev); 1639 pci_set_master(efx->pci_dev);
1691 1640
1692 /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
1693 * case so the driver can talk to external SRAM */
1694 rc = falcon_init_nic(efx);
1695 if (rc) {
1696 EFX_ERR(efx, "failed to initialise NIC\n");
1697 goto fail3;
1698 }
1699
1700 /* Leave device stopped if necessary */ 1641 /* Leave device stopped if necessary */
1701 if (method == RESET_TYPE_DISABLE) { 1642 if (method == RESET_TYPE_DISABLE) {
1702 /* Reinitialise the device anyway so the driver unload sequence
1703 * can talk to the external SRAM */
1704 falcon_init_nic(efx);
1705 rc = -EIO; 1643 rc = -EIO;
1706 goto fail4; 1644 goto fail;
1707 } 1645 }
1708 1646
1709 rc = efx_reset_up(efx, &ecmd); 1647 rc = efx_reset_up(efx, &ecmd, true);
1710 if (rc) 1648 if (rc)
1711 goto fail5; 1649 goto disable;
1712 1650
1713 mutex_unlock(&efx->mac_lock);
1714 EFX_LOG(efx, "reset complete\n"); 1651 EFX_LOG(efx, "reset complete\n");
1715
1716 efx->state = STATE_RUNNING;
1717 efx_start_all(efx);
1718
1719 unlock_rtnl: 1652 unlock_rtnl:
1720 rtnl_unlock(); 1653 rtnl_unlock();
1721 return 0; 1654 return 0;
1722 1655
1723 fail5: 1656 fail:
1724 fail4: 1657 efx_reset_up(efx, &ecmd, false);
1725 fail3: 1658 disable:
1726 fail2:
1727 fail1:
1728 EFX_ERR(efx, "has been disabled\n"); 1659 EFX_ERR(efx, "has been disabled\n");
1729 efx->state = STATE_DISABLED; 1660 efx->state = STATE_DISABLED;
1730 1661
1731 mutex_unlock(&efx->mac_lock);
1732 rtnl_unlock(); 1662 rtnl_unlock();
1733 efx_unregister_netdev(efx); 1663 efx_unregister_netdev(efx);
1734 efx_fini_port(efx); 1664 efx_fini_port(efx);
@@ -1801,7 +1731,7 @@ static struct pci_device_id efx_pci_table[] __devinitdata = {
1801 * 1731 *
1802 * Dummy PHY/MAC/Board operations 1732 * Dummy PHY/MAC/Board operations
1803 * 1733 *
1804 * Can be used where the MAC does not implement this operation 1734 * Can be used for some unimplemented operations
1805 * Needed so all function pointers are valid and do not have to be tested 1735 * Needed so all function pointers are valid and do not have to be tested
1806 * before use 1736 * before use
1807 * 1737 *
@@ -1811,7 +1741,7 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
1811 return 0; 1741 return 0;
1812} 1742}
1813void efx_port_dummy_op_void(struct efx_nic *efx) {} 1743void efx_port_dummy_op_void(struct efx_nic *efx) {}
1814void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {} 1744void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
1815 1745
1816static struct efx_phy_operations efx_dummy_phy_operations = { 1746static struct efx_phy_operations efx_dummy_phy_operations = {
1817 .init = efx_port_dummy_op_int, 1747 .init = efx_port_dummy_op_int,
@@ -1819,20 +1749,14 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
1819 .check_hw = efx_port_dummy_op_int, 1749 .check_hw = efx_port_dummy_op_int,
1820 .fini = efx_port_dummy_op_void, 1750 .fini = efx_port_dummy_op_void,
1821 .clear_interrupt = efx_port_dummy_op_void, 1751 .clear_interrupt = efx_port_dummy_op_void,
1822 .reset_xaui = efx_port_dummy_op_void,
1823}; 1752};
1824 1753
1825/* Dummy board operations */
1826static int efx_nic_dummy_op_int(struct efx_nic *nic)
1827{
1828 return 0;
1829}
1830
1831static struct efx_board efx_dummy_board_info = { 1754static struct efx_board efx_dummy_board_info = {
1832 .init = efx_nic_dummy_op_int, 1755 .init = efx_port_dummy_op_int,
1833 .init_leds = efx_port_dummy_op_int, 1756 .init_leds = efx_port_dummy_op_int,
1834 .set_fault_led = efx_port_dummy_op_blink, 1757 .set_fault_led = efx_port_dummy_op_blink,
1835 .fini = efx_port_dummy_op_void, 1758 .blink = efx_port_dummy_op_blink,
1759 .fini = efx_port_dummy_op_void,
1836}; 1760};
1837 1761
1838/************************************************************************** 1762/**************************************************************************
@@ -1865,7 +1789,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1865 efx->board_info = efx_dummy_board_info; 1789 efx->board_info = efx_dummy_board_info;
1866 1790
1867 efx->net_dev = net_dev; 1791 efx->net_dev = net_dev;
1868 efx->rx_checksum_enabled = 1; 1792 efx->rx_checksum_enabled = true;
1869 spin_lock_init(&efx->netif_stop_lock); 1793 spin_lock_init(&efx->netif_stop_lock);
1870 spin_lock_init(&efx->stats_lock); 1794 spin_lock_init(&efx->stats_lock);
1871 mutex_init(&efx->mac_lock); 1795 mutex_init(&efx->mac_lock);
@@ -1878,10 +1802,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1878 channel = &efx->channel[i]; 1802 channel = &efx->channel[i];
1879 channel->efx = efx; 1803 channel->efx = efx;
1880 channel->channel = i; 1804 channel->channel = i;
1881 channel->evqnum = i; 1805 channel->work_pending = false;
1882 channel->work_pending = 0;
1883 } 1806 }
1884 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) { 1807 for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) {
1885 tx_queue = &efx->tx_queue[i]; 1808 tx_queue = &efx->tx_queue[i];
1886 tx_queue->efx = efx; 1809 tx_queue->efx = efx;
1887 tx_queue->queue = i; 1810 tx_queue->queue = i;
@@ -2056,19 +1979,16 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2056 goto fail5; 1979 goto fail5;
2057 } 1980 }
2058 1981
2059 rc = efx_init_channels(efx); 1982 efx_init_channels(efx);
2060 if (rc)
2061 goto fail6;
2062 1983
2063 rc = falcon_init_interrupt(efx); 1984 rc = falcon_init_interrupt(efx);
2064 if (rc) 1985 if (rc)
2065 goto fail7; 1986 goto fail6;
2066 1987
2067 return 0; 1988 return 0;
2068 1989
2069 fail7:
2070 efx_fini_channels(efx);
2071 fail6: 1990 fail6:
1991 efx_fini_channels(efx);
2072 efx_fini_port(efx); 1992 efx_fini_port(efx);
2073 fail5: 1993 fail5:
2074 fail4: 1994 fail4:
@@ -2105,7 +2025,10 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2105 NETIF_F_HIGHDMA | NETIF_F_TSO); 2025 NETIF_F_HIGHDMA | NETIF_F_TSO);
2106 if (lro) 2026 if (lro)
2107 net_dev->features |= NETIF_F_LRO; 2027 net_dev->features |= NETIF_F_LRO;
2108 efx = net_dev->priv; 2028 /* Mask for features that also apply to VLAN devices */
2029 net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
2030 NETIF_F_HIGHDMA | NETIF_F_TSO);
2031 efx = netdev_priv(net_dev);
2109 pci_set_drvdata(pci_dev, efx); 2032 pci_set_drvdata(pci_dev, efx);
2110 rc = efx_init_struct(efx, type, pci_dev, net_dev); 2033 rc = efx_init_struct(efx, type, pci_dev, net_dev);
2111 if (rc) 2034 if (rc)
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 3b2f69f4a9ab..d02937b70eee 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -28,15 +28,21 @@ extern void efx_wake_queue(struct efx_nic *efx);
28/* RX */ 28/* RX */
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
30extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 30extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
31 unsigned int len, int checksummed, int discard); 31 unsigned int len, bool checksummed, bool discard);
32extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 32extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
33 33
34/* Channels */ 34/* Channels */
35extern void efx_process_channel_now(struct efx_channel *channel); 35extern void efx_process_channel_now(struct efx_channel *channel);
36extern int efx_flush_queues(struct efx_nic *efx); 36extern void efx_flush_queues(struct efx_nic *efx);
37 37
38/* Ports */ 38/* Ports */
39extern void efx_reconfigure_port(struct efx_nic *efx); 39extern void efx_reconfigure_port(struct efx_nic *efx);
40extern void __efx_reconfigure_port(struct efx_nic *efx);
41
42/* Reset handling */
43extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd);
44extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd,
45 bool ok);
40 46
41/* Global */ 47/* Global */
42extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); 48extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
@@ -50,7 +56,7 @@ extern void efx_hex_dump(const u8 *, unsigned int, const char *);
50/* Dummy PHY ops for PHY drivers */ 56/* Dummy PHY ops for PHY drivers */
51extern int efx_port_dummy_op_int(struct efx_nic *efx); 57extern int efx_port_dummy_op_int(struct efx_nic *efx);
52extern void efx_port_dummy_op_void(struct efx_nic *efx); 58extern void efx_port_dummy_op_void(struct efx_nic *efx);
53extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink); 59extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink);
54 60
55 61
56extern unsigned int efx_monitor_interval; 62extern unsigned int efx_monitor_interval;
@@ -59,7 +65,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
59{ 65{
60 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n", 66 EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
61 channel->channel, raw_smp_processor_id()); 67 channel->channel, raw_smp_processor_id());
62 channel->work_pending = 1; 68 channel->work_pending = true;
63 69
64 netif_rx_schedule(channel->napi_dev, &channel->napi_str); 70 netif_rx_schedule(channel->napi_dev, &channel->napi_str);
65} 71}
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index c53290d08e2b..cec15dbb88e4 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -52,12 +52,11 @@ extern const char *efx_loopback_mode_names[];
52#define LOOPBACK_MASK(_efx) \ 52#define LOOPBACK_MASK(_efx) \
53 (1 << (_efx)->loopback_mode) 53 (1 << (_efx)->loopback_mode)
54 54
55#define LOOPBACK_INTERNAL(_efx) \ 55#define LOOPBACK_INTERNAL(_efx) \
56 ((LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)) ? 1 : 0) 56 (!!(LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx)))
57 57
58#define LOOPBACK_OUT_OF(_from, _to, _mask) \ 58#define LOOPBACK_OUT_OF(_from, _to, _mask) \
59 (((LOOPBACK_MASK(_from) & (_mask)) && \ 59 ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
60 ((LOOPBACK_MASK(_to) & (_mask)) == 0)) ? 1 : 0)
61 60
62/*****************************************************************************/ 61/*****************************************************************************/
63 62
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index e2c75d101610..fa98af58223e 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -17,6 +17,7 @@
17#include "ethtool.h" 17#include "ethtool.h"
18#include "falcon.h" 18#include "falcon.h"
19#include "gmii.h" 19#include "gmii.h"
20#include "spi.h"
20#include "mac.h" 21#include "mac.h"
21 22
22const char *efx_loopback_mode_names[] = { 23const char *efx_loopback_mode_names[] = {
@@ -32,8 +33,6 @@ const char *efx_loopback_mode_names[] = {
32 [LOOPBACK_NETWORK] = "NETWORK", 33 [LOOPBACK_NETWORK] = "NETWORK",
33}; 34};
34 35
35static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
36
37struct ethtool_string { 36struct ethtool_string {
38 char name[ETH_GSTRING_LEN]; 37 char name[ETH_GSTRING_LEN];
39}; 38};
@@ -173,6 +172,11 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
173/* Number of ethtool statistics */ 172/* Number of ethtool statistics */
174#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) 173#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
175 174
175/* EEPROM range with gPXE configuration */
176#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
177#define EFX_ETHTOOL_EEPROM_MIN 0x100U
178#define EFX_ETHTOOL_EEPROM_MAX 0x400U
179
176/************************************************************************** 180/**************************************************************************
177 * 181 *
178 * Ethtool operations 182 * Ethtool operations
@@ -183,7 +187,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
183/* Identify device by flashing LEDs */ 187/* Identify device by flashing LEDs */
184static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds) 188static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
185{ 189{
186 struct efx_nic *efx = net_dev->priv; 190 struct efx_nic *efx = netdev_priv(net_dev);
187 191
188 efx->board_info.blink(efx, 1); 192 efx->board_info.blink(efx, 1);
189 schedule_timeout_interruptible(seconds * HZ); 193 schedule_timeout_interruptible(seconds * HZ);
@@ -195,7 +199,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
195int efx_ethtool_get_settings(struct net_device *net_dev, 199int efx_ethtool_get_settings(struct net_device *net_dev,
196 struct ethtool_cmd *ecmd) 200 struct ethtool_cmd *ecmd)
197{ 201{
198 struct efx_nic *efx = net_dev->priv; 202 struct efx_nic *efx = netdev_priv(net_dev);
199 int rc; 203 int rc;
200 204
201 mutex_lock(&efx->mac_lock); 205 mutex_lock(&efx->mac_lock);
@@ -209,7 +213,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
209int efx_ethtool_set_settings(struct net_device *net_dev, 213int efx_ethtool_set_settings(struct net_device *net_dev,
210 struct ethtool_cmd *ecmd) 214 struct ethtool_cmd *ecmd)
211{ 215{
212 struct efx_nic *efx = net_dev->priv; 216 struct efx_nic *efx = netdev_priv(net_dev);
213 int rc; 217 int rc;
214 218
215 mutex_lock(&efx->mac_lock); 219 mutex_lock(&efx->mac_lock);
@@ -224,7 +228,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
224static void efx_ethtool_get_drvinfo(struct net_device *net_dev, 228static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
225 struct ethtool_drvinfo *info) 229 struct ethtool_drvinfo *info)
226{ 230{
227 struct efx_nic *efx = net_dev->priv; 231 struct efx_nic *efx = netdev_priv(net_dev);
228 232
229 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); 233 strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
230 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); 234 strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
@@ -329,7 +333,10 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
329 unsigned int n = 0; 333 unsigned int n = 0;
330 enum efx_loopback_mode mode; 334 enum efx_loopback_mode mode;
331 335
332 /* Interrupt */ 336 efx_fill_test(n++, strings, data, &tests->mii,
337 "core", 0, "mii", NULL);
338 efx_fill_test(n++, strings, data, &tests->nvram,
339 "core", 0, "nvram", NULL);
333 efx_fill_test(n++, strings, data, &tests->interrupt, 340 efx_fill_test(n++, strings, data, &tests->interrupt,
334 "core", 0, "interrupt", NULL); 341 "core", 0, "interrupt", NULL);
335 342
@@ -349,16 +356,17 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
349 "eventq.poll", NULL); 356 "eventq.poll", NULL);
350 } 357 }
351 358
352 /* PHY presence */ 359 efx_fill_test(n++, strings, data, &tests->registers,
353 efx_fill_test(n++, strings, data, &tests->phy_ok, 360 "core", 0, "registers", NULL);
354 EFX_PORT_NAME, "phy_ok", NULL); 361 efx_fill_test(n++, strings, data, &tests->phy,
362 EFX_PORT_NAME, "phy", NULL);
355 363
356 /* Loopback tests */ 364 /* Loopback tests */
357 efx_fill_test(n++, strings, data, &tests->loopback_speed, 365 efx_fill_test(n++, strings, data, &tests->loopback_speed,
358 EFX_PORT_NAME, "loopback.speed", NULL); 366 EFX_PORT_NAME, "loopback.speed", NULL);
359 efx_fill_test(n++, strings, data, &tests->loopback_full_duplex, 367 efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
360 EFX_PORT_NAME, "loopback.full_duplex", NULL); 368 EFX_PORT_NAME, "loopback.full_duplex", NULL);
361 for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 369 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
362 if (!(efx->loopback_modes & (1 << mode))) 370 if (!(efx->loopback_modes & (1 << mode)))
363 continue; 371 continue;
364 n = efx_fill_loopback_test(efx, 372 n = efx_fill_loopback_test(efx,
@@ -369,22 +377,24 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
369 return n; 377 return n;
370} 378}
371 379
372static int efx_ethtool_get_stats_count(struct net_device *net_dev) 380static int efx_ethtool_get_sset_count(struct net_device *net_dev,
381 int string_set)
373{ 382{
374 return EFX_ETHTOOL_NUM_STATS; 383 switch (string_set) {
375} 384 case ETH_SS_STATS:
376 385 return EFX_ETHTOOL_NUM_STATS;
377static int efx_ethtool_self_test_count(struct net_device *net_dev) 386 case ETH_SS_TEST:
378{ 387 return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
379 struct efx_nic *efx = net_dev->priv; 388 NULL, NULL, NULL);
380 389 default:
381 return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL); 390 return -EINVAL;
391 }
382} 392}
383 393
384static void efx_ethtool_get_strings(struct net_device *net_dev, 394static void efx_ethtool_get_strings(struct net_device *net_dev,
385 u32 string_set, u8 *strings) 395 u32 string_set, u8 *strings)
386{ 396{
387 struct efx_nic *efx = net_dev->priv; 397 struct efx_nic *efx = netdev_priv(net_dev);
388 struct ethtool_string *ethtool_strings = 398 struct ethtool_string *ethtool_strings =
389 (struct ethtool_string *)strings; 399 (struct ethtool_string *)strings;
390 int i; 400 int i;
@@ -410,7 +420,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
410 struct ethtool_stats *stats, 420 struct ethtool_stats *stats,
411 u64 *data) 421 u64 *data)
412{ 422{
413 struct efx_nic *efx = net_dev->priv; 423 struct efx_nic *efx = netdev_priv(net_dev);
414 struct efx_mac_stats *mac_stats = &efx->mac_stats; 424 struct efx_mac_stats *mac_stats = &efx->mac_stats;
415 struct efx_ethtool_stat *stat; 425 struct efx_ethtool_stat *stat;
416 struct efx_channel *channel; 426 struct efx_channel *channel;
@@ -442,60 +452,21 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
442 } 452 }
443} 453}
444 454
445static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
446{
447 int rc;
448
449 /* Our TSO requires TX checksumming, so force TX checksumming
450 * on when TSO is enabled.
451 */
452 if (enable) {
453 rc = efx_ethtool_set_tx_csum(net_dev, 1);
454 if (rc)
455 return rc;
456 }
457
458 return ethtool_op_set_tso(net_dev, enable);
459}
460
461static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
462{
463 struct efx_nic *efx = net_dev->priv;
464 int rc;
465
466 rc = ethtool_op_set_tx_csum(net_dev, enable);
467 if (rc)
468 return rc;
469
470 efx_flush_queues(efx);
471
472 /* Our TSO requires TX checksumming, so disable TSO when
473 * checksumming is disabled
474 */
475 if (!enable) {
476 rc = efx_ethtool_set_tso(net_dev, 0);
477 if (rc)
478 return rc;
479 }
480
481 return 0;
482}
483
484static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) 455static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
485{ 456{
486 struct efx_nic *efx = net_dev->priv; 457 struct efx_nic *efx = netdev_priv(net_dev);
487 458
488 /* No way to stop the hardware doing the checks; we just 459 /* No way to stop the hardware doing the checks; we just
489 * ignore the result. 460 * ignore the result.
490 */ 461 */
491 efx->rx_checksum_enabled = (enable ? 1 : 0); 462 efx->rx_checksum_enabled = !!enable;
492 463
493 return 0; 464 return 0;
494} 465}
495 466
496static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) 467static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
497{ 468{
498 struct efx_nic *efx = net_dev->priv; 469 struct efx_nic *efx = netdev_priv(net_dev);
499 470
500 return efx->rx_checksum_enabled; 471 return efx->rx_checksum_enabled;
501} 472}
@@ -503,7 +474,7 @@ static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
503static void efx_ethtool_self_test(struct net_device *net_dev, 474static void efx_ethtool_self_test(struct net_device *net_dev,
504 struct ethtool_test *test, u64 *data) 475 struct ethtool_test *test, u64 *data)
505{ 476{
506 struct efx_nic *efx = net_dev->priv; 477 struct efx_nic *efx = netdev_priv(net_dev);
507 struct efx_self_tests efx_tests; 478 struct efx_self_tests efx_tests;
508 int offline, already_up; 479 int offline, already_up;
509 int rc; 480 int rc;
@@ -533,15 +504,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
533 goto out; 504 goto out;
534 505
535 /* Perform offline tests only if online tests passed */ 506 /* Perform offline tests only if online tests passed */
536 if (offline) { 507 if (offline)
537 /* Stop the kernel from sending packets during the test. */ 508 rc = efx_offline_test(efx, &efx_tests,
538 efx_stop_queue(efx); 509 efx->loopback_modes);
539 rc = efx_flush_queues(efx);
540 if (!rc)
541 rc = efx_offline_test(efx, &efx_tests,
542 efx->loopback_modes);
543 efx_wake_queue(efx);
544 }
545 510
546 out: 511 out:
547 if (!already_up) 512 if (!already_up)
@@ -561,22 +526,65 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
561/* Restart autonegotiation */ 526/* Restart autonegotiation */
562static int efx_ethtool_nway_reset(struct net_device *net_dev) 527static int efx_ethtool_nway_reset(struct net_device *net_dev)
563{ 528{
564 struct efx_nic *efx = net_dev->priv; 529 struct efx_nic *efx = netdev_priv(net_dev);
565 530
566 return mii_nway_restart(&efx->mii); 531 return mii_nway_restart(&efx->mii);
567} 532}
568 533
569static u32 efx_ethtool_get_link(struct net_device *net_dev) 534static u32 efx_ethtool_get_link(struct net_device *net_dev)
570{ 535{
571 struct efx_nic *efx = net_dev->priv; 536 struct efx_nic *efx = netdev_priv(net_dev);
572 537
573 return efx->link_up; 538 return efx->link_up;
574} 539}
575 540
541static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
542{
543 struct efx_nic *efx = netdev_priv(net_dev);
544 struct efx_spi_device *spi = efx->spi_eeprom;
545
546 if (!spi)
547 return 0;
548 return min(spi->size, EFX_ETHTOOL_EEPROM_MAX) -
549 min(spi->size, EFX_ETHTOOL_EEPROM_MIN);
550}
551
552static int efx_ethtool_get_eeprom(struct net_device *net_dev,
553 struct ethtool_eeprom *eeprom, u8 *buf)
554{
555 struct efx_nic *efx = netdev_priv(net_dev);
556 struct efx_spi_device *spi = efx->spi_eeprom;
557 size_t len;
558 int rc;
559
560 rc = falcon_spi_read(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
561 eeprom->len, &len, buf);
562 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
563 eeprom->len = len;
564 return rc;
565}
566
567static int efx_ethtool_set_eeprom(struct net_device *net_dev,
568 struct ethtool_eeprom *eeprom, u8 *buf)
569{
570 struct efx_nic *efx = netdev_priv(net_dev);
571 struct efx_spi_device *spi = efx->spi_eeprom;
572 size_t len;
573 int rc;
574
575 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
576 return -EINVAL;
577
578 rc = falcon_spi_write(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN,
579 eeprom->len, &len, buf);
580 eeprom->len = len;
581 return rc;
582}
583
576static int efx_ethtool_get_coalesce(struct net_device *net_dev, 584static int efx_ethtool_get_coalesce(struct net_device *net_dev,
577 struct ethtool_coalesce *coalesce) 585 struct ethtool_coalesce *coalesce)
578{ 586{
579 struct efx_nic *efx = net_dev->priv; 587 struct efx_nic *efx = netdev_priv(net_dev);
580 struct efx_tx_queue *tx_queue; 588 struct efx_tx_queue *tx_queue;
581 struct efx_rx_queue *rx_queue; 589 struct efx_rx_queue *rx_queue;
582 struct efx_channel *channel; 590 struct efx_channel *channel;
@@ -614,7 +622,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
614static int efx_ethtool_set_coalesce(struct net_device *net_dev, 622static int efx_ethtool_set_coalesce(struct net_device *net_dev,
615 struct ethtool_coalesce *coalesce) 623 struct ethtool_coalesce *coalesce)
616{ 624{
617 struct efx_nic *efx = net_dev->priv; 625 struct efx_nic *efx = netdev_priv(net_dev);
618 struct efx_channel *channel; 626 struct efx_channel *channel;
619 struct efx_tx_queue *tx_queue; 627 struct efx_tx_queue *tx_queue;
620 unsigned tx_usecs, rx_usecs; 628 unsigned tx_usecs, rx_usecs;
@@ -657,7 +665,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
657static int efx_ethtool_set_pauseparam(struct net_device *net_dev, 665static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
658 struct ethtool_pauseparam *pause) 666 struct ethtool_pauseparam *pause)
659{ 667{
660 struct efx_nic *efx = net_dev->priv; 668 struct efx_nic *efx = netdev_priv(net_dev);
661 enum efx_fc_type flow_control = efx->flow_control; 669 enum efx_fc_type flow_control = efx->flow_control;
662 int rc; 670 int rc;
663 671
@@ -680,11 +688,11 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
680static void efx_ethtool_get_pauseparam(struct net_device *net_dev, 688static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
681 struct ethtool_pauseparam *pause) 689 struct ethtool_pauseparam *pause)
682{ 690{
683 struct efx_nic *efx = net_dev->priv; 691 struct efx_nic *efx = netdev_priv(net_dev);
684 692
685 pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0; 693 pause->rx_pause = !!(efx->flow_control & EFX_FC_RX);
686 pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0; 694 pause->tx_pause = !!(efx->flow_control & EFX_FC_TX);
687 pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0; 695 pause->autoneg = !!(efx->flow_control & EFX_FC_AUTO);
688} 696}
689 697
690 698
@@ -694,6 +702,9 @@ struct ethtool_ops efx_ethtool_ops = {
694 .get_drvinfo = efx_ethtool_get_drvinfo, 702 .get_drvinfo = efx_ethtool_get_drvinfo,
695 .nway_reset = efx_ethtool_nway_reset, 703 .nway_reset = efx_ethtool_nway_reset,
696 .get_link = efx_ethtool_get_link, 704 .get_link = efx_ethtool_get_link,
705 .get_eeprom_len = efx_ethtool_get_eeprom_len,
706 .get_eeprom = efx_ethtool_get_eeprom,
707 .set_eeprom = efx_ethtool_set_eeprom,
697 .get_coalesce = efx_ethtool_get_coalesce, 708 .get_coalesce = efx_ethtool_get_coalesce,
698 .set_coalesce = efx_ethtool_set_coalesce, 709 .set_coalesce = efx_ethtool_set_coalesce,
699 .get_pauseparam = efx_ethtool_get_pauseparam, 710 .get_pauseparam = efx_ethtool_get_pauseparam,
@@ -701,17 +712,16 @@ struct ethtool_ops efx_ethtool_ops = {
701 .get_rx_csum = efx_ethtool_get_rx_csum, 712 .get_rx_csum = efx_ethtool_get_rx_csum,
702 .set_rx_csum = efx_ethtool_set_rx_csum, 713 .set_rx_csum = efx_ethtool_set_rx_csum,
703 .get_tx_csum = ethtool_op_get_tx_csum, 714 .get_tx_csum = ethtool_op_get_tx_csum,
704 .set_tx_csum = efx_ethtool_set_tx_csum, 715 .set_tx_csum = ethtool_op_set_tx_csum,
705 .get_sg = ethtool_op_get_sg, 716 .get_sg = ethtool_op_get_sg,
706 .set_sg = ethtool_op_set_sg, 717 .set_sg = ethtool_op_set_sg,
707 .get_tso = ethtool_op_get_tso, 718 .get_tso = ethtool_op_get_tso,
708 .set_tso = efx_ethtool_set_tso, 719 .set_tso = ethtool_op_set_tso,
709 .get_flags = ethtool_op_get_flags, 720 .get_flags = ethtool_op_get_flags,
710 .set_flags = ethtool_op_set_flags, 721 .set_flags = ethtool_op_set_flags,
711 .self_test_count = efx_ethtool_self_test_count, 722 .get_sset_count = efx_ethtool_get_sset_count,
712 .self_test = efx_ethtool_self_test, 723 .self_test = efx_ethtool_self_test,
713 .get_strings = efx_ethtool_get_strings, 724 .get_strings = efx_ethtool_get_strings,
714 .phys_id = efx_ethtool_phys_id, 725 .phys_id = efx_ethtool_phys_id,
715 .get_stats_count = efx_ethtool_get_stats_count,
716 .get_ethtool_stats = efx_ethtool_get_stats, 726 .get_ethtool_stats = efx_ethtool_get_stats,
717}; 727};
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9138ee5b7b7b..31ed1f49de00 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -108,10 +108,10 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
108/* Max number of internal errors. After this resets will not be performed */ 108/* Max number of internal errors. After this resets will not be performed */
109#define FALCON_MAX_INT_ERRORS 4 109#define FALCON_MAX_INT_ERRORS 4
110 110
111/* Maximum period that we wait for flush events. If the flush event 111/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
112 * doesn't arrive in this period of time then we check if the queue 112 */
113 * was disabled anyway. */ 113#define FALCON_FLUSH_INTERVAL 10
114#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */ 114#define FALCON_FLUSH_POLL_COUNT 100
115 115
116/************************************************************************** 116/**************************************************************************
117 * 117 *
@@ -242,7 +242,7 @@ static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
242 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing 242 * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
243 * it to be used for event queues, descriptor rings etc. 243 * it to be used for event queues, descriptor rings etc.
244 */ 244 */
245static int 245static void
246falcon_init_special_buffer(struct efx_nic *efx, 246falcon_init_special_buffer(struct efx_nic *efx,
247 struct efx_special_buffer *buffer) 247 struct efx_special_buffer *buffer)
248{ 248{
@@ -266,8 +266,6 @@ falcon_init_special_buffer(struct efx_nic *efx,
266 BUF_OWNER_ID_FBUF, 0); 266 BUF_OWNER_ID_FBUF, 0);
267 falcon_write_sram(efx, &buf_desc, index); 267 falcon_write_sram(efx, &buf_desc, index);
268 } 268 }
269
270 return 0;
271} 269}
272 270
273/* Unmaps a buffer from Falcon and clears the buffer table entries */ 271/* Unmaps a buffer from Falcon and clears the buffer table entries */
@@ -449,16 +447,15 @@ int falcon_probe_tx(struct efx_tx_queue *tx_queue)
449 sizeof(efx_qword_t)); 447 sizeof(efx_qword_t));
450} 448}
451 449
452int falcon_init_tx(struct efx_tx_queue *tx_queue) 450void falcon_init_tx(struct efx_tx_queue *tx_queue)
453{ 451{
454 efx_oword_t tx_desc_ptr; 452 efx_oword_t tx_desc_ptr;
455 struct efx_nic *efx = tx_queue->efx; 453 struct efx_nic *efx = tx_queue->efx;
456 int rc; 454
455 tx_queue->flushed = false;
457 456
458 /* Pin TX descriptor ring */ 457 /* Pin TX descriptor ring */
459 rc = falcon_init_special_buffer(efx, &tx_queue->txd); 458 falcon_init_special_buffer(efx, &tx_queue->txd);
460 if (rc)
461 return rc;
462 459
463 /* Push TX descriptor ring to card */ 460 /* Push TX descriptor ring to card */
464 EFX_POPULATE_OWORD_10(tx_desc_ptr, 461 EFX_POPULATE_OWORD_10(tx_desc_ptr,
@@ -466,7 +463,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
466 TX_ISCSI_DDIG_EN, 0, 463 TX_ISCSI_DDIG_EN, 0,
467 TX_ISCSI_HDIG_EN, 0, 464 TX_ISCSI_HDIG_EN, 0,
468 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 465 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
469 TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum, 466 TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
470 TX_DESCQ_OWNER_ID, 0, 467 TX_DESCQ_OWNER_ID, 0,
471 TX_DESCQ_LABEL, tx_queue->queue, 468 TX_DESCQ_LABEL, tx_queue->queue,
472 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, 469 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
@@ -474,9 +471,9 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
474 TX_NON_IP_DROP_DIS_B0, 1); 471 TX_NON_IP_DROP_DIS_B0, 1);
475 472
476 if (falcon_rev(efx) >= FALCON_REV_B0) { 473 if (falcon_rev(efx) >= FALCON_REV_B0) {
477 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); 474 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
478 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); 475 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
479 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); 476 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
480 } 477 }
481 478
482 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 479 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
@@ -485,73 +482,28 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
485 if (falcon_rev(efx) < FALCON_REV_B0) { 482 if (falcon_rev(efx) < FALCON_REV_B0) {
486 efx_oword_t reg; 483 efx_oword_t reg;
487 484
488 BUG_ON(tx_queue->queue >= 128); /* HW limit */ 485 /* Only 128 bits in this register */
486 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
489 487
490 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 488 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
491 if (efx->net_dev->features & NETIF_F_IP_CSUM) 489 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
492 clear_bit_le(tx_queue->queue, (void *)&reg); 490 clear_bit_le(tx_queue->queue, (void *)&reg);
493 else 491 else
494 set_bit_le(tx_queue->queue, (void *)&reg); 492 set_bit_le(tx_queue->queue, (void *)&reg);
495 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 493 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
496 } 494 }
497
498 return 0;
499} 495}
500 496
501static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) 497static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
502{ 498{
503 struct efx_nic *efx = tx_queue->efx; 499 struct efx_nic *efx = tx_queue->efx;
504 struct efx_channel *channel = &efx->channel[0];
505 efx_oword_t tx_flush_descq; 500 efx_oword_t tx_flush_descq;
506 unsigned int read_ptr, i;
507 501
508 /* Post a flush command */ 502 /* Post a flush command */
509 EFX_POPULATE_OWORD_2(tx_flush_descq, 503 EFX_POPULATE_OWORD_2(tx_flush_descq,
510 TX_FLUSH_DESCQ_CMD, 1, 504 TX_FLUSH_DESCQ_CMD, 1,
511 TX_FLUSH_DESCQ, tx_queue->queue); 505 TX_FLUSH_DESCQ, tx_queue->queue);
512 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); 506 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
513 msleep(FALCON_FLUSH_TIMEOUT);
514
515 if (EFX_WORKAROUND_7803(efx))
516 return 0;
517
518 /* Look for a flush completed event */
519 read_ptr = channel->eventq_read_ptr;
520 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
521 efx_qword_t *event = falcon_event(channel, read_ptr);
522 int ev_code, ev_sub_code, ev_queue;
523 if (!falcon_event_present(event))
524 break;
525
526 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
527 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
528 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
529 if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
530 (ev_queue == tx_queue->queue)) {
531 EFX_LOG(efx, "tx queue %d flush command succesful\n",
532 tx_queue->queue);
533 return 0;
534 }
535
536 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
537 }
538
539 if (EFX_WORKAROUND_11557(efx)) {
540 efx_oword_t reg;
541 int enabled;
542
543 falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
544 tx_queue->queue);
545 enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
546 if (!enabled) {
547 EFX_LOG(efx, "tx queue %d disabled without a "
548 "flush event seen\n", tx_queue->queue);
549 return 0;
550 }
551 }
552
553 EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
554 return -ETIMEDOUT;
555} 507}
556 508
557void falcon_fini_tx(struct efx_tx_queue *tx_queue) 509void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -559,9 +511,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
559 struct efx_nic *efx = tx_queue->efx; 511 struct efx_nic *efx = tx_queue->efx;
560 efx_oword_t tx_desc_ptr; 512 efx_oword_t tx_desc_ptr;
561 513
562 /* Stop the hardware using the queue */ 514 /* The queue should have been flushed */
563 if (falcon_flush_tx_queue(tx_queue)) 515 WARN_ON(!tx_queue->flushed);
564 EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
565 516
566 /* Remove TX descriptor ring from card */ 517 /* Remove TX descriptor ring from card */
567 EFX_ZERO_OWORD(tx_desc_ptr); 518 EFX_ZERO_OWORD(tx_desc_ptr);
@@ -638,29 +589,28 @@ int falcon_probe_rx(struct efx_rx_queue *rx_queue)
638 sizeof(efx_qword_t)); 589 sizeof(efx_qword_t));
639} 590}
640 591
641int falcon_init_rx(struct efx_rx_queue *rx_queue) 592void falcon_init_rx(struct efx_rx_queue *rx_queue)
642{ 593{
643 efx_oword_t rx_desc_ptr; 594 efx_oword_t rx_desc_ptr;
644 struct efx_nic *efx = rx_queue->efx; 595 struct efx_nic *efx = rx_queue->efx;
645 int rc; 596 bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
646 int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; 597 bool iscsi_digest_en = is_b0;
647 int iscsi_digest_en = is_b0;
648 598
649 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", 599 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
650 rx_queue->queue, rx_queue->rxd.index, 600 rx_queue->queue, rx_queue->rxd.index,
651 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 601 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
652 602
603 rx_queue->flushed = false;
604
653 /* Pin RX descriptor ring */ 605 /* Pin RX descriptor ring */
654 rc = falcon_init_special_buffer(efx, &rx_queue->rxd); 606 falcon_init_special_buffer(efx, &rx_queue->rxd);
655 if (rc)
656 return rc;
657 607
658 /* Push RX descriptor ring to card */ 608 /* Push RX descriptor ring to card */
659 EFX_POPULATE_OWORD_10(rx_desc_ptr, 609 EFX_POPULATE_OWORD_10(rx_desc_ptr,
660 RX_ISCSI_DDIG_EN, iscsi_digest_en, 610 RX_ISCSI_DDIG_EN, iscsi_digest_en,
661 RX_ISCSI_HDIG_EN, iscsi_digest_en, 611 RX_ISCSI_HDIG_EN, iscsi_digest_en,
662 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 612 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
663 RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum, 613 RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
664 RX_DESCQ_OWNER_ID, 0, 614 RX_DESCQ_OWNER_ID, 0,
665 RX_DESCQ_LABEL, rx_queue->queue, 615 RX_DESCQ_LABEL, rx_queue->queue,
666 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, 616 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
@@ -670,14 +620,11 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
670 RX_DESCQ_EN, 1); 620 RX_DESCQ_EN, 1);
671 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 621 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
672 rx_queue->queue); 622 rx_queue->queue);
673 return 0;
674} 623}
675 624
676static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 625static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
677{ 626{
678 struct efx_nic *efx = rx_queue->efx; 627 struct efx_nic *efx = rx_queue->efx;
679 struct efx_channel *channel = &efx->channel[0];
680 unsigned int read_ptr, i;
681 efx_oword_t rx_flush_descq; 628 efx_oword_t rx_flush_descq;
682 629
683 /* Post a flush command */ 630 /* Post a flush command */
@@ -685,75 +632,15 @@ static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
685 RX_FLUSH_DESCQ_CMD, 1, 632 RX_FLUSH_DESCQ_CMD, 1,
686 RX_FLUSH_DESCQ, rx_queue->queue); 633 RX_FLUSH_DESCQ, rx_queue->queue);
687 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); 634 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
688 msleep(FALCON_FLUSH_TIMEOUT);
689
690 if (EFX_WORKAROUND_7803(efx))
691 return 0;
692
693 /* Look for a flush completed event */
694 read_ptr = channel->eventq_read_ptr;
695 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
696 efx_qword_t *event = falcon_event(channel, read_ptr);
697 int ev_code, ev_sub_code, ev_queue, ev_failed;
698 if (!falcon_event_present(event))
699 break;
700
701 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
702 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
703 ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
704 ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
705
706 if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
707 (ev_queue == rx_queue->queue)) {
708 if (ev_failed) {
709 EFX_INFO(efx, "rx queue %d flush command "
710 "failed\n", rx_queue->queue);
711 return -EAGAIN;
712 } else {
713 EFX_LOG(efx, "rx queue %d flush command "
714 "succesful\n", rx_queue->queue);
715 return 0;
716 }
717 }
718
719 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
720 }
721
722 if (EFX_WORKAROUND_11557(efx)) {
723 efx_oword_t reg;
724 int enabled;
725
726 falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
727 rx_queue->queue);
728 enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
729 if (!enabled) {
730 EFX_LOG(efx, "rx queue %d disabled without a "
731 "flush event seen\n", rx_queue->queue);
732 return 0;
733 }
734 }
735
736 EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
737 return -ETIMEDOUT;
738} 635}
739 636
740void falcon_fini_rx(struct efx_rx_queue *rx_queue) 637void falcon_fini_rx(struct efx_rx_queue *rx_queue)
741{ 638{
742 efx_oword_t rx_desc_ptr; 639 efx_oword_t rx_desc_ptr;
743 struct efx_nic *efx = rx_queue->efx; 640 struct efx_nic *efx = rx_queue->efx;
744 int i, rc;
745 641
746 /* Try and flush the rx queue. This may need to be repeated */ 642 /* The queue should already have been flushed */
747 for (i = 0; i < 5; i++) { 643 WARN_ON(!rx_queue->flushed);
748 rc = falcon_flush_rx_queue(rx_queue);
749 if (rc == -EAGAIN)
750 continue;
751 break;
752 }
753 if (rc) {
754 EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
755 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
756 }
757 644
758 /* Remove RX descriptor ring from card */ 645 /* Remove RX descriptor ring from card */
759 EFX_ZERO_OWORD(rx_desc_ptr); 646 EFX_ZERO_OWORD(rx_desc_ptr);
@@ -793,7 +680,7 @@ void falcon_eventq_read_ack(struct efx_channel *channel)
793 680
794 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); 681 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
795 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base, 682 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
796 channel->evqnum); 683 channel->channel);
797} 684}
798 685
799/* Use HW to insert a SW defined event */ 686/* Use HW to insert a SW defined event */
@@ -802,7 +689,7 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
802 efx_oword_t drv_ev_reg; 689 efx_oword_t drv_ev_reg;
803 690
804 EFX_POPULATE_OWORD_2(drv_ev_reg, 691 EFX_POPULATE_OWORD_2(drv_ev_reg,
805 DRV_EV_QID, channel->evqnum, 692 DRV_EV_QID, channel->channel,
806 DRV_EV_DATA, 693 DRV_EV_DATA,
807 EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); 694 EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
808 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); 695 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
@@ -813,8 +700,8 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
813 * Falcon batches TX completion events; the message we receive is of 700 * Falcon batches TX completion events; the message we receive is of
814 * the form "complete all TX events up to this index". 701 * the form "complete all TX events up to this index".
815 */ 702 */
816static inline void falcon_handle_tx_event(struct efx_channel *channel, 703static void falcon_handle_tx_event(struct efx_channel *channel,
817 efx_qword_t *event) 704 efx_qword_t *event)
818{ 705{
819 unsigned int tx_ev_desc_ptr; 706 unsigned int tx_ev_desc_ptr;
820 unsigned int tx_ev_q_label; 707 unsigned int tx_ev_q_label;
@@ -847,39 +734,19 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
847 } 734 }
848} 735}
849 736
850/* Check received packet's destination MAC address. */
851static int check_dest_mac(struct efx_rx_queue *rx_queue,
852 const efx_qword_t *event)
853{
854 struct efx_rx_buffer *rx_buf;
855 struct efx_nic *efx = rx_queue->efx;
856 int rx_ev_desc_ptr;
857 struct ethhdr *eh;
858
859 if (efx->promiscuous)
860 return 1;
861
862 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
863 rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
864 eh = (struct ethhdr *)rx_buf->data;
865 if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
866 return 0;
867 return 1;
868}
869
870/* Detect errors included in the rx_evt_pkt_ok bit. */ 737/* Detect errors included in the rx_evt_pkt_ok bit. */
871static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 738static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
872 const efx_qword_t *event, 739 const efx_qword_t *event,
873 unsigned *rx_ev_pkt_ok, 740 bool *rx_ev_pkt_ok,
874 int *discard, int byte_count) 741 bool *discard)
875{ 742{
876 struct efx_nic *efx = rx_queue->efx; 743 struct efx_nic *efx = rx_queue->efx;
877 unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 744 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
878 unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 745 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
879 unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 746 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
880 unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm; 747 bool rx_ev_other_err, rx_ev_pause_frm;
881 unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; 748 bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
882 int snap, non_ip; 749 unsigned rx_ev_pkt_type;
883 750
884 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 751 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
885 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 752 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
@@ -903,41 +770,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
903 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 770 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
904 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 771 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
905 772
906 snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
907 (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
908 non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
909
910 /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
911 * length field of an LLC frame, which sets TOBE_DISC. We could set
912 * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
913 * protect the RX block).
914 *
915 * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
916 * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
917 * LLC can't encapsulate IP, so by definition
918 * these packets are NON_IP.
919 *
920 * Unicast mismatch will also cause TOBE_DISC, so the driver needs
921 * to check this.
922 */
923 if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
924 /* If all the other flags are zero then we can state the
925 * entire packet is ok, which will flag to the kernel not
926 * to recalculate checksums.
927 */
928 if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
929 *rx_ev_pkt_ok = 1;
930
931 rx_ev_tobe_disc = 0;
932
933 /* TOBE_DISC is set for unicast mismatch. But given that
934 * we can't trust TOBE_DISC here, we must validate the dest
935 * MAC address ourselves.
936 */
937 if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
938 rx_ev_tobe_disc = 1;
939 }
940
941 /* Count errors that are not in MAC stats. */ 773 /* Count errors that are not in MAC stats. */
942 if (rx_ev_frm_trunc) 774 if (rx_ev_frm_trunc)
943 ++rx_queue->channel->n_rx_frm_trunc; 775 ++rx_queue->channel->n_rx_frm_trunc;
@@ -961,7 +793,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
961#ifdef EFX_ENABLE_DEBUG 793#ifdef EFX_ENABLE_DEBUG
962 if (rx_ev_other_err) { 794 if (rx_ev_other_err) {
963 EFX_INFO_RL(efx, " RX queue %d unexpected RX event " 795 EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
964 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n", 796 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
965 rx_queue->queue, EFX_QWORD_VAL(*event), 797 rx_queue->queue, EFX_QWORD_VAL(*event),
966 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 798 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
967 rx_ev_ip_hdr_chksum_err ? 799 rx_ev_ip_hdr_chksum_err ?
@@ -972,8 +804,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
972 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 804 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
973 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 805 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
974 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 806 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
975 rx_ev_pause_frm ? " [PAUSE]" : "", 807 rx_ev_pause_frm ? " [PAUSE]" : "");
976 snap ? " [SNAP/LLC]" : "");
977 } 808 }
978#endif 809#endif
979 810
@@ -1006,13 +837,13 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
1006 * Also "is multicast" and "matches multicast filter" flags can be used to 837 * Also "is multicast" and "matches multicast filter" flags can be used to
1007 * discard non-matching multicast packets. 838 * discard non-matching multicast packets.
1008 */ 839 */
1009static inline int falcon_handle_rx_event(struct efx_channel *channel, 840static void falcon_handle_rx_event(struct efx_channel *channel,
1010 const efx_qword_t *event) 841 const efx_qword_t *event)
1011{ 842{
1012 unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt; 843 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
1013 unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt; 844 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
1014 unsigned expected_ptr; 845 unsigned expected_ptr;
1015 int discard = 0, checksummed; 846 bool rx_ev_pkt_ok, discard = false, checksummed;
1016 struct efx_rx_queue *rx_queue; 847 struct efx_rx_queue *rx_queue;
1017 struct efx_nic *efx = channel->efx; 848 struct efx_nic *efx = channel->efx;
1018 849
@@ -1022,16 +853,14 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
1022 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 853 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
1023 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); 854 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
1024 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); 855 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
856 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);
1025 857
1026 rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL); 858 rx_queue = &efx->rx_queue[channel->channel];
1027 rx_queue = &efx->rx_queue[rx_ev_q_label];
1028 859
1029 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); 860 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
1030 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; 861 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
1031 if (unlikely(rx_ev_desc_ptr != expected_ptr)) { 862 if (unlikely(rx_ev_desc_ptr != expected_ptr))
1032 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 863 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
1033 return rx_ev_q_label;
1034 }
1035 864
1036 if (likely(rx_ev_pkt_ok)) { 865 if (likely(rx_ev_pkt_ok)) {
1037 /* If packet is marked as OK and packet type is TCP/IPv4 or 866 /* If packet is marked as OK and packet type is TCP/IPv4 or
@@ -1040,8 +869,8 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
1040 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); 869 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
1041 } else { 870 } else {
1042 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, 871 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
1043 &discard, rx_ev_byte_cnt); 872 &discard);
1044 checksummed = 0; 873 checksummed = false;
1045 } 874 }
1046 875
1047 /* Detect multicast packets that didn't match the filter */ 876 /* Detect multicast packets that didn't match the filter */
@@ -1051,14 +880,12 @@ static inline int falcon_handle_rx_event(struct efx_channel *channel,
1051 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); 880 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
1052 881
1053 if (unlikely(!rx_ev_mcast_hash_match)) 882 if (unlikely(!rx_ev_mcast_hash_match))
1054 discard = 1; 883 discard = true;
1055 } 884 }
1056 885
1057 /* Handle received packet */ 886 /* Handle received packet */
1058 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, 887 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
1059 checksummed, discard); 888 checksummed, discard);
1060
1061 return rx_ev_q_label;
1062} 889}
1063 890
1064/* Global events are basically PHY events */ 891/* Global events are basically PHY events */
@@ -1066,23 +893,23 @@ static void falcon_handle_global_event(struct efx_channel *channel,
1066 efx_qword_t *event) 893 efx_qword_t *event)
1067{ 894{
1068 struct efx_nic *efx = channel->efx; 895 struct efx_nic *efx = channel->efx;
1069 int is_phy_event = 0, handled = 0; 896 bool is_phy_event = false, handled = false;
1070 897
1071 /* Check for interrupt on either port. Some boards have a 898 /* Check for interrupt on either port. Some boards have a
1072 * single PHY wired to the interrupt line for port 1. */ 899 * single PHY wired to the interrupt line for port 1. */
1073 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || 900 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
1074 EFX_QWORD_FIELD(*event, G_PHY1_INTR) || 901 EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
1075 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 902 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
1076 is_phy_event = 1; 903 is_phy_event = true;
1077 904
1078 if ((falcon_rev(efx) >= FALCON_REV_B0) && 905 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
1079 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 906 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0))
1080 is_phy_event = 1; 907 is_phy_event = true;
1081 908
1082 if (is_phy_event) { 909 if (is_phy_event) {
1083 efx->phy_op->clear_interrupt(efx); 910 efx->phy_op->clear_interrupt(efx);
1084 queue_work(efx->workqueue, &efx->reconfigure_work); 911 queue_work(efx->workqueue, &efx->reconfigure_work);
1085 handled = 1; 912 handled = true;
1086 } 913 }
1087 914
1088 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { 915 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
@@ -1092,7 +919,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
1092 atomic_inc(&efx->rx_reset); 919 atomic_inc(&efx->rx_reset);
1093 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? 920 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1094 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 921 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1095 handled = 1; 922 handled = true;
1096 } 923 }
1097 924
1098 if (!handled) 925 if (!handled)
@@ -1163,13 +990,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
1163 } 990 }
1164} 991}
1165 992
1166int falcon_process_eventq(struct efx_channel *channel, int *rx_quota) 993int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1167{ 994{
1168 unsigned int read_ptr; 995 unsigned int read_ptr;
1169 efx_qword_t event, *p_event; 996 efx_qword_t event, *p_event;
1170 int ev_code; 997 int ev_code;
1171 int rxq; 998 int rx_packets = 0;
1172 int rxdmaqs = 0;
1173 999
1174 read_ptr = channel->eventq_read_ptr; 1000 read_ptr = channel->eventq_read_ptr;
1175 1001
@@ -1191,9 +1017,8 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
1191 1017
1192 switch (ev_code) { 1018 switch (ev_code) {
1193 case RX_IP_EV_DECODE: 1019 case RX_IP_EV_DECODE:
1194 rxq = falcon_handle_rx_event(channel, &event); 1020 falcon_handle_rx_event(channel, &event);
1195 rxdmaqs |= (1 << rxq); 1021 ++rx_packets;
1196 (*rx_quota)--;
1197 break; 1022 break;
1198 case TX_IP_EV_DECODE: 1023 case TX_IP_EV_DECODE:
1199 falcon_handle_tx_event(channel, &event); 1024 falcon_handle_tx_event(channel, &event);
@@ -1220,10 +1045,10 @@ int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
1220 /* Increment read pointer */ 1045 /* Increment read pointer */
1221 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1046 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1222 1047
1223 } while (*rx_quota); 1048 } while (rx_packets < rx_quota);
1224 1049
1225 channel->eventq_read_ptr = read_ptr; 1050 channel->eventq_read_ptr = read_ptr;
1226 return rxdmaqs; 1051 return rx_packets;
1227} 1052}
1228 1053
1229void falcon_set_int_moderation(struct efx_channel *channel) 1054void falcon_set_int_moderation(struct efx_channel *channel)
@@ -1251,7 +1076,7 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1251 TIMER_VAL, 0); 1076 TIMER_VAL, 0);
1252 } 1077 }
1253 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, 1078 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
1254 channel->evqnum); 1079 channel->channel);
1255 1080
1256} 1081}
1257 1082
@@ -1265,20 +1090,17 @@ int falcon_probe_eventq(struct efx_channel *channel)
1265 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); 1090 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
1266} 1091}
1267 1092
1268int falcon_init_eventq(struct efx_channel *channel) 1093void falcon_init_eventq(struct efx_channel *channel)
1269{ 1094{
1270 efx_oword_t evq_ptr; 1095 efx_oword_t evq_ptr;
1271 struct efx_nic *efx = channel->efx; 1096 struct efx_nic *efx = channel->efx;
1272 int rc;
1273 1097
1274 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", 1098 EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
1275 channel->channel, channel->eventq.index, 1099 channel->channel, channel->eventq.index,
1276 channel->eventq.index + channel->eventq.entries - 1); 1100 channel->eventq.index + channel->eventq.entries - 1);
1277 1101
1278 /* Pin event queue buffer */ 1102 /* Pin event queue buffer */
1279 rc = falcon_init_special_buffer(efx, &channel->eventq); 1103 falcon_init_special_buffer(efx, &channel->eventq);
1280 if (rc)
1281 return rc;
1282 1104
1283 /* Fill event queue with all ones (i.e. empty events) */ 1105 /* Fill event queue with all ones (i.e. empty events) */
1284 memset(channel->eventq.addr, 0xff, channel->eventq.len); 1106 memset(channel->eventq.addr, 0xff, channel->eventq.len);
@@ -1289,11 +1111,9 @@ int falcon_init_eventq(struct efx_channel *channel)
1289 EVQ_SIZE, FALCON_EVQ_ORDER, 1111 EVQ_SIZE, FALCON_EVQ_ORDER,
1290 EVQ_BUF_BASE_ID, channel->eventq.index); 1112 EVQ_BUF_BASE_ID, channel->eventq.index);
1291 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, 1113 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1292 channel->evqnum); 1114 channel->channel);
1293 1115
1294 falcon_set_int_moderation(channel); 1116 falcon_set_int_moderation(channel);
1295
1296 return 0;
1297} 1117}
1298 1118
1299void falcon_fini_eventq(struct efx_channel *channel) 1119void falcon_fini_eventq(struct efx_channel *channel)
@@ -1304,7 +1124,7 @@ void falcon_fini_eventq(struct efx_channel *channel)
1304 /* Remove event queue from card */ 1124 /* Remove event queue from card */
1305 EFX_ZERO_OWORD(eventq_ptr); 1125 EFX_ZERO_OWORD(eventq_ptr);
1306 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, 1126 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1307 channel->evqnum); 1127 channel->channel);
1308 1128
1309 /* Unpin event queue */ 1129 /* Unpin event queue */
1310 falcon_fini_special_buffer(efx, &channel->eventq); 1130 falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1331,6 +1151,121 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1331 falcon_generate_event(channel, &test_event); 1151 falcon_generate_event(channel, &test_event);
1332} 1152}
1333 1153
1154/**************************************************************************
1155 *
1156 * Flush handling
1157 *
1158 **************************************************************************/
1159
1160
1161static void falcon_poll_flush_events(struct efx_nic *efx)
1162{
1163 struct efx_channel *channel = &efx->channel[0];
1164 struct efx_tx_queue *tx_queue;
1165 struct efx_rx_queue *rx_queue;
1166 unsigned int read_ptr, i;
1167
1168 read_ptr = channel->eventq_read_ptr;
1169 for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
1170 efx_qword_t *event = falcon_event(channel, read_ptr);
1171 int ev_code, ev_sub_code, ev_queue;
1172 bool ev_failed;
1173 if (!falcon_event_present(event))
1174 break;
1175
1176 ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
1177 if (ev_code != DRIVER_EV_DECODE)
1178 continue;
1179
1180 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
1181 switch (ev_sub_code) {
1182 case TX_DESCQ_FLS_DONE_EV_DECODE:
1183 ev_queue = EFX_QWORD_FIELD(*event,
1184 DRIVER_EV_TX_DESCQ_ID);
1185 if (ev_queue < EFX_TX_QUEUE_COUNT) {
1186 tx_queue = efx->tx_queue + ev_queue;
1187 tx_queue->flushed = true;
1188 }
1189 break;
1190 case RX_DESCQ_FLS_DONE_EV_DECODE:
1191 ev_queue = EFX_QWORD_FIELD(*event,
1192 DRIVER_EV_RX_DESCQ_ID);
1193 ev_failed = EFX_QWORD_FIELD(*event,
1194 DRIVER_EV_RX_FLUSH_FAIL);
1195 if (ev_queue < efx->n_rx_queues) {
1196 rx_queue = efx->rx_queue + ev_queue;
1197
1198 /* retry the rx flush */
1199 if (ev_failed)
1200 falcon_flush_rx_queue(rx_queue);
1201 else
1202 rx_queue->flushed = true;
1203 }
1204 break;
1205 }
1206
1207 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
1208 }
1209}
1210
1211/* Handle tx and rx flushes at the same time, since they run in
1212 * parallel in the hardware and there's no reason for us to
1213 * serialise them */
1214int falcon_flush_queues(struct efx_nic *efx)
1215{
1216 struct efx_rx_queue *rx_queue;
1217 struct efx_tx_queue *tx_queue;
1218 int i;
1219 bool outstanding;
1220
1221 /* Issue flush requests */
1222 efx_for_each_tx_queue(tx_queue, efx) {
1223 tx_queue->flushed = false;
1224 falcon_flush_tx_queue(tx_queue);
1225 }
1226 efx_for_each_rx_queue(rx_queue, efx) {
1227 rx_queue->flushed = false;
1228 falcon_flush_rx_queue(rx_queue);
1229 }
1230
1231 /* Poll the evq looking for flush completions. Since we're not pushing
1232 * any more rx or tx descriptors at this point, we're in no danger of
1233 * overflowing the evq whilst we wait */
1234 for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
1235 msleep(FALCON_FLUSH_INTERVAL);
1236 falcon_poll_flush_events(efx);
1237
1238 /* Check if every queue has been succesfully flushed */
1239 outstanding = false;
1240 efx_for_each_tx_queue(tx_queue, efx)
1241 outstanding |= !tx_queue->flushed;
1242 efx_for_each_rx_queue(rx_queue, efx)
1243 outstanding |= !rx_queue->flushed;
1244 if (!outstanding)
1245 return 0;
1246 }
1247
1248 /* Mark the queues as all flushed. We're going to return failure
1249 * leading to a reset, or fake up success anyway. "flushed" now
1250 * indicates that we tried to flush. */
1251 efx_for_each_tx_queue(tx_queue, efx) {
1252 if (!tx_queue->flushed)
1253 EFX_ERR(efx, "tx queue %d flush command timed out\n",
1254 tx_queue->queue);
1255 tx_queue->flushed = true;
1256 }
1257 efx_for_each_rx_queue(rx_queue, efx) {
1258 if (!rx_queue->flushed)
1259 EFX_ERR(efx, "rx queue %d flush command timed out\n",
1260 rx_queue->queue);
1261 rx_queue->flushed = true;
1262 }
1263
1264 if (EFX_WORKAROUND_7803(efx))
1265 return 0;
1266
1267 return -ETIMEDOUT;
1268}
1334 1269
1335/************************************************************************** 1270/**************************************************************************
1336 * 1271 *
@@ -1371,7 +1306,7 @@ void falcon_enable_interrupts(struct efx_nic *efx)
1371 1306
1372 /* Force processing of all the channels to get the EVQ RPTRs up to 1307 /* Force processing of all the channels to get the EVQ RPTRs up to
1373 date */ 1308 date */
1374 efx_for_each_channel_with_interrupt(channel, efx) 1309 efx_for_each_channel(channel, efx)
1375 efx_schedule_channel(channel); 1310 efx_schedule_channel(channel);
1376} 1311}
1377 1312
@@ -1439,10 +1374,11 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1439 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1374 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1440 } 1375 }
1441 1376
1442 /* Disable DMA bus mastering on both devices */ 1377 /* Disable both devices */
1443 pci_disable_device(efx->pci_dev); 1378 pci_disable_device(efx->pci_dev);
1444 if (FALCON_IS_DUAL_FUNC(efx)) 1379 if (FALCON_IS_DUAL_FUNC(efx))
1445 pci_disable_device(nic_data->pci_dev2); 1380 pci_disable_device(nic_data->pci_dev2);
1381 falcon_disable_interrupts(efx);
1446 1382
1447 if (++n_int_errors < FALCON_MAX_INT_ERRORS) { 1383 if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
1448 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); 1384 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
@@ -1589,7 +1525,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1589 offset < RX_RSS_INDIR_TBL_B0 + 0x800; 1525 offset < RX_RSS_INDIR_TBL_B0 + 0x800;
1590 offset += 0x10) { 1526 offset += 0x10) {
1591 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, 1527 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
1592 i % efx->rss_queues); 1528 i % efx->n_rx_queues);
1593 falcon_writel(efx, &dword, offset); 1529 falcon_writel(efx, &dword, offset);
1594 i++; 1530 i++;
1595 } 1531 }
@@ -1621,7 +1557,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1621 } 1557 }
1622 1558
1623 /* Hook MSI or MSI-X interrupt */ 1559 /* Hook MSI or MSI-X interrupt */
1624 efx_for_each_channel_with_interrupt(channel, efx) { 1560 efx_for_each_channel(channel, efx) {
1625 rc = request_irq(channel->irq, falcon_msi_interrupt, 1561 rc = request_irq(channel->irq, falcon_msi_interrupt,
1626 IRQF_PROBE_SHARED, /* Not shared */ 1562 IRQF_PROBE_SHARED, /* Not shared */
1627 efx->name, channel); 1563 efx->name, channel);
@@ -1634,7 +1570,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1634 return 0; 1570 return 0;
1635 1571
1636 fail2: 1572 fail2:
1637 efx_for_each_channel_with_interrupt(channel, efx) 1573 efx_for_each_channel(channel, efx)
1638 free_irq(channel->irq, channel); 1574 free_irq(channel->irq, channel);
1639 fail1: 1575 fail1:
1640 return rc; 1576 return rc;
@@ -1646,7 +1582,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1646 efx_oword_t reg; 1582 efx_oword_t reg;
1647 1583
1648 /* Disable MSI/MSI-X interrupts */ 1584 /* Disable MSI/MSI-X interrupts */
1649 efx_for_each_channel_with_interrupt(channel, efx) { 1585 efx_for_each_channel(channel, efx) {
1650 if (channel->irq) 1586 if (channel->irq)
1651 free_irq(channel->irq, channel); 1587 free_irq(channel->irq, channel);
1652 } 1588 }
@@ -1669,69 +1605,200 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1669 ************************************************************************** 1605 **************************************************************************
1670 */ 1606 */
1671 1607
1672#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) 1608#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t))
1673 1609
1674/* Wait for SPI command completion */ 1610/* Wait for SPI command completion */
1675static int falcon_spi_wait(struct efx_nic *efx) 1611static int falcon_spi_wait(struct efx_nic *efx)
1676{ 1612{
1613 unsigned long timeout = jiffies + DIV_ROUND_UP(HZ, 10);
1677 efx_oword_t reg; 1614 efx_oword_t reg;
1678 int cmd_en, timer_active; 1615 bool cmd_en, timer_active;
1679 int count;
1680 1616
1681 count = 0; 1617 for (;;) {
1682 do {
1683 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER); 1618 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
1684 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN); 1619 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
1685 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE); 1620 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
1686 if (!cmd_en && !timer_active) 1621 if (!cmd_en && !timer_active)
1687 return 0; 1622 return 0;
1688 udelay(10); 1623 if (time_after_eq(jiffies, timeout)) {
1689 } while (++count < 10000); /* wait upto 100msec */ 1624 EFX_ERR(efx, "timed out waiting for SPI\n");
1690 EFX_ERR(efx, "timed out waiting for SPI\n"); 1625 return -ETIMEDOUT;
1691 return -ETIMEDOUT; 1626 }
1627 cpu_relax();
1628 }
1692} 1629}
1693 1630
1694static int 1631static int falcon_spi_cmd(const struct efx_spi_device *spi,
1695falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command, 1632 unsigned int command, int address,
1696 unsigned int address, unsigned int addr_len, 1633 const void *in, void *out, unsigned int len)
1697 void *data, unsigned int len)
1698{ 1634{
1635 struct efx_nic *efx = spi->efx;
1636 bool addressed = (address >= 0);
1637 bool reading = (out != NULL);
1699 efx_oword_t reg; 1638 efx_oword_t reg;
1700 int rc; 1639 int rc;
1701 1640
1702 BUG_ON(len > FALCON_SPI_MAX_LEN); 1641 /* Input validation */
1642 if (len > FALCON_SPI_MAX_LEN)
1643 return -EINVAL;
1703 1644
1704 /* Check SPI not currently being accessed */ 1645 /* Check SPI not currently being accessed */
1705 rc = falcon_spi_wait(efx); 1646 rc = falcon_spi_wait(efx);
1706 if (rc) 1647 if (rc)
1707 return rc; 1648 return rc;
1708 1649
1709 /* Program address register */ 1650 /* Program address register, if we have an address */
1710 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); 1651 if (addressed) {
1711 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER); 1652 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
1653 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
1654 }
1655
1656 /* Program data register, if we have data */
1657 if (in != NULL) {
1658 memcpy(&reg, in, len);
1659 falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
1660 }
1712 1661
1713 /* Issue read command */ 1662 /* Issue read/write command */
1714 EFX_POPULATE_OWORD_7(reg, 1663 EFX_POPULATE_OWORD_7(reg,
1715 EE_SPI_HCMD_CMD_EN, 1, 1664 EE_SPI_HCMD_CMD_EN, 1,
1716 EE_SPI_HCMD_SF_SEL, device_id, 1665 EE_SPI_HCMD_SF_SEL, spi->device_id,
1717 EE_SPI_HCMD_DABCNT, len, 1666 EE_SPI_HCMD_DABCNT, len,
1718 EE_SPI_HCMD_READ, EE_SPI_READ, 1667 EE_SPI_HCMD_READ, reading,
1719 EE_SPI_HCMD_DUBCNT, 0, 1668 EE_SPI_HCMD_DUBCNT, 0,
1720 EE_SPI_HCMD_ADBCNT, addr_len, 1669 EE_SPI_HCMD_ADBCNT,
1670 (addressed ? spi->addr_len : 0),
1721 EE_SPI_HCMD_ENC, command); 1671 EE_SPI_HCMD_ENC, command);
1722 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER); 1672 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
1723 1673
1724 /* Wait for read to complete */ 1674 /* Wait for read/write to complete */
1725 rc = falcon_spi_wait(efx); 1675 rc = falcon_spi_wait(efx);
1726 if (rc) 1676 if (rc)
1727 return rc; 1677 return rc;
1728 1678
1729 /* Read data */ 1679 /* Read data */
1730 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER); 1680 if (out != NULL) {
1731 memcpy(data, &reg, len); 1681 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
1682 memcpy(out, &reg, len);
1683 }
1684
1732 return 0; 1685 return 0;
1733} 1686}
1734 1687
1688static unsigned int
1689falcon_spi_write_limit(const struct efx_spi_device *spi, unsigned int start)
1690{
1691 return min(FALCON_SPI_MAX_LEN,
1692 (spi->block_size - (start & (spi->block_size - 1))));
1693}
1694
1695static inline u8
1696efx_spi_munge_command(const struct efx_spi_device *spi,
1697 const u8 command, const unsigned int address)
1698{
1699 return command | (((address >> 8) & spi->munge_address) << 3);
1700}
1701
1702
1703static int falcon_spi_fast_wait(const struct efx_spi_device *spi)
1704{
1705 u8 status;
1706 int i, rc;
1707
1708 /* Wait up to 1000us for flash/EEPROM to finish a fast operation. */
1709 for (i = 0; i < 50; i++) {
1710 udelay(20);
1711
1712 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
1713 &status, sizeof(status));
1714 if (rc)
1715 return rc;
1716 if (!(status & SPI_STATUS_NRDY))
1717 return 0;
1718 }
1719 EFX_ERR(spi->efx,
1720 "timed out waiting for device %d last status=0x%02x\n",
1721 spi->device_id, status);
1722 return -ETIMEDOUT;
1723}
1724
1725int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
1726 size_t len, size_t *retlen, u8 *buffer)
1727{
1728 unsigned int command, block_len, pos = 0;
1729 int rc = 0;
1730
1731 while (pos < len) {
1732 block_len = min((unsigned int)len - pos,
1733 FALCON_SPI_MAX_LEN);
1734
1735 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
1736 rc = falcon_spi_cmd(spi, command, start + pos, NULL,
1737 buffer + pos, block_len);
1738 if (rc)
1739 break;
1740 pos += block_len;
1741
1742 /* Avoid locking up the system */
1743 cond_resched();
1744 if (signal_pending(current)) {
1745 rc = -EINTR;
1746 break;
1747 }
1748 }
1749
1750 if (retlen)
1751 *retlen = pos;
1752 return rc;
1753}
1754
1755int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1756 size_t len, size_t *retlen, const u8 *buffer)
1757{
1758 u8 verify_buffer[FALCON_SPI_MAX_LEN];
1759 unsigned int command, block_len, pos = 0;
1760 int rc = 0;
1761
1762 while (pos < len) {
1763 rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
1764 if (rc)
1765 break;
1766
1767 block_len = min((unsigned int)len - pos,
1768 falcon_spi_write_limit(spi, start + pos));
1769 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
1770 rc = falcon_spi_cmd(spi, command, start + pos,
1771 buffer + pos, NULL, block_len);
1772 if (rc)
1773 break;
1774
1775 rc = falcon_spi_fast_wait(spi);
1776 if (rc)
1777 break;
1778
1779 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
1780 rc = falcon_spi_cmd(spi, command, start + pos,
1781 NULL, verify_buffer, block_len);
1782 if (memcmp(verify_buffer, buffer + pos, block_len)) {
1783 rc = -EIO;
1784 break;
1785 }
1786
1787 pos += block_len;
1788
1789 /* Avoid locking up the system */
1790 cond_resched();
1791 if (signal_pending(current)) {
1792 rc = -EINTR;
1793 break;
1794 }
1795 }
1796
1797 if (retlen)
1798 *retlen = pos;
1799 return rc;
1800}
1801
1735/************************************************************************** 1802/**************************************************************************
1736 * 1803 *
1737 * MAC wrapper 1804 * MAC wrapper
@@ -1812,7 +1879,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1812{ 1879{
1813 efx_oword_t reg; 1880 efx_oword_t reg;
1814 int link_speed; 1881 int link_speed;
1815 unsigned int tx_fc; 1882 bool tx_fc;
1816 1883
1817 if (efx->link_options & GM_LPA_10000) 1884 if (efx->link_options & GM_LPA_10000)
1818 link_speed = 0x3; 1885 link_speed = 0x3;
@@ -1847,7 +1914,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1847 /* Transmission of pause frames when RX crosses the threshold is 1914 /* Transmission of pause frames when RX crosses the threshold is
1848 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. 1915 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
1849 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ 1916 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
1850 tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0; 1917 tx_fc = !!(efx->flow_control & EFX_FC_TX);
1851 falcon_read(efx, &reg, RX_CFG_REG_KER); 1918 falcon_read(efx, &reg, RX_CFG_REG_KER);
1852 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1919 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1853 1920
@@ -1887,8 +1954,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
1887 1954
1888 /* Wait for transfer to complete */ 1955 /* Wait for transfer to complete */
1889 for (i = 0; i < 400; i++) { 1956 for (i = 0; i < 400; i++) {
1890 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) 1957 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
1958 rmb(); /* Ensure the stats are valid. */
1891 return 0; 1959 return 0;
1960 }
1892 udelay(10); 1961 udelay(10);
1893 } 1962 }
1894 1963
@@ -1951,7 +2020,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
1951static void falcon_mdio_write(struct net_device *net_dev, int phy_id, 2020static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
1952 int addr, int value) 2021 int addr, int value)
1953{ 2022{
1954 struct efx_nic *efx = net_dev->priv; 2023 struct efx_nic *efx = netdev_priv(net_dev);
1955 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; 2024 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
1956 efx_oword_t reg; 2025 efx_oword_t reg;
1957 2026
@@ -2019,7 +2088,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
2019 * could be read, -1 will be returned. */ 2088 * could be read, -1 will be returned. */
2020static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) 2089static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
2021{ 2090{
2022 struct efx_nic *efx = net_dev->priv; 2091 struct efx_nic *efx = netdev_priv(net_dev);
2023 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; 2092 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
2024 efx_oword_t reg; 2093 efx_oword_t reg;
2025 int value = -1; 2094 int value = -1;
@@ -2120,7 +2189,7 @@ int falcon_probe_port(struct efx_nic *efx)
2120 return rc; 2189 return rc;
2121 2190
2122 /* Set up GMII structure for PHY */ 2191 /* Set up GMII structure for PHY */
2123 efx->mii.supports_gmii = 1; 2192 efx->mii.supports_gmii = true;
2124 falcon_init_mdio(&efx->mii); 2193 falcon_init_mdio(&efx->mii);
2125 2194
2126 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2195 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
@@ -2168,6 +2237,170 @@ void falcon_set_multicast_hash(struct efx_nic *efx)
2168 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); 2237 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
2169} 2238}
2170 2239
2240
2241/**************************************************************************
2242 *
2243 * Falcon test code
2244 *
2245 **************************************************************************/
2246
2247int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2248{
2249 struct falcon_nvconfig *nvconfig;
2250 struct efx_spi_device *spi;
2251 void *region;
2252 int rc, magic_num, struct_ver;
2253 __le16 *word, *limit;
2254 u32 csum;
2255
2256 region = kmalloc(NVCONFIG_END, GFP_KERNEL);
2257 if (!region)
2258 return -ENOMEM;
2259 nvconfig = region + NVCONFIG_OFFSET;
2260
2261 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2262 rc = falcon_spi_read(spi, 0, NVCONFIG_END, NULL, region);
2263 if (rc) {
2264 EFX_ERR(efx, "Failed to read %s\n",
2265 efx->spi_flash ? "flash" : "EEPROM");
2266 rc = -EIO;
2267 goto out;
2268 }
2269
2270 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2271 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2272
2273 rc = -EINVAL;
2274 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) {
2275 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2276 goto out;
2277 }
2278 if (struct_ver < 2) {
2279 EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", struct_ver);
2280 goto out;
2281 } else if (struct_ver < 4) {
2282 word = &nvconfig->board_magic_num;
2283 limit = (__le16 *) (nvconfig + 1);
2284 } else {
2285 word = region;
2286 limit = region + NVCONFIG_END;
2287 }
2288 for (csum = 0; word < limit; ++word)
2289 csum += le16_to_cpu(*word);
2290
2291 if (~csum & 0xffff) {
2292 EFX_ERR(efx, "NVRAM has incorrect checksum\n");
2293 goto out;
2294 }
2295
2296 rc = 0;
2297 if (nvconfig_out)
2298 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
2299
2300 out:
2301 kfree(region);
2302 return rc;
2303}
2304
/* Registers tested in the falcon register test.  For each register,
 * the mask selects the bits that falcon_test_registers() may safely
 * toggle; all other bits are left alone. */
static struct {
	unsigned address;	/* register address */
	efx_oword_t mask;	/* testable bits */
} efx_test_registers[] = {
	{ ADR_REGION_REG_KER,
	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
	{ RX_CFG_REG_KER,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ TX_CFG_REG_KER,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ TX_CFG2_REG_KER,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ MAC0_CTRL_REG_KER,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ SRM_TX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_PF_WM_REG_KER,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ DP_CTRL_REG,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_GLB_CFG_REG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_TX_CFG_REG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_CFG_REG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_PARAM_REG,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_FC_REG,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_ADR_LO_REG,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ XX_SD_CTL_REG,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
2343
2344static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
2345 const efx_oword_t *mask)
2346{
2347 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
2348 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
2349}
2350
2351int falcon_test_registers(struct efx_nic *efx)
2352{
2353 unsigned address = 0, i, j;
2354 efx_oword_t mask, imask, original, reg, buf;
2355
2356 /* Falcon should be in loopback to isolate the XMAC from the PHY */
2357 WARN_ON(!LOOPBACK_INTERNAL(efx));
2358
2359 for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
2360 address = efx_test_registers[i].address;
2361 mask = imask = efx_test_registers[i].mask;
2362 EFX_INVERT_OWORD(imask);
2363
2364 falcon_read(efx, &original, address);
2365
2366 /* bit sweep on and off */
2367 for (j = 0; j < 128; j++) {
2368 if (!EFX_EXTRACT_OWORD32(mask, j, j))
2369 continue;
2370
2371 /* Test this testable bit can be set in isolation */
2372 EFX_AND_OWORD(reg, original, mask);
2373 EFX_SET_OWORD32(reg, j, j, 1);
2374
2375 falcon_write(efx, &reg, address);
2376 falcon_read(efx, &buf, address);
2377
2378 if (efx_masked_compare_oword(&reg, &buf, &mask))
2379 goto fail;
2380
2381 /* Test this testable bit can be cleared in isolation */
2382 EFX_OR_OWORD(reg, original, mask);
2383 EFX_SET_OWORD32(reg, j, j, 0);
2384
2385 falcon_write(efx, &reg, address);
2386 falcon_read(efx, &buf, address);
2387
2388 if (efx_masked_compare_oword(&reg, &buf, &mask))
2389 goto fail;
2390 }
2391
2392 falcon_write(efx, &original, address);
2393 }
2394
2395 return 0;
2396
2397fail:
2398 EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
2399 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
2400 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
2401 return -EIO;
2402}
2403
2171/************************************************************************** 2404/**************************************************************************
2172 * 2405 *
2173 * Device reset 2406 * Device reset
@@ -2305,68 +2538,103 @@ static int falcon_reset_sram(struct efx_nic *efx)
2305 return -ETIMEDOUT; 2538 return -ETIMEDOUT;
2306} 2539}
2307 2540
2541static int falcon_spi_device_init(struct efx_nic *efx,
2542 struct efx_spi_device **spi_device_ret,
2543 unsigned int device_id, u32 device_type)
2544{
2545 struct efx_spi_device *spi_device;
2546
2547 if (device_type != 0) {
2548 spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL);
2549 if (!spi_device)
2550 return -ENOMEM;
2551 spi_device->device_id = device_id;
2552 spi_device->size =
2553 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
2554 spi_device->addr_len =
2555 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2556 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2557 spi_device->addr_len == 1);
2558 spi_device->block_size =
2559 1 << SPI_DEV_TYPE_FIELD(device_type,
2560 SPI_DEV_TYPE_BLOCK_SIZE);
2561
2562 spi_device->efx = efx;
2563 } else {
2564 spi_device = NULL;
2565 }
2566
2567 kfree(*spi_device_ret);
2568 *spi_device_ret = spi_device;
2569 return 0;
2570}
2571
2572
2573static void falcon_remove_spi_devices(struct efx_nic *efx)
2574{
2575 kfree(efx->spi_eeprom);
2576 efx->spi_eeprom = NULL;
2577 kfree(efx->spi_flash);
2578 efx->spi_flash = NULL;
2579}
2580
2308/* Extract non-volatile configuration */ 2581/* Extract non-volatile configuration */
2309static int falcon_probe_nvconfig(struct efx_nic *efx) 2582static int falcon_probe_nvconfig(struct efx_nic *efx)
2310{ 2583{
2311 struct falcon_nvconfig *nvconfig; 2584 struct falcon_nvconfig *nvconfig;
2312 efx_oword_t nic_stat; 2585 int board_rev;
2313 int device_id;
2314 unsigned addr_len;
2315 size_t offset, len;
2316 int magic_num, struct_ver, board_rev;
2317 int rc; 2586 int rc;
2318 2587
2319 /* Find the boot device. */
2320 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2321 if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
2322 device_id = EE_SPI_FLASH;
2323 addr_len = 3;
2324 } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
2325 device_id = EE_SPI_EEPROM;
2326 addr_len = 2;
2327 } else {
2328 return -ENODEV;
2329 }
2330
2331 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); 2588 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
2589 if (!nvconfig)
2590 return -ENOMEM;
2332 2591
2333 /* Read the whole configuration structure into memory. */ 2592 rc = falcon_read_nvram(efx, nvconfig);
2334 for (offset = 0; offset < sizeof(*nvconfig); offset += len) { 2593 if (rc == -EINVAL) {
2335 len = min(sizeof(*nvconfig) - offset, 2594 EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
2336 (size_t) FALCON_SPI_MAX_LEN);
2337 rc = falcon_spi_read(efx, device_id, SPI_READ,
2338 NVCONFIG_BASE + offset, addr_len,
2339 (char *)nvconfig + offset, len);
2340 if (rc)
2341 goto out;
2342 }
2343
2344 /* Read the MAC addresses */
2345 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2346
2347 /* Read the board configuration. */
2348 magic_num = le16_to_cpu(nvconfig->board_magic_num);
2349 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2350
2351 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
2352 EFX_ERR(efx, "Non volatile memory bad magic=%x ver=%x "
2353 "therefore using defaults\n", magic_num, struct_ver);
2354 efx->phy_type = PHY_TYPE_NONE; 2595 efx->phy_type = PHY_TYPE_NONE;
2355 efx->mii.phy_id = PHY_ADDR_INVALID; 2596 efx->mii.phy_id = PHY_ADDR_INVALID;
2356 board_rev = 0; 2597 board_rev = 0;
2598 rc = 0;
2599 } else if (rc) {
2600 goto fail1;
2357 } else { 2601 } else {
2358 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; 2602 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
2603 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
2359 2604
2360 efx->phy_type = v2->port0_phy_type; 2605 efx->phy_type = v2->port0_phy_type;
2361 efx->mii.phy_id = v2->port0_phy_addr; 2606 efx->mii.phy_id = v2->port0_phy_addr;
2362 board_rev = le16_to_cpu(v2->board_revision); 2607 board_rev = le16_to_cpu(v2->board_revision);
2608
2609 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2610 __le32 fl = v3->spi_device_type[EE_SPI_FLASH];
2611 __le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
2612 rc = falcon_spi_device_init(efx, &efx->spi_flash,
2613 EE_SPI_FLASH,
2614 le32_to_cpu(fl));
2615 if (rc)
2616 goto fail2;
2617 rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
2618 EE_SPI_EEPROM,
2619 le32_to_cpu(ee));
2620 if (rc)
2621 goto fail2;
2622 }
2363 } 2623 }
2364 2624
2625 /* Read the MAC addresses */
2626 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
2627
2365 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id); 2628 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
2366 2629
2367 efx_set_board_info(efx, board_rev); 2630 efx_set_board_info(efx, board_rev);
2368 2631
2369 out: 2632 kfree(nvconfig);
2633 return 0;
2634
2635 fail2:
2636 falcon_remove_spi_devices(efx);
2637 fail1:
2370 kfree(nvconfig); 2638 kfree(nvconfig);
2371 return rc; 2639 return rc;
2372} 2640}
@@ -2417,6 +2685,86 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2417 return 0; 2685 return 0;
2418} 2686}
2419 2687
2688/* Probe all SPI devices on the NIC */
2689static void falcon_probe_spi_devices(struct efx_nic *efx)
2690{
2691 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2692 bool has_flash, has_eeprom, boot_is_external;
2693
2694 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
2695 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2696 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2697
2698 has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST);
2699 has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST);
2700 boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE);
2701
2702 if (has_flash) {
2703 /* Default flash SPI device: Atmel AT25F1024
2704 * 128 KB, 24-bit address, 32 KB erase block,
2705 * 256 B write block
2706 */
2707 u32 flash_device_type =
2708 (17 << SPI_DEV_TYPE_SIZE_LBN)
2709 | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2710 | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
2711 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
2712 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2713
2714 falcon_spi_device_init(efx, &efx->spi_flash,
2715 EE_SPI_FLASH, flash_device_type);
2716
2717 if (!boot_is_external) {
2718 /* Disable VPD and set clock dividers to safe
2719 * values for initial programming.
2720 */
2721 EFX_LOG(efx, "Booted from internal ASIC settings;"
2722 " setting SPI config\n");
2723 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
2724 /* 125 MHz / 7 ~= 20 MHz */
2725 EE_SF_CLOCK_DIV, 7,
2726 /* 125 MHz / 63 ~= 2 MHz */
2727 EE_EE_CLOCK_DIV, 63);
2728 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2729 }
2730 }
2731
2732 if (has_eeprom) {
2733 u32 eeprom_device_type;
2734
2735 /* If it has no flash, it must have a large EEPROM
2736 * for chip config; otherwise check whether 9-bit
2737 * addressing is used for VPD configuration
2738 */
2739 if (has_flash &&
2740 (!boot_is_external ||
2741 EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) {
2742 /* Default SPI device: Atmel AT25040 or similar
2743 * 512 B, 9-bit address, 8 B write block
2744 */
2745 eeprom_device_type =
2746 (9 << SPI_DEV_TYPE_SIZE_LBN)
2747 | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2748 | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2749 } else {
2750 /* "Large" SPI device: Atmel AT25640 or similar
2751 * 8 KB, 16-bit address, 32 B write block
2752 */
2753 eeprom_device_type =
2754 (13 << SPI_DEV_TYPE_SIZE_LBN)
2755 | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2756 | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2757 }
2758
2759 falcon_spi_device_init(efx, &efx->spi_eeprom,
2760 EE_SPI_EEPROM, eeprom_device_type);
2761 }
2762
2763 EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
2764 (has_flash ? "present" : "absent"),
2765 (has_eeprom ? "present" : "absent"));
2766}
2767
2420int falcon_probe_nic(struct efx_nic *efx) 2768int falcon_probe_nic(struct efx_nic *efx)
2421{ 2769{
2422 struct falcon_nic_data *nic_data; 2770 struct falcon_nic_data *nic_data;
@@ -2424,6 +2772,8 @@ int falcon_probe_nic(struct efx_nic *efx)
2424 2772
2425 /* Allocate storage for hardware specific data */ 2773 /* Allocate storage for hardware specific data */
2426 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2774 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
2775 if (!nic_data)
2776 return -ENOMEM;
2427 efx->nic_data = nic_data; 2777 efx->nic_data = nic_data;
2428 2778
2429 /* Determine number of ports etc. */ 2779 /* Determine number of ports etc. */
@@ -2467,6 +2817,8 @@ int falcon_probe_nic(struct efx_nic *efx)
2467 (unsigned long long)efx->irq_status.dma_addr, 2817 (unsigned long long)efx->irq_status.dma_addr,
2468 efx->irq_status.addr, virt_to_phys(efx->irq_status.addr)); 2818 efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
2469 2819
2820 falcon_probe_spi_devices(efx);
2821
2470 /* Read in the non-volatile configuration */ 2822 /* Read in the non-volatile configuration */
2471 rc = falcon_probe_nvconfig(efx); 2823 rc = falcon_probe_nvconfig(efx);
2472 if (rc) 2824 if (rc)
@@ -2486,6 +2838,7 @@ int falcon_probe_nic(struct efx_nic *efx)
2486 return 0; 2838 return 0;
2487 2839
2488 fail5: 2840 fail5:
2841 falcon_remove_spi_devices(efx);
2489 falcon_free_buffer(efx, &efx->irq_status); 2842 falcon_free_buffer(efx, &efx->irq_status);
2490 fail4: 2843 fail4:
2491 fail3: 2844 fail3:
@@ -2573,19 +2926,14 @@ int falcon_init_nic(struct efx_nic *efx)
2573 EFX_INVERT_OWORD(temp); 2926 EFX_INVERT_OWORD(temp);
2574 falcon_write(efx, &temp, FATAL_INTR_REG_KER); 2927 falcon_write(efx, &temp, FATAL_INTR_REG_KER);
2575 2928
2576 /* Set number of RSS queues for receive path. */
2577 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2578 if (falcon_rev(efx) >= FALCON_REV_B0)
2579 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
2580 else
2581 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
2582 if (EFX_WORKAROUND_7244(efx)) { 2929 if (EFX_WORKAROUND_7244(efx)) {
2930 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
2583 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); 2931 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
2584 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); 2932 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
2585 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); 2933 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
2586 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); 2934 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
2935 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2587 } 2936 }
2588 falcon_write(efx, &temp, RX_FILTER_CTL_REG);
2589 2937
2590 falcon_setup_rss_indir_table(efx); 2938 falcon_setup_rss_indir_table(efx);
2591 2939
@@ -2641,8 +2989,8 @@ int falcon_init_nic(struct efx_nic *efx)
2641 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh); 2989 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
2642 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256); 2990 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
2643 /* RX control FIFO thresholds [32 entries] */ 2991 /* RX control FIFO thresholds [32 entries] */
2644 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25); 2992 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
2645 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20); 2993 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
2646 falcon_write(efx, &temp, RX_CFG_REG_KER); 2994 falcon_write(efx, &temp, RX_CFG_REG_KER);
2647 2995
2648 /* Set destination of both TX and RX Flush events */ 2996 /* Set destination of both TX and RX Flush events */
@@ -2662,6 +3010,7 @@ void falcon_remove_nic(struct efx_nic *efx)
2662 rc = i2c_del_adapter(&efx->i2c_adap); 3010 rc = i2c_del_adapter(&efx->i2c_adap);
2663 BUG_ON(rc); 3011 BUG_ON(rc);
2664 3012
3013 falcon_remove_spi_devices(efx);
2665 falcon_free_buffer(efx, &efx->irq_status); 3014 falcon_free_buffer(efx, &efx->irq_status);
2666 3015
2667 falcon_reset_hw(efx, RESET_TYPE_ALL); 3016 falcon_reset_hw(efx, RESET_TYPE_ALL);
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 492f9bc28840..be025ba7a6c6 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -40,24 +40,24 @@ extern struct efx_nic_type falcon_b_nic_type;
40 40
41/* TX data path */ 41/* TX data path */
42extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); 42extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
43extern int falcon_init_tx(struct efx_tx_queue *tx_queue); 43extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
44extern void falcon_fini_tx(struct efx_tx_queue *tx_queue); 44extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
45extern void falcon_remove_tx(struct efx_tx_queue *tx_queue); 45extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
46extern void falcon_push_buffers(struct efx_tx_queue *tx_queue); 46extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
47 47
48/* RX data path */ 48/* RX data path */
49extern int falcon_probe_rx(struct efx_rx_queue *rx_queue); 49extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
50extern int falcon_init_rx(struct efx_rx_queue *rx_queue); 50extern void falcon_init_rx(struct efx_rx_queue *rx_queue);
51extern void falcon_fini_rx(struct efx_rx_queue *rx_queue); 51extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
52extern void falcon_remove_rx(struct efx_rx_queue *rx_queue); 52extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
53extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue); 53extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
54 54
55/* Event data path */ 55/* Event data path */
56extern int falcon_probe_eventq(struct efx_channel *channel); 56extern int falcon_probe_eventq(struct efx_channel *channel);
57extern int falcon_init_eventq(struct efx_channel *channel); 57extern void falcon_init_eventq(struct efx_channel *channel);
58extern void falcon_fini_eventq(struct efx_channel *channel); 58extern void falcon_fini_eventq(struct efx_channel *channel);
59extern void falcon_remove_eventq(struct efx_channel *channel); 59extern void falcon_remove_eventq(struct efx_channel *channel);
60extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota); 60extern int falcon_process_eventq(struct efx_channel *channel, int rx_quota);
61extern void falcon_eventq_read_ack(struct efx_channel *channel); 61extern void falcon_eventq_read_ack(struct efx_channel *channel);
62 62
63/* Ports */ 63/* Ports */
@@ -65,7 +65,7 @@ extern int falcon_probe_port(struct efx_nic *efx);
65extern void falcon_remove_port(struct efx_nic *efx); 65extern void falcon_remove_port(struct efx_nic *efx);
66 66
67/* MAC/PHY */ 67/* MAC/PHY */
68extern int falcon_xaui_link_ok(struct efx_nic *efx); 68extern bool falcon_xaui_link_ok(struct efx_nic *efx);
69extern int falcon_dma_stats(struct efx_nic *efx, 69extern int falcon_dma_stats(struct efx_nic *efx,
70 unsigned int done_offset); 70 unsigned int done_offset);
71extern void falcon_drain_tx_fifo(struct efx_nic *efx); 71extern void falcon_drain_tx_fifo(struct efx_nic *efx);
@@ -86,6 +86,7 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
86extern int falcon_probe_nic(struct efx_nic *efx); 86extern int falcon_probe_nic(struct efx_nic *efx);
87extern int falcon_probe_resources(struct efx_nic *efx); 87extern int falcon_probe_resources(struct efx_nic *efx);
88extern int falcon_init_nic(struct efx_nic *efx); 88extern int falcon_init_nic(struct efx_nic *efx);
89extern int falcon_flush_queues(struct efx_nic *efx);
89extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 90extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
90extern void falcon_remove_resources(struct efx_nic *efx); 91extern void falcon_remove_resources(struct efx_nic *efx);
91extern void falcon_remove_nic(struct efx_nic *efx); 92extern void falcon_remove_nic(struct efx_nic *efx);
@@ -93,6 +94,12 @@ extern void falcon_update_nic_stats(struct efx_nic *efx);
93extern void falcon_set_multicast_hash(struct efx_nic *efx); 94extern void falcon_set_multicast_hash(struct efx_nic *efx);
94extern int falcon_reset_xaui(struct efx_nic *efx); 95extern int falcon_reset_xaui(struct efx_nic *efx);
95 96
97/* Tests */
98struct falcon_nvconfig;
99extern int falcon_read_nvram(struct efx_nic *efx,
100 struct falcon_nvconfig *nvconfig);
101extern int falcon_test_registers(struct efx_nic *efx);
102
96/************************************************************************** 103/**************************************************************************
97 * 104 *
98 * Falcon MAC stats 105 * Falcon MAC stats
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 6d003114eeab..5d584b0dbb51 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -92,6 +92,17 @@
92/* SPI host data register */ 92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120 93#define EE_SPI_HDATA_REG_KER 0x0120
94 94
95/* SPI/VPD config register */
96#define EE_VPD_CFG_REG_KER 0x0140
97#define EE_VPD_EN_LBN 0
98#define EE_VPD_EN_WIDTH 1
99#define EE_VPD_EN_AD9_MODE_LBN 1
100#define EE_VPD_EN_AD9_MODE_WIDTH 1
101#define EE_EE_CLOCK_DIV_LBN 112
102#define EE_EE_CLOCK_DIV_WIDTH 7
103#define EE_SF_CLOCK_DIV_LBN 120
104#define EE_SF_CLOCK_DIV_WIDTH 7
105
95/* PCIE CORE ACCESS REG */ 106/* PCIE CORE ACCESS REG */
96#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68 107#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
97#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70 108#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
@@ -106,7 +117,6 @@
106#define SF_PRST_WIDTH 1 117#define SF_PRST_WIDTH 1
107#define EE_PRST_LBN 8 118#define EE_PRST_LBN 8
108#define EE_PRST_WIDTH 1 119#define EE_PRST_WIDTH 1
109/* See pic_mode_t for decoding of this field */
110/* These bit definitions are extrapolated from the list of numerical 120/* These bit definitions are extrapolated from the list of numerical
111 * values for STRAP_PINS. 121 * values for STRAP_PINS.
112 */ 122 */
@@ -115,6 +125,9 @@
115#define STRAP_PCIE_LBN 0 125#define STRAP_PCIE_LBN 0
116#define STRAP_PCIE_WIDTH 1 126#define STRAP_PCIE_WIDTH 1
117 127
128#define BOOTED_USING_NVDEVICE_LBN 3
129#define BOOTED_USING_NVDEVICE_WIDTH 1
130
118/* GPIO control register */ 131/* GPIO control register */
119#define GPIO_CTL_REG_KER 0x0210 132#define GPIO_CTL_REG_KER 0x0210
120#define GPIO_OUTPUTS_LBN (16) 133#define GPIO_OUTPUTS_LBN (16)
@@ -479,18 +492,8 @@
479#define MAC_MCAST_HASH_REG0_KER 0xca0 492#define MAC_MCAST_HASH_REG0_KER 0xca0
480#define MAC_MCAST_HASH_REG1_KER 0xcb0 493#define MAC_MCAST_HASH_REG1_KER 0xcb0
481 494
482/* GMAC registers */
483#define FALCON_GMAC_REGBANK 0xe00
484#define FALCON_GMAC_REGBANK_SIZE 0x200
485#define FALCON_GMAC_REG_SIZE 0x10
486
487/* XMAC registers */
488#define FALCON_XMAC_REGBANK 0x1200
489#define FALCON_XMAC_REGBANK_SIZE 0x200
490#define FALCON_XMAC_REG_SIZE 0x10
491
492/* XGMAC address register low */ 495/* XGMAC address register low */
493#define XM_ADR_LO_REG_MAC 0x00 496#define XM_ADR_LO_REG 0x1200
494#define XM_ADR_3_LBN 24 497#define XM_ADR_3_LBN 24
495#define XM_ADR_3_WIDTH 8 498#define XM_ADR_3_WIDTH 8
496#define XM_ADR_2_LBN 16 499#define XM_ADR_2_LBN 16
@@ -501,14 +504,14 @@
501#define XM_ADR_0_WIDTH 8 504#define XM_ADR_0_WIDTH 8
502 505
503/* XGMAC address register high */ 506/* XGMAC address register high */
504#define XM_ADR_HI_REG_MAC 0x01 507#define XM_ADR_HI_REG 0x1210
505#define XM_ADR_5_LBN 8 508#define XM_ADR_5_LBN 8
506#define XM_ADR_5_WIDTH 8 509#define XM_ADR_5_WIDTH 8
507#define XM_ADR_4_LBN 0 510#define XM_ADR_4_LBN 0
508#define XM_ADR_4_WIDTH 8 511#define XM_ADR_4_WIDTH 8
509 512
510/* XGMAC global configuration */ 513/* XGMAC global configuration */
511#define XM_GLB_CFG_REG_MAC 0x02 514#define XM_GLB_CFG_REG 0x1220
512#define XM_RX_STAT_EN_LBN 11 515#define XM_RX_STAT_EN_LBN 11
513#define XM_RX_STAT_EN_WIDTH 1 516#define XM_RX_STAT_EN_WIDTH 1
514#define XM_TX_STAT_EN_LBN 10 517#define XM_TX_STAT_EN_LBN 10
@@ -521,7 +524,7 @@
521#define XM_CORE_RST_WIDTH 1 524#define XM_CORE_RST_WIDTH 1
522 525
523/* XGMAC transmit configuration */ 526/* XGMAC transmit configuration */
524#define XM_TX_CFG_REG_MAC 0x03 527#define XM_TX_CFG_REG 0x1230
525#define XM_IPG_LBN 16 528#define XM_IPG_LBN 16
526#define XM_IPG_WIDTH 4 529#define XM_IPG_WIDTH 4
527#define XM_FCNTL_LBN 10 530#define XM_FCNTL_LBN 10
@@ -536,7 +539,7 @@
536#define XM_TXEN_WIDTH 1 539#define XM_TXEN_WIDTH 1
537 540
538/* XGMAC receive configuration */ 541/* XGMAC receive configuration */
539#define XM_RX_CFG_REG_MAC 0x04 542#define XM_RX_CFG_REG 0x1240
540#define XM_PASS_CRC_ERR_LBN 25 543#define XM_PASS_CRC_ERR_LBN 25
541#define XM_PASS_CRC_ERR_WIDTH 1 544#define XM_PASS_CRC_ERR_WIDTH 1
542#define XM_ACPT_ALL_MCAST_LBN 11 545#define XM_ACPT_ALL_MCAST_LBN 11
@@ -549,7 +552,7 @@
549#define XM_RXEN_WIDTH 1 552#define XM_RXEN_WIDTH 1
550 553
551/* XGMAC management interrupt mask register */ 554/* XGMAC management interrupt mask register */
552#define XM_MGT_INT_MSK_REG_MAC_B0 0x5 555#define XM_MGT_INT_MSK_REG_B0 0x1250
553#define XM_MSK_PRMBLE_ERR_LBN 2 556#define XM_MSK_PRMBLE_ERR_LBN 2
554#define XM_MSK_PRMBLE_ERR_WIDTH 1 557#define XM_MSK_PRMBLE_ERR_WIDTH 1
555#define XM_MSK_RMTFLT_LBN 1 558#define XM_MSK_RMTFLT_LBN 1
@@ -558,29 +561,29 @@
558#define XM_MSK_LCLFLT_WIDTH 1 561#define XM_MSK_LCLFLT_WIDTH 1
559 562
560/* XGMAC flow control register */ 563/* XGMAC flow control register */
561#define XM_FC_REG_MAC 0x7 564#define XM_FC_REG 0x1270
562#define XM_PAUSE_TIME_LBN 16 565#define XM_PAUSE_TIME_LBN 16
563#define XM_PAUSE_TIME_WIDTH 16 566#define XM_PAUSE_TIME_WIDTH 16
564#define XM_DIS_FCNTL_LBN 0 567#define XM_DIS_FCNTL_LBN 0
565#define XM_DIS_FCNTL_WIDTH 1 568#define XM_DIS_FCNTL_WIDTH 1
566 569
567/* XGMAC pause time count register */ 570/* XGMAC pause time count register */
568#define XM_PAUSE_TIME_REG_MAC 0x9 571#define XM_PAUSE_TIME_REG 0x1290
569 572
570/* XGMAC transmit parameter register */ 573/* XGMAC transmit parameter register */
571#define XM_TX_PARAM_REG_MAC 0x0d 574#define XM_TX_PARAM_REG 0x012d0
572#define XM_TX_JUMBO_MODE_LBN 31 575#define XM_TX_JUMBO_MODE_LBN 31
573#define XM_TX_JUMBO_MODE_WIDTH 1 576#define XM_TX_JUMBO_MODE_WIDTH 1
574#define XM_MAX_TX_FRM_SIZE_LBN 16 577#define XM_MAX_TX_FRM_SIZE_LBN 16
575#define XM_MAX_TX_FRM_SIZE_WIDTH 14 578#define XM_MAX_TX_FRM_SIZE_WIDTH 14
576 579
577/* XGMAC receive parameter register */ 580/* XGMAC receive parameter register */
578#define XM_RX_PARAM_REG_MAC 0x0e 581#define XM_RX_PARAM_REG 0x12e0
579#define XM_MAX_RX_FRM_SIZE_LBN 0 582#define XM_MAX_RX_FRM_SIZE_LBN 0
580#define XM_MAX_RX_FRM_SIZE_WIDTH 14 583#define XM_MAX_RX_FRM_SIZE_WIDTH 14
581 584
582/* XGMAC management interrupt status register */ 585/* XGMAC management interrupt status register */
583#define XM_MGT_INT_REG_MAC_B0 0x0f 586#define XM_MGT_INT_REG_B0 0x12f0
584#define XM_PRMBLE_ERR 2 587#define XM_PRMBLE_ERR 2
585#define XM_PRMBLE_WIDTH 1 588#define XM_PRMBLE_WIDTH 1
586#define XM_RMTFLT_LBN 1 589#define XM_RMTFLT_LBN 1
@@ -589,7 +592,7 @@
589#define XM_LCLFLT_WIDTH 1 592#define XM_LCLFLT_WIDTH 1
590 593
591/* XGXS/XAUI powerdown/reset register */ 594/* XGXS/XAUI powerdown/reset register */
592#define XX_PWR_RST_REG_MAC 0x10 595#define XX_PWR_RST_REG 0x1300
593 596
594#define XX_PWRDND_EN_LBN 15 597#define XX_PWRDND_EN_LBN 15
595#define XX_PWRDND_EN_WIDTH 1 598#define XX_PWRDND_EN_WIDTH 1
@@ -619,7 +622,7 @@
619#define XX_RST_XX_EN_WIDTH 1 622#define XX_RST_XX_EN_WIDTH 1
620 623
621/* XGXS/XAUI powerdown/reset control register */ 624/* XGXS/XAUI powerdown/reset control register */
622#define XX_SD_CTL_REG_MAC 0x11 625#define XX_SD_CTL_REG 0x1310
623#define XX_HIDRVD_LBN 15 626#define XX_HIDRVD_LBN 15
624#define XX_HIDRVD_WIDTH 1 627#define XX_HIDRVD_WIDTH 1
625#define XX_LODRVD_LBN 14 628#define XX_LODRVD_LBN 14
@@ -645,7 +648,7 @@
645#define XX_LPBKA_LBN 0 648#define XX_LPBKA_LBN 0
646#define XX_LPBKA_WIDTH 1 649#define XX_LPBKA_WIDTH 1
647 650
648#define XX_TXDRV_CTL_REG_MAC 0x12 651#define XX_TXDRV_CTL_REG 0x1320
649#define XX_DEQD_LBN 28 652#define XX_DEQD_LBN 28
650#define XX_DEQD_WIDTH 4 653#define XX_DEQD_WIDTH 4
651#define XX_DEQC_LBN 24 654#define XX_DEQC_LBN 24
@@ -664,7 +667,7 @@
664#define XX_DTXA_WIDTH 4 667#define XX_DTXA_WIDTH 4
665 668
666/* XAUI XGXS core status register */ 669/* XAUI XGXS core status register */
667#define XX_CORE_STAT_REG_MAC 0x16 670#define XX_CORE_STAT_REG 0x1360
668#define XX_FORCE_SIG_LBN 24 671#define XX_FORCE_SIG_LBN 24
669#define XX_FORCE_SIG_WIDTH 8 672#define XX_FORCE_SIG_WIDTH 8
670#define XX_FORCE_SIG_DECODE_FORCED 0xff 673#define XX_FORCE_SIG_DECODE_FORCED 0xff
@@ -1127,7 +1130,28 @@ struct falcon_nvconfig_board_v2 {
1127 __le16 board_revision; 1130 __le16 board_revision;
1128} __packed; 1131} __packed;
1129 1132
1130#define NVCONFIG_BASE 0x300 1133/* Board configuration v3 extra information */
1134struct falcon_nvconfig_board_v3 {
1135 __le32 spi_device_type[2];
1136} __packed;
1137
1138/* Bit numbers for spi_device_type */
1139#define SPI_DEV_TYPE_SIZE_LBN 0
1140#define SPI_DEV_TYPE_SIZE_WIDTH 5
1141#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
1142#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
1143#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
1144#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
1145#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
1146#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
1147#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
1148#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
1149#define SPI_DEV_TYPE_FIELD(type, field) \
1150 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
1151
1152#define NVCONFIG_OFFSET 0x300
1153#define NVCONFIG_END 0x400
1154
1131#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C 1155#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1132struct falcon_nvconfig { 1156struct falcon_nvconfig {
1133 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ 1157 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
@@ -1144,6 +1168,8 @@ struct falcon_nvconfig {
1144 __le16 board_struct_ver; 1168 __le16 board_struct_ver;
1145 __le16 board_checksum; 1169 __le16 board_checksum;
1146 struct falcon_nvconfig_board_v2 board_v2; 1170 struct falcon_nvconfig_board_v2 board_v2;
1171 efx_oword_t ee_base_page_reg; /* 0x3B0 */
1172 struct falcon_nvconfig_board_v3 board_v3;
1147} __packed; 1173} __packed;
1148 1174
1149#endif /* EFX_FALCON_HWDEFS_H */ 1175#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
index 6670cdfc41ab..c16da3149fa9 100644
--- a/drivers/net/sfc/falcon_io.h
+++ b/drivers/net/sfc/falcon_io.h
@@ -13,7 +13,6 @@
13 13
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include "net_driver.h"
17 16
18/************************************************************************** 17/**************************************************************************
19 * 18 *
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 55c0d9760be8..d4012314dd01 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -23,56 +23,24 @@
23 23
24/************************************************************************** 24/**************************************************************************
25 * 25 *
26 * MAC register access
27 *
28 **************************************************************************/
29
30/* Offset of an XMAC register within Falcon */
31#define FALCON_XMAC_REG(mac_reg) \
32 (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
33
34void falcon_xmac_writel(struct efx_nic *efx,
35 efx_dword_t *value, unsigned int mac_reg)
36{
37 efx_oword_t temp;
38
39 EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
40 falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
41}
42
43void falcon_xmac_readl(struct efx_nic *efx,
44 efx_dword_t *value, unsigned int mac_reg)
45{
46 efx_oword_t temp;
47
48 falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
49 EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
50}
51
52/**************************************************************************
53 *
54 * MAC operations 26 * MAC operations
55 * 27 *
56 *************************************************************************/ 28 *************************************************************************/
57static int falcon_reset_xmac(struct efx_nic *efx) 29static int falcon_reset_xmac(struct efx_nic *efx)
58{ 30{
59 efx_dword_t reg; 31 efx_oword_t reg;
60 int count; 32 int count;
61 33
62 EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1); 34 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
63 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC); 35 falcon_write(efx, &reg, XM_GLB_CFG_REG);
64 36
65 for (count = 0; count < 10000; count++) { /* wait upto 100ms */ 37 for (count = 0; count < 10000; count++) { /* wait upto 100ms */
66 falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC); 38 falcon_read(efx, &reg, XM_GLB_CFG_REG);
67 if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0) 39 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
68 return 0; 40 return 0;
69 udelay(10); 41 udelay(10);
70 } 42 }
71 43
72 /* This often fails when DSP is disabled, ignore it */
73 if (sfe4001_phy_flash_cfg != 0)
74 return 0;
75
76 EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); 44 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
77 return -ETIMEDOUT; 45 return -ETIMEDOUT;
78} 46}
@@ -80,25 +48,25 @@ static int falcon_reset_xmac(struct efx_nic *efx)
80/* Configure the XAUI driver that is an output from Falcon */ 48/* Configure the XAUI driver that is an output from Falcon */
81static void falcon_setup_xaui(struct efx_nic *efx) 49static void falcon_setup_xaui(struct efx_nic *efx)
82{ 50{
83 efx_dword_t sdctl, txdrv; 51 efx_oword_t sdctl, txdrv;
84 52
85 /* Move the XAUI into low power, unless there is no PHY, in 53 /* Move the XAUI into low power, unless there is no PHY, in
86 * which case the XAUI will have to drive a cable. */ 54 * which case the XAUI will have to drive a cable. */
87 if (efx->phy_type == PHY_TYPE_NONE) 55 if (efx->phy_type == PHY_TYPE_NONE)
88 return; 56 return;
89 57
90 falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC); 58 falcon_read(efx, &sdctl, XX_SD_CTL_REG);
91 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); 59 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
92 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); 60 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
93 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); 61 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
94 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); 62 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
95 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); 63 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
96 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); 64 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
97 EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); 65 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
98 EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); 66 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
99 falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC); 67 falcon_write(efx, &sdctl, XX_SD_CTL_REG);
100 68
101 EFX_POPULATE_DWORD_8(txdrv, 69 EFX_POPULATE_OWORD_8(txdrv,
102 XX_DEQD, XX_TXDRV_DEQ_DEFAULT, 70 XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
103 XX_DEQC, XX_TXDRV_DEQ_DEFAULT, 71 XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
104 XX_DEQB, XX_TXDRV_DEQ_DEFAULT, 72 XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
@@ -107,93 +75,21 @@ static void falcon_setup_xaui(struct efx_nic *efx)
107 XX_DTXC, XX_TXDRV_DTX_DEFAULT, 75 XX_DTXC, XX_TXDRV_DTX_DEFAULT,
108 XX_DTXB, XX_TXDRV_DTX_DEFAULT, 76 XX_DTXB, XX_TXDRV_DTX_DEFAULT,
109 XX_DTXA, XX_TXDRV_DTX_DEFAULT); 77 XX_DTXA, XX_TXDRV_DTX_DEFAULT);
110 falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC); 78 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG);
111} 79}
112 80
113static void falcon_hold_xaui_in_rst(struct efx_nic *efx) 81int falcon_reset_xaui(struct efx_nic *efx)
114{
115 efx_dword_t reg;
116
117 EFX_ZERO_DWORD(reg);
118 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
119 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
120 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
121 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
122 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
123 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
124 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
125 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
126 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
127 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
128 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
129 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
130 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
131 udelay(10);
132}
133
134static int _falcon_reset_xaui_a(struct efx_nic *efx)
135{
136 efx_dword_t reg;
137
138 falcon_hold_xaui_in_rst(efx);
139 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
140
141 /* Follow the RAMBUS XAUI data reset sequencing
142 * Channels A and B first: power down, reset PLL, reset, clear
143 */
144 EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
145 EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
146 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
147 udelay(10);
148
149 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
150 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
151 udelay(10);
152
153 EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
154 EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
155 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
156 udelay(10);
157
158 /* Channels C and D: power down, reset PLL, reset, clear */
159 EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
160 EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
161 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
162 udelay(10);
163
164 EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
165 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
166 udelay(10);
167
168 EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
169 EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
170 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
171 udelay(10);
172
173 /* Setup XAUI */
174 falcon_setup_xaui(efx);
175 udelay(10);
176
177 /* Take XGXS out of reset */
178 EFX_ZERO_DWORD(reg);
179 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
180 udelay(10);
181
182 return 0;
183}
184
185static int _falcon_reset_xaui_b(struct efx_nic *efx)
186{ 82{
187 efx_dword_t reg; 83 efx_oword_t reg;
188 int count; 84 int count;
189 85
190 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); 86 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
191 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC); 87 falcon_write(efx, &reg, XX_PWR_RST_REG);
192 88
193 /* Give some time for the link to establish */ 89 /* Give some time for the link to establish */
194 for (count = 0; count < 1000; count++) { /* wait upto 10ms */ 90 for (count = 0; count < 1000; count++) { /* wait upto 10ms */
195 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC); 91 falcon_read(efx, &reg, XX_PWR_RST_REG);
196 if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) { 92 if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
197 falcon_setup_xaui(efx); 93 falcon_setup_xaui(efx);
198 return 0; 94 return 0;
199 } 95 }
@@ -203,55 +99,41 @@ static int _falcon_reset_xaui_b(struct efx_nic *efx)
203 return -ETIMEDOUT; 99 return -ETIMEDOUT;
204} 100}
205 101
206int falcon_reset_xaui(struct efx_nic *efx) 102static bool falcon_xgmii_status(struct efx_nic *efx)
207{ 103{
208 int rc; 104 efx_oword_t reg;
209
210 if (EFX_WORKAROUND_9388(efx)) {
211 falcon_hold_xaui_in_rst(efx);
212 efx->phy_op->reset_xaui(efx);
213 rc = _falcon_reset_xaui_a(efx);
214 } else {
215 rc = _falcon_reset_xaui_b(efx);
216 }
217 return rc;
218}
219
220static int falcon_xgmii_status(struct efx_nic *efx)
221{
222 efx_dword_t reg;
223 105
224 if (falcon_rev(efx) < FALCON_REV_B0) 106 if (falcon_rev(efx) < FALCON_REV_B0)
225 return 1; 107 return true;
226 108
227 /* The ISR latches, so clear it and re-read */ 109 /* The ISR latches, so clear it and re-read */
228 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 110 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
229 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 111 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
230 112
231 if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || 113 if (EFX_OWORD_FIELD(reg, XM_LCLFLT) ||
232 EFX_DWORD_FIELD(reg, XM_RMTFLT)) { 114 EFX_OWORD_FIELD(reg, XM_RMTFLT)) {
233 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); 115 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
234 return 0; 116 return false;
235 } 117 }
236 118
237 return 1; 119 return true;
238} 120}
239 121
240static void falcon_mask_status_intr(struct efx_nic *efx, int enable) 122static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
241{ 123{
242 efx_dword_t reg; 124 efx_oword_t reg;
243 125
244 if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 126 if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
245 return; 127 return;
246 128
247 /* Flush the ISR */ 129 /* Flush the ISR */
248 if (enable) 130 if (enable)
249 falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0); 131 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
250 132
251 EFX_POPULATE_DWORD_2(reg, 133 EFX_POPULATE_OWORD_2(reg,
252 XM_MSK_RMTFLT, !enable, 134 XM_MSK_RMTFLT, !enable,
253 XM_MSK_LCLFLT, !enable); 135 XM_MSK_LCLFLT, !enable);
254 falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0); 136 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
255} 137}
256 138
257int falcon_init_xmac(struct efx_nic *efx) 139int falcon_init_xmac(struct efx_nic *efx)
@@ -274,7 +156,7 @@ int falcon_init_xmac(struct efx_nic *efx)
274 if (rc) 156 if (rc)
275 goto fail2; 157 goto fail2;
276 158
277 falcon_mask_status_intr(efx, 1); 159 falcon_mask_status_intr(efx, true);
278 return 0; 160 return 0;
279 161
280 fail2: 162 fail2:
@@ -283,34 +165,34 @@ int falcon_init_xmac(struct efx_nic *efx)
283 return rc; 165 return rc;
284} 166}
285 167
286int falcon_xaui_link_ok(struct efx_nic *efx) 168bool falcon_xaui_link_ok(struct efx_nic *efx)
287{ 169{
288 efx_dword_t reg; 170 efx_oword_t reg;
289 int align_done, sync_status, link_ok = 0; 171 bool align_done, link_ok = false;
172 int sync_status;
290 173
291 if (LOOPBACK_INTERNAL(efx)) 174 if (LOOPBACK_INTERNAL(efx))
292 return 1; 175 return true;
293 176
294 /* Read link status */ 177 /* Read link status */
295 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 178 falcon_read(efx, &reg, XX_CORE_STAT_REG);
296 179
297 align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE); 180 align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE);
298 sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT); 181 sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT);
299 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) 182 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
300 link_ok = 1; 183 link_ok = true;
301 184
302 /* Clear link status ready for next read */ 185 /* Clear link status ready for next read */
303 EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); 186 EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
304 EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); 187 EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
305 EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); 188 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
306 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC); 189 falcon_write(efx, &reg, XX_CORE_STAT_REG);
307 190
308 /* If the link is up, then check the phy side of the xaui link 191 /* If the link is up, then check the phy side of the xaui link
309 * (error conditions from the wire side propoagate back through 192 * (error conditions from the wire side propoagate back through
310 * the phy to the xaui side). */ 193 * the phy to the xaui side). */
311 if (efx->link_up && link_ok) { 194 if (efx->link_up && link_ok) {
312 int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS); 195 if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
313 if (has_phyxs)
314 link_ok = mdio_clause45_phyxgxs_lane_sync(efx); 196 link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
315 } 197 }
316 198
@@ -325,15 +207,15 @@ int falcon_xaui_link_ok(struct efx_nic *efx)
325static void falcon_reconfigure_xmac_core(struct efx_nic *efx) 207static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
326{ 208{
327 unsigned int max_frame_len; 209 unsigned int max_frame_len;
328 efx_dword_t reg; 210 efx_oword_t reg;
329 int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0; 211 bool rx_fc = !!(efx->flow_control & EFX_FC_RX);
330 212
331 /* Configure MAC - cut-thru mode is hard wired on */ 213 /* Configure MAC - cut-thru mode is hard wired on */
332 EFX_POPULATE_DWORD_3(reg, 214 EFX_POPULATE_DWORD_3(reg,
333 XM_RX_JUMBO_MODE, 1, 215 XM_RX_JUMBO_MODE, 1,
334 XM_TX_STAT_EN, 1, 216 XM_TX_STAT_EN, 1,
335 XM_RX_STAT_EN, 1); 217 XM_RX_STAT_EN, 1);
336 falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC); 218 falcon_write(efx, &reg, XM_GLB_CFG_REG);
337 219
338 /* Configure TX */ 220 /* Configure TX */
339 EFX_POPULATE_DWORD_6(reg, 221 EFX_POPULATE_DWORD_6(reg,
@@ -343,7 +225,7 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
343 XM_TXCRC, 1, 225 XM_TXCRC, 1,
344 XM_FCNTL, 1, 226 XM_FCNTL, 1,
345 XM_IPG, 0x3); 227 XM_IPG, 0x3);
346 falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC); 228 falcon_write(efx, &reg, XM_TX_CFG_REG);
347 229
348 /* Configure RX */ 230 /* Configure RX */
349 EFX_POPULATE_DWORD_5(reg, 231 EFX_POPULATE_DWORD_5(reg,
@@ -352,21 +234,21 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
352 XM_ACPT_ALL_MCAST, 1, 234 XM_ACPT_ALL_MCAST, 1,
353 XM_ACPT_ALL_UCAST, efx->promiscuous, 235 XM_ACPT_ALL_UCAST, efx->promiscuous,
354 XM_PASS_CRC_ERR, 1); 236 XM_PASS_CRC_ERR, 1);
355 falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC); 237 falcon_write(efx, &reg, XM_RX_CFG_REG);
356 238
357 /* Set frame length */ 239 /* Set frame length */
358 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 240 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
359 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); 241 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
360 falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC); 242 falcon_write(efx, &reg, XM_RX_PARAM_REG);
361 EFX_POPULATE_DWORD_2(reg, 243 EFX_POPULATE_DWORD_2(reg,
362 XM_MAX_TX_FRM_SIZE, max_frame_len, 244 XM_MAX_TX_FRM_SIZE, max_frame_len,
363 XM_TX_JUMBO_MODE, 1); 245 XM_TX_JUMBO_MODE, 1);
364 falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC); 246 falcon_write(efx, &reg, XM_TX_PARAM_REG);
365 247
366 EFX_POPULATE_DWORD_2(reg, 248 EFX_POPULATE_DWORD_2(reg,
367 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ 249 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
368 XM_DIS_FCNTL, rx_fc ? 0 : 1); 250 XM_DIS_FCNTL, !rx_fc);
369 falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC); 251 falcon_write(efx, &reg, XM_FC_REG);
370 252
371 /* Set MAC address */ 253 /* Set MAC address */
372 EFX_POPULATE_DWORD_4(reg, 254 EFX_POPULATE_DWORD_4(reg,
@@ -374,83 +256,75 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
374 XM_ADR_1, efx->net_dev->dev_addr[1], 256 XM_ADR_1, efx->net_dev->dev_addr[1],
375 XM_ADR_2, efx->net_dev->dev_addr[2], 257 XM_ADR_2, efx->net_dev->dev_addr[2],
376 XM_ADR_3, efx->net_dev->dev_addr[3]); 258 XM_ADR_3, efx->net_dev->dev_addr[3]);
377 falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC); 259 falcon_write(efx, &reg, XM_ADR_LO_REG);
378 EFX_POPULATE_DWORD_2(reg, 260 EFX_POPULATE_DWORD_2(reg,
379 XM_ADR_4, efx->net_dev->dev_addr[4], 261 XM_ADR_4, efx->net_dev->dev_addr[4],
380 XM_ADR_5, efx->net_dev->dev_addr[5]); 262 XM_ADR_5, efx->net_dev->dev_addr[5]);
381 falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC); 263 falcon_write(efx, &reg, XM_ADR_HI_REG);
382} 264}
383 265
384static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 266static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
385{ 267{
386 efx_dword_t reg; 268 efx_oword_t reg;
387 int xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS) ? 1 : 0; 269 bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
388 int xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI) ? 1 : 0; 270 bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
389 int xgmii_loopback = 271 bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
390 (efx->loopback_mode == LOOPBACK_XGMII) ? 1 : 0;
391 272
392 /* XGXS block is flaky and will need to be reset if moving 273 /* XGXS block is flaky and will need to be reset if moving
393 * into our out of XGMII, XGXS or XAUI loopbacks. */ 274 * into our out of XGMII, XGXS or XAUI loopbacks. */
394 if (EFX_WORKAROUND_5147(efx)) { 275 if (EFX_WORKAROUND_5147(efx)) {
395 int old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 276 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
396 int reset_xgxs; 277 bool reset_xgxs;
397 278
398 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 279 falcon_read(efx, &reg, XX_CORE_STAT_REG);
399 old_xgxs_loopback = EFX_DWORD_FIELD(reg, XX_XGXS_LB_EN); 280 old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN);
400 old_xgmii_loopback = EFX_DWORD_FIELD(reg, XX_XGMII_LB_EN); 281 old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN);
401 282
402 falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 283 falcon_read(efx, &reg, XX_SD_CTL_REG);
403 old_xaui_loopback = EFX_DWORD_FIELD(reg, XX_LPBKA); 284 old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA);
404 285
405 /* The PHY driver may have turned XAUI off */ 286 /* The PHY driver may have turned XAUI off */
406 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 287 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
407 (xaui_loopback != old_xaui_loopback) || 288 (xaui_loopback != old_xaui_loopback) ||
408 (xgmii_loopback != old_xgmii_loopback)); 289 (xgmii_loopback != old_xgmii_loopback));
409 if (reset_xgxs) { 290
410 falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC); 291 if (reset_xgxs)
411 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); 292 falcon_reset_xaui(efx);
412 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
413 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
414 udelay(1);
415 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 0);
416 EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 0);
417 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
418 udelay(1);
419 }
420 } 293 }
421 294
422 falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC); 295 falcon_read(efx, &reg, XX_CORE_STAT_REG);
423 EFX_SET_DWORD_FIELD(reg, XX_FORCE_SIG, 296 EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG,
424 (xgxs_loopback || xaui_loopback) ? 297 (xgxs_loopback || xaui_loopback) ?
425 XX_FORCE_SIG_DECODE_FORCED : 0); 298 XX_FORCE_SIG_DECODE_FORCED : 0);
426 EFX_SET_DWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 299 EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback);
427 EFX_SET_DWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 300 EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback);
428 falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC); 301 falcon_write(efx, &reg, XX_CORE_STAT_REG);
429 302
430 falcon_xmac_readl(efx, &reg, XX_SD_CTL_REG_MAC); 303 falcon_read(efx, &reg, XX_SD_CTL_REG);
431 EFX_SET_DWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 304 EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback);
432 EFX_SET_DWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 305 EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback);
433 EFX_SET_DWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 306 EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback);
434 EFX_SET_DWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 307 EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback);
435 falcon_xmac_writel(efx, &reg, XX_SD_CTL_REG_MAC); 308 falcon_write(efx, &reg, XX_SD_CTL_REG);
436} 309}
437 310
438 311
439/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 312/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails
440 * to come back up. Bash it until it comes back up */ 313 * to come back up. Bash it until it comes back up */
441static int falcon_check_xaui_link_up(struct efx_nic *efx) 314static bool falcon_check_xaui_link_up(struct efx_nic *efx)
442{ 315{
443 int max_tries, tries; 316 int max_tries, tries;
444 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; 317 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
445 max_tries = tries; 318 max_tries = tries;
446 319
447 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 320 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
448 (efx->phy_type == PHY_TYPE_NONE)) 321 (efx->phy_type == PHY_TYPE_NONE) ||
449 return 0; 322 efx_phy_mode_disabled(efx->phy_mode))
323 return false;
450 324
451 while (tries) { 325 while (tries) {
452 if (falcon_xaui_link_ok(efx)) 326 if (falcon_xaui_link_ok(efx))
453 return 1; 327 return true;
454 328
455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 329 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
456 __func__, tries); 330 __func__, tries);
@@ -461,18 +335,22 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
461 335
462 EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n", 336 EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n",
463 max_tries); 337 max_tries);
464 return 0; 338 return false;
465} 339}
466 340
467void falcon_reconfigure_xmac(struct efx_nic *efx) 341void falcon_reconfigure_xmac(struct efx_nic *efx)
468{ 342{
469 int xaui_link_ok; 343 bool xaui_link_ok;
470 344
471 falcon_mask_status_intr(efx, 0); 345 falcon_mask_status_intr(efx, false);
472 346
473 falcon_deconfigure_mac_wrapper(efx); 347 falcon_deconfigure_mac_wrapper(efx);
474 348
475 efx->tx_disabled = LOOPBACK_INTERNAL(efx); 349 /* Reconfigure the PHY, disabling transmit in mac level loopback. */
350 if (LOOPBACK_INTERNAL(efx))
351 efx->phy_mode |= PHY_MODE_TX_DISABLED;
352 else
353 efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
476 efx->phy_op->reconfigure(efx); 354 efx->phy_op->reconfigure(efx);
477 355
478 falcon_reconfigure_xgxs_core(efx); 356 falcon_reconfigure_xgxs_core(efx);
@@ -484,7 +362,7 @@ void falcon_reconfigure_xmac(struct efx_nic *efx)
484 xaui_link_ok = falcon_check_xaui_link_up(efx); 362 xaui_link_ok = falcon_check_xaui_link_up(efx);
485 363
486 if (xaui_link_ok && efx->link_up) 364 if (xaui_link_ok && efx->link_up)
487 falcon_mask_status_intr(efx, 1); 365 falcon_mask_status_intr(efx, true);
488} 366}
489 367
490void falcon_fini_xmac(struct efx_nic *efx) 368void falcon_fini_xmac(struct efx_nic *efx)
@@ -554,21 +432,23 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
554 432
555 /* Update derived statistics */ 433 /* Update derived statistics */
556 mac_stats->tx_good_bytes = 434 mac_stats->tx_good_bytes =
557 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes); 435 (mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
436 mac_stats->tx_control * 64);
558 mac_stats->rx_bad_bytes = 437 mac_stats->rx_bad_bytes =
559 (mac_stats->rx_bytes - mac_stats->rx_good_bytes); 438 (mac_stats->rx_bytes - mac_stats->rx_good_bytes -
439 mac_stats->rx_control * 64);
560} 440}
561 441
562int falcon_check_xmac(struct efx_nic *efx) 442int falcon_check_xmac(struct efx_nic *efx)
563{ 443{
564 unsigned xaui_link_ok; 444 bool xaui_link_ok;
565 int rc; 445 int rc;
566 446
567 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 447 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
568 (efx->phy_type == PHY_TYPE_NONE)) 448 efx_phy_mode_disabled(efx->phy_mode))
569 return 0; 449 return 0;
570 450
571 falcon_mask_status_intr(efx, 0); 451 falcon_mask_status_intr(efx, false);
572 xaui_link_ok = falcon_xaui_link_ok(efx); 452 xaui_link_ok = falcon_xaui_link_ok(efx);
573 453
574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 454 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
@@ -579,7 +459,7 @@ int falcon_check_xmac(struct efx_nic *efx)
579 459
580 /* Unmask interrupt if everything was (and still is) ok */ 460 /* Unmask interrupt if everything was (and still is) ok */
581 if (xaui_link_ok && efx->link_up) 461 if (xaui_link_ok && efx->link_up)
582 falcon_mask_status_intr(efx, 1); 462 falcon_mask_status_intr(efx, true);
583 463
584 return rc; 464 return rc;
585} 465}
@@ -620,7 +500,7 @@ int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
620 500
621int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) 501int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
622{ 502{
623 int reset; 503 bool reset;
624 504
625 if (flow_control & EFX_FC_AUTO) { 505 if (flow_control & EFX_FC_AUTO) {
626 EFX_LOG(efx, "10G does not support flow control " 506 EFX_LOG(efx, "10G does not support flow control "
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index edd07d4dee18..a31571c69137 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,10 +13,6 @@
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15 15
16extern void falcon_xmac_writel(struct efx_nic *efx,
17 efx_dword_t *value, unsigned int mac_reg);
18extern void falcon_xmac_readl(struct efx_nic *efx,
19 efx_dword_t *value, unsigned int mac_reg);
20extern int falcon_init_xmac(struct efx_nic *efx); 16extern int falcon_init_xmac(struct efx_nic *efx);
21extern void falcon_reconfigure_xmac(struct efx_nic *efx); 17extern void falcon_reconfigure_xmac(struct efx_nic *efx);
22extern void falcon_update_stats_xmac(struct efx_nic *efx); 18extern void falcon_update_stats_xmac(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index c4f540e93b79..003e48dcb2f3 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -159,20 +159,21 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
159 return 0; 159 return 0;
160} 160}
161 161
162int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) 162bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
163{ 163{
164 int phy_id = efx->mii.phy_id; 164 int phy_id = efx->mii.phy_id;
165 int status; 165 int status;
166 int ok = 1; 166 bool ok = true;
167 int mmd = 0; 167 int mmd = 0;
168 int good;
169 168
170 /* If the port is in loopback, then we should only consider a subset 169 /* If the port is in loopback, then we should only consider a subset
171 * of mmd's */ 170 * of mmd's */
172 if (LOOPBACK_INTERNAL(efx)) 171 if (LOOPBACK_INTERNAL(efx))
173 return 1; 172 return true;
174 else if (efx->loopback_mode == LOOPBACK_NETWORK) 173 else if (efx->loopback_mode == LOOPBACK_NETWORK)
175 return 0; 174 return false;
175 else if (efx_phy_mode_disabled(efx->phy_mode))
176 return false;
176 else if (efx->loopback_mode == LOOPBACK_PHYXS) 177 else if (efx->loopback_mode == LOOPBACK_PHYXS)
177 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | 178 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS |
178 MDIO_MMDREG_DEVS0_PCS | 179 MDIO_MMDREG_DEVS0_PCS |
@@ -192,8 +193,7 @@ int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
192 status = mdio_clause45_read(efx, phy_id, 193 status = mdio_clause45_read(efx, phy_id,
193 mmd, MDIO_MMDREG_STAT1); 194 mmd, MDIO_MMDREG_STAT1);
194 195
195 good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN); 196 ok = ok && (status & (1 << MDIO_MMDREG_STAT1_LINK_LBN));
196 ok = ok && good;
197 } 197 }
198 mmd_mask = (mmd_mask >> 1); 198 mmd_mask = (mmd_mask >> 1);
199 mmd++; 199 mmd++;
@@ -208,7 +208,7 @@ void mdio_clause45_transmit_disable(struct efx_nic *efx)
208 208
209 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 209 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
210 MDIO_MMDREG_TXDIS); 210 MDIO_MMDREG_TXDIS);
211 if (efx->tx_disabled) 211 if (efx->phy_mode & PHY_MODE_TX_DISABLED)
212 ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 212 ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
213 else 213 else
214 ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN); 214 ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index cb99f3f4491c..19c42eaf7fb4 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -199,18 +199,19 @@ static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
199 return (id_hi << 16) | (id_low); 199 return (id_hi << 16) | (id_low);
200} 200}
201 201
202static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx) 202static inline bool mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
203{ 203{
204 int i, sync, lane_status; 204 int i, lane_status;
205 bool sync;
205 206
206 for (i = 0; i < 2; ++i) 207 for (i = 0; i < 2; ++i)
207 lane_status = mdio_clause45_read(efx, efx->mii.phy_id, 208 lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
208 MDIO_MMD_PHYXS, 209 MDIO_MMD_PHYXS,
209 MDIO_PHYXS_LANE_STATE); 210 MDIO_PHYXS_LANE_STATE);
210 211
211 sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0; 212 sync = !!(lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN));
212 if (!sync) 213 if (!sync)
213 EFX_INFO(efx, "XGXS lane status: %x\n", lane_status); 214 EFX_LOG(efx, "XGXS lane status: %x\n", lane_status);
214 return sync; 215 return sync;
215} 216}
216 217
@@ -230,8 +231,8 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
230 unsigned int mmd_mask, unsigned int fatal_mask); 231 unsigned int mmd_mask, unsigned int fatal_mask);
231 232
232/* Check the link status of specified mmds in bit mask */ 233/* Check the link status of specified mmds in bit mask */
233extern int mdio_clause45_links_ok(struct efx_nic *efx, 234extern bool mdio_clause45_links_ok(struct efx_nic *efx,
234 unsigned int mmd_mask); 235 unsigned int mmd_mask);
235 236
236/* Generic transmit disable support though PMAPMD */ 237/* Generic transmit disable support though PMAPMD */
237extern void mdio_clause45_transmit_disable(struct efx_nic *efx); 238extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 219c74a772c3..cdb11fad6050 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -88,9 +88,12 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
88 **************************************************************************/ 88 **************************************************************************/
89 89
90#define EFX_MAX_CHANNELS 32 90#define EFX_MAX_CHANNELS 32
91#define EFX_MAX_TX_QUEUES 1
92#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 91#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
93 92
93#define EFX_TX_QUEUE_OFFLOAD_CSUM 0
94#define EFX_TX_QUEUE_NO_CSUM 1
95#define EFX_TX_QUEUE_COUNT 2
96
94/** 97/**
95 * struct efx_special_buffer - An Efx special buffer 98 * struct efx_special_buffer - An Efx special buffer
96 * @addr: CPU base address of the buffer 99 * @addr: CPU base address of the buffer
@@ -127,7 +130,6 @@ struct efx_special_buffer {
127 * This field is zero when the queue slot is empty. 130 * This field is zero when the queue slot is empty.
128 * @continuation: True if this fragment is not the end of a packet. 131 * @continuation: True if this fragment is not the end of a packet.
129 * @unmap_single: True if pci_unmap_single should be used. 132 * @unmap_single: True if pci_unmap_single should be used.
130 * @unmap_addr: DMA address to unmap
131 * @unmap_len: Length of this fragment to unmap 133 * @unmap_len: Length of this fragment to unmap
132 */ 134 */
133struct efx_tx_buffer { 135struct efx_tx_buffer {
@@ -135,9 +137,8 @@ struct efx_tx_buffer {
135 struct efx_tso_header *tsoh; 137 struct efx_tso_header *tsoh;
136 dma_addr_t dma_addr; 138 dma_addr_t dma_addr;
137 unsigned short len; 139 unsigned short len;
138 unsigned char continuation; 140 bool continuation;
139 unsigned char unmap_single; 141 bool unmap_single;
140 dma_addr_t unmap_addr;
141 unsigned short unmap_len; 142 unsigned short unmap_len;
142}; 143};
143 144
@@ -156,13 +157,13 @@ struct efx_tx_buffer {
156 * 157 *
157 * @efx: The associated Efx NIC 158 * @efx: The associated Efx NIC
158 * @queue: DMA queue number 159 * @queue: DMA queue number
159 * @used: Queue is used by net driver
160 * @channel: The associated channel 160 * @channel: The associated channel
161 * @buffer: The software buffer ring 161 * @buffer: The software buffer ring
162 * @txd: The hardware descriptor ring 162 * @txd: The hardware descriptor ring
163 * @flushed: Used when handling queue flushing
163 * @read_count: Current read pointer. 164 * @read_count: Current read pointer.
164 * This is the number of buffers that have been removed from both rings. 165 * This is the number of buffers that have been removed from both rings.
165 * @stopped: Stopped flag. 166 * @stopped: Stopped count.
166 * Set if this TX queue is currently stopping its port. 167 * Set if this TX queue is currently stopping its port.
167 * @insert_count: Current insert pointer 168 * @insert_count: Current insert pointer
168 * This is the number of buffers that have been added to the 169 * This is the number of buffers that have been added to the
@@ -188,11 +189,11 @@ struct efx_tx_queue {
188 /* Members which don't change on the fast path */ 189 /* Members which don't change on the fast path */
189 struct efx_nic *efx ____cacheline_aligned_in_smp; 190 struct efx_nic *efx ____cacheline_aligned_in_smp;
190 int queue; 191 int queue;
191 int used;
192 struct efx_channel *channel; 192 struct efx_channel *channel;
193 struct efx_nic *nic; 193 struct efx_nic *nic;
194 struct efx_tx_buffer *buffer; 194 struct efx_tx_buffer *buffer;
195 struct efx_special_buffer txd; 195 struct efx_special_buffer txd;
196 bool flushed;
196 197
197 /* Members used mainly on the completion path */ 198 /* Members used mainly on the completion path */
198 unsigned int read_count ____cacheline_aligned_in_smp; 199 unsigned int read_count ____cacheline_aligned_in_smp;
@@ -232,7 +233,6 @@ struct efx_rx_buffer {
232 * struct efx_rx_queue - An Efx RX queue 233 * struct efx_rx_queue - An Efx RX queue
233 * @efx: The associated Efx NIC 234 * @efx: The associated Efx NIC
234 * @queue: DMA queue number 235 * @queue: DMA queue number
235 * @used: Queue is used by net driver
236 * @channel: The associated channel 236 * @channel: The associated channel
237 * @buffer: The software buffer ring 237 * @buffer: The software buffer ring
238 * @rxd: The hardware descriptor ring 238 * @rxd: The hardware descriptor ring
@@ -262,11 +262,11 @@ struct efx_rx_buffer {
262 * the remaining space in the allocation. 262 * the remaining space in the allocation.
263 * @buf_dma_addr: Page's DMA address. 263 * @buf_dma_addr: Page's DMA address.
264 * @buf_data: Page's host address. 264 * @buf_data: Page's host address.
265 * @flushed: Use when handling queue flushing
265 */ 266 */
266struct efx_rx_queue { 267struct efx_rx_queue {
267 struct efx_nic *efx; 268 struct efx_nic *efx;
268 int queue; 269 int queue;
269 int used;
270 struct efx_channel *channel; 270 struct efx_channel *channel;
271 struct efx_rx_buffer *buffer; 271 struct efx_rx_buffer *buffer;
272 struct efx_special_buffer rxd; 272 struct efx_special_buffer rxd;
@@ -288,6 +288,7 @@ struct efx_rx_queue {
288 struct page *buf_page; 288 struct page *buf_page;
289 dma_addr_t buf_dma_addr; 289 dma_addr_t buf_dma_addr;
290 char *buf_data; 290 char *buf_data;
291 bool flushed;
291}; 292};
292 293
293/** 294/**
@@ -325,12 +326,10 @@ enum efx_rx_alloc_method {
325 * queue. 326 * queue.
326 * 327 *
327 * @efx: Associated Efx NIC 328 * @efx: Associated Efx NIC
328 * @evqnum: Event queue number
329 * @channel: Channel instance number 329 * @channel: Channel instance number
330 * @used_flags: Channel is used by net driver 330 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator 331 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only) 332 * @irq: IRQ number (MSI and MSI-X only)
333 * @has_interrupt: Channel has an interrupt
334 * @irq_moderation: IRQ moderation value (in us) 333 * @irq_moderation: IRQ moderation value (in us)
335 * @napi_dev: Net device used with NAPI 334 * @napi_dev: Net device used with NAPI
336 * @napi_str: NAPI control structure 335 * @napi_str: NAPI control structure
@@ -357,17 +356,14 @@ enum efx_rx_alloc_method {
357 */ 356 */
358struct efx_channel { 357struct efx_channel {
359 struct efx_nic *efx; 358 struct efx_nic *efx;
360 int evqnum;
361 int channel; 359 int channel;
362 int used_flags; 360 int used_flags;
363 int enabled; 361 bool enabled;
364 int irq; 362 int irq;
365 unsigned int has_interrupt;
366 unsigned int irq_moderation; 363 unsigned int irq_moderation;
367 struct net_device *napi_dev; 364 struct net_device *napi_dev;
368 struct napi_struct napi_str; 365 struct napi_struct napi_str;
369 struct work_struct reset_work; 366 bool work_pending;
370 int work_pending;
371 struct efx_special_buffer eventq; 367 struct efx_special_buffer eventq;
372 unsigned int eventq_read_ptr; 368 unsigned int eventq_read_ptr;
373 unsigned int last_eventq_read_ptr; 369 unsigned int last_eventq_read_ptr;
@@ -390,7 +386,7 @@ struct efx_channel {
390 * access with prefetches. 386 * access with prefetches.
391 */ 387 */
392 struct efx_rx_buffer *rx_pkt; 388 struct efx_rx_buffer *rx_pkt;
393 int rx_pkt_csummed; 389 bool rx_pkt_csummed;
394 390
395}; 391};
396 392
@@ -403,8 +399,8 @@ struct efx_channel {
403 */ 399 */
404struct efx_blinker { 400struct efx_blinker {
405 int led_num; 401 int led_num;
406 int state; 402 bool state;
407 int resubmit; 403 bool resubmit;
408 struct timer_list timer; 404 struct timer_list timer;
409}; 405};
410 406
@@ -432,8 +428,8 @@ struct efx_board {
432 * have a separate init callback that happens later than 428 * have a separate init callback that happens later than
433 * board init. */ 429 * board init. */
434 int (*init_leds)(struct efx_nic *efx); 430 int (*init_leds)(struct efx_nic *efx);
435 void (*set_fault_led) (struct efx_nic *efx, int state); 431 void (*set_fault_led) (struct efx_nic *efx, bool state);
436 void (*blink) (struct efx_nic *efx, int start); 432 void (*blink) (struct efx_nic *efx, bool start);
437 void (*fini) (struct efx_nic *nic); 433 void (*fini) (struct efx_nic *nic);
438 struct efx_blinker blinker; 434 struct efx_blinker blinker;
439 struct i2c_client *hwmon_client, *ioexp_client; 435 struct i2c_client *hwmon_client, *ioexp_client;
@@ -467,8 +463,7 @@ enum nic_state {
467 STATE_INIT = 0, 463 STATE_INIT = 0,
468 STATE_RUNNING = 1, 464 STATE_RUNNING = 1,
469 STATE_FINI = 2, 465 STATE_FINI = 2,
470 STATE_RESETTING = 3, /* rtnl_lock always held */ 466 STATE_DISABLED = 3,
471 STATE_DISABLED = 4,
472 STATE_MAX, 467 STATE_MAX,
473}; 468};
474 469
@@ -479,7 +474,7 @@ enum nic_state {
479 * This is the equivalent of NET_IP_ALIGN [which controls the alignment 474 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
480 * of the skb->head for hardware DMA]. 475 * of the skb->head for hardware DMA].
481 */ 476 */
482#if defined(__i386__) || defined(__x86_64__) 477#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
483#define EFX_PAGE_IP_ALIGN 0 478#define EFX_PAGE_IP_ALIGN 0
484#else 479#else
485#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN 480#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
@@ -512,7 +507,6 @@ enum efx_fc_type {
512 * @clear_interrupt: Clear down interrupt 507 * @clear_interrupt: Clear down interrupt
513 * @blink: Blink LEDs 508 * @blink: Blink LEDs
514 * @check_hw: Check hardware 509 * @check_hw: Check hardware
515 * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset)
516 * @mmds: MMD presence mask 510 * @mmds: MMD presence mask
517 * @loopbacks: Supported loopback modes mask 511 * @loopbacks: Supported loopback modes mask
518 */ 512 */
@@ -522,11 +516,28 @@ struct efx_phy_operations {
522 void (*reconfigure) (struct efx_nic *efx); 516 void (*reconfigure) (struct efx_nic *efx);
523 void (*clear_interrupt) (struct efx_nic *efx); 517 void (*clear_interrupt) (struct efx_nic *efx);
524 int (*check_hw) (struct efx_nic *efx); 518 int (*check_hw) (struct efx_nic *efx);
525 void (*reset_xaui) (struct efx_nic *efx); 519 int (*test) (struct efx_nic *efx);
526 int mmds; 520 int mmds;
527 unsigned loopbacks; 521 unsigned loopbacks;
528}; 522};
529 523
524/**
525 * @enum efx_phy_mode - PHY operating mode flags
526 * @PHY_MODE_NORMAL: on and should pass traffic
527 * @PHY_MODE_TX_DISABLED: on with TX disabled
528 * @PHY_MODE_SPECIAL: on but will not pass traffic
529 */
530enum efx_phy_mode {
531 PHY_MODE_NORMAL = 0,
532 PHY_MODE_TX_DISABLED = 1,
533 PHY_MODE_SPECIAL = 8,
534};
535
536static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
537{
538 return !!(mode & ~PHY_MODE_TX_DISABLED);
539}
540
530/* 541/*
531 * Efx extended statistics 542 * Efx extended statistics
532 * 543 *
@@ -632,7 +643,7 @@ union efx_multicast_hash {
632 * @tx_queue: TX DMA queues 643 * @tx_queue: TX DMA queues
633 * @rx_queue: RX DMA queues 644 * @rx_queue: RX DMA queues
634 * @channel: Channels 645 * @channel: Channels
635 * @rss_queues: Number of RSS queues 646 * @n_rx_queues: Number of RX queues
636 * @rx_buffer_len: RX buffer length 647 * @rx_buffer_len: RX buffer length
637 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 648 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
638 * @irq_status: Interrupt status buffer 649 * @irq_status: Interrupt status buffer
@@ -640,15 +651,20 @@ union efx_multicast_hash {
640 * This register is written with the SMP processor ID whenever an 651 * This register is written with the SMP processor ID whenever an
641 * interrupt is handled. It is used by falcon_test_interrupt() 652 * interrupt is handled. It is used by falcon_test_interrupt()
642 * to verify that an interrupt has occurred. 653 * to verify that an interrupt has occurred.
654 * @spi_flash: SPI flash device
655 * This field will be %NULL if no flash device is present.
656 * @spi_eeprom: SPI EEPROM device
657 * This field will be %NULL if no EEPROM device is present.
643 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 658 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
644 * @nic_data: Hardware dependant state 659 * @nic_data: Hardware dependant state
645 * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and 660 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
646 * efx_reconfigure_port() 661 * @port_inhibited, efx_monitor() and efx_reconfigure_port()
647 * @port_enabled: Port enabled indicator. 662 * @port_enabled: Port enabled indicator.
648 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and 663 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
649 * efx_reconfigure_work with kernel interfaces. Safe to read under any 664 * efx_reconfigure_work with kernel interfaces. Safe to read under any
650 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 665 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
651 * be held to modify it. 666 * be held to modify it.
667 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
652 * @port_initialized: Port initialized? 668 * @port_initialized: Port initialized?
653 * @net_dev: Operating system network device. Consider holding the rtnl lock 669 * @net_dev: Operating system network device. Consider holding the rtnl lock
654 * @rx_checksum_enabled: RX checksumming enabled 670 * @rx_checksum_enabled: RX checksumming enabled
@@ -658,14 +674,16 @@ union efx_multicast_hash {
658 * can provide. Generic code converts these into a standard 674 * can provide. Generic code converts these into a standard
659 * &struct net_device_stats. 675 * &struct net_device_stats.
660 * @stats_buffer: DMA buffer for statistics 676 * @stats_buffer: DMA buffer for statistics
661 * @stats_lock: Statistics update lock 677 * @stats_lock: Statistics update lock. Serialises statistics fetches
678 * @stats_enabled: Temporarily disable statistics fetches.
679 * Serialised by @stats_lock
662 * @mac_address: Permanent MAC address 680 * @mac_address: Permanent MAC address
663 * @phy_type: PHY type 681 * @phy_type: PHY type
664 * @phy_lock: PHY access lock 682 * @phy_lock: PHY access lock
665 * @phy_op: PHY interface 683 * @phy_op: PHY interface
666 * @phy_data: PHY private data (including PHY-specific stats) 684 * @phy_data: PHY private data (including PHY-specific stats)
667 * @mii: PHY interface 685 * @mii: PHY interface
668 * @tx_disabled: PHY transmitter turned off 686 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
669 * @link_up: Link status 687 * @link_up: Link status
670 * @link_options: Link options (MII/GMII format) 688 * @link_options: Link options (MII/GMII format)
671 * @n_link_state_changes: Number of times the link has changed state 689 * @n_link_state_changes: Number of times the link has changed state
@@ -700,27 +718,31 @@ struct efx_nic {
700 enum nic_state state; 718 enum nic_state state;
701 enum reset_type reset_pending; 719 enum reset_type reset_pending;
702 720
703 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES]; 721 struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT];
704 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 722 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
705 struct efx_channel channel[EFX_MAX_CHANNELS]; 723 struct efx_channel channel[EFX_MAX_CHANNELS];
706 724
707 int rss_queues; 725 int n_rx_queues;
708 unsigned int rx_buffer_len; 726 unsigned int rx_buffer_len;
709 unsigned int rx_buffer_order; 727 unsigned int rx_buffer_order;
710 728
711 struct efx_buffer irq_status; 729 struct efx_buffer irq_status;
712 volatile signed int last_irq_cpu; 730 volatile signed int last_irq_cpu;
713 731
732 struct efx_spi_device *spi_flash;
733 struct efx_spi_device *spi_eeprom;
734
714 unsigned n_rx_nodesc_drop_cnt; 735 unsigned n_rx_nodesc_drop_cnt;
715 736
716 struct falcon_nic_data *nic_data; 737 struct falcon_nic_data *nic_data;
717 738
718 struct mutex mac_lock; 739 struct mutex mac_lock;
719 int port_enabled; 740 bool port_enabled;
741 bool port_inhibited;
720 742
721 int port_initialized; 743 bool port_initialized;
722 struct net_device *net_dev; 744 struct net_device *net_dev;
723 int rx_checksum_enabled; 745 bool rx_checksum_enabled;
724 746
725 atomic_t netif_stop_count; 747 atomic_t netif_stop_count;
726 spinlock_t netif_stop_lock; 748 spinlock_t netif_stop_lock;
@@ -728,6 +750,7 @@ struct efx_nic {
728 struct efx_mac_stats mac_stats; 750 struct efx_mac_stats mac_stats;
729 struct efx_buffer stats_buffer; 751 struct efx_buffer stats_buffer;
730 spinlock_t stats_lock; 752 spinlock_t stats_lock;
753 bool stats_enabled;
731 754
732 unsigned char mac_address[ETH_ALEN]; 755 unsigned char mac_address[ETH_ALEN];
733 756
@@ -736,13 +759,13 @@ struct efx_nic {
736 struct efx_phy_operations *phy_op; 759 struct efx_phy_operations *phy_op;
737 void *phy_data; 760 void *phy_data;
738 struct mii_if_info mii; 761 struct mii_if_info mii;
739 unsigned tx_disabled; 762 enum efx_phy_mode phy_mode;
740 763
741 int link_up; 764 bool link_up;
742 unsigned int link_options; 765 unsigned int link_options;
743 unsigned int n_link_state_changes; 766 unsigned int n_link_state_changes;
744 767
745 int promiscuous; 768 bool promiscuous;
746 union efx_multicast_hash multicast_hash; 769 union efx_multicast_hash multicast_hash;
747 enum efx_fc_type flow_control; 770 enum efx_fc_type flow_control;
748 struct work_struct reconfigure_work; 771 struct work_struct reconfigure_work;
@@ -829,50 +852,33 @@ struct efx_nic_type {
829 continue; \ 852 continue; \
830 else 853 else
831 854
832/* Iterate over all used channels with interrupts */
833#define efx_for_each_channel_with_interrupt(_channel, _efx) \
834 for (_channel = &_efx->channel[0]; \
835 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \
836 _channel++) \
837 if (!(_channel->used_flags && _channel->has_interrupt)) \
838 continue; \
839 else
840
841/* Iterate over all used TX queues */ 855/* Iterate over all used TX queues */
842#define efx_for_each_tx_queue(_tx_queue, _efx) \ 856#define efx_for_each_tx_queue(_tx_queue, _efx) \
843 for (_tx_queue = &_efx->tx_queue[0]; \ 857 for (_tx_queue = &_efx->tx_queue[0]; \
844 _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \ 858 _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
845 _tx_queue++) \ 859 _tx_queue++)
846 if (!_tx_queue->used) \
847 continue; \
848 else
849 860
850/* Iterate over all TX queues belonging to a channel */ 861/* Iterate over all TX queues belonging to a channel */
851#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 862#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
852 for (_tx_queue = &_channel->efx->tx_queue[0]; \ 863 for (_tx_queue = &_channel->efx->tx_queue[0]; \
853 _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \ 864 _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \
854 _tx_queue++) \ 865 _tx_queue++) \
855 if ((!_tx_queue->used) || \ 866 if (_tx_queue->channel != _channel) \
856 (_tx_queue->channel != _channel)) \
857 continue; \ 867 continue; \
858 else 868 else
859 869
860/* Iterate over all used RX queues */ 870/* Iterate over all used RX queues */
861#define efx_for_each_rx_queue(_rx_queue, _efx) \ 871#define efx_for_each_rx_queue(_rx_queue, _efx) \
862 for (_rx_queue = &_efx->rx_queue[0]; \ 872 for (_rx_queue = &_efx->rx_queue[0]; \
863 _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \ 873 _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \
864 _rx_queue++) \ 874 _rx_queue++)
865 if (!_rx_queue->used) \
866 continue; \
867 else
868 875
869/* Iterate over all RX queues belonging to a channel */ 876/* Iterate over all RX queues belonging to a channel */
870#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 877#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
871 for (_rx_queue = &_channel->efx->rx_queue[0]; \ 878 for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \
872 _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \ 879 _rx_queue; \
873 _rx_queue++) \ 880 _rx_queue = NULL) \
874 if ((!_rx_queue->used) || \ 881 if (_rx_queue->channel != _channel) \
875 (_rx_queue->channel != _channel)) \
876 continue; \ 882 continue; \
877 else 883 else
878 884
@@ -886,13 +892,13 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
886} 892}
887 893
888/* Set bit in a little-endian bitfield */ 894/* Set bit in a little-endian bitfield */
889static inline void set_bit_le(int nr, unsigned char *addr) 895static inline void set_bit_le(unsigned nr, unsigned char *addr)
890{ 896{
891 addr[nr / 8] |= (1 << (nr % 8)); 897 addr[nr / 8] |= (1 << (nr % 8));
892} 898}
893 899
894/* Clear bit in a little-endian bitfield */ 900/* Clear bit in a little-endian bitfield */
895static inline void clear_bit_le(int nr, unsigned char *addr) 901static inline void clear_bit_le(unsigned nr, unsigned char *addr)
896{ 902{
897 addr[nr / 8] &= ~(1 << (nr % 8)); 903 addr[nr / 8] &= ~(1 << (nr % 8));
898} 904}
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 9d02c84e6b2d..f746536f4ffa 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -15,15 +15,7 @@
15 */ 15 */
16extern struct efx_phy_operations falcon_tenxpress_phy_ops; 16extern struct efx_phy_operations falcon_tenxpress_phy_ops;
17 17
18enum tenxpress_state { 18extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
19 TENXPRESS_STATUS_OFF = 0,
20 TENXPRESS_STATUS_OTEMP = 1,
21 TENXPRESS_STATUS_NORMAL = 2,
22};
23
24extern void tenxpress_set_state(struct efx_nic *efx,
25 enum tenxpress_state state);
26extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
27extern void tenxpress_crc_err(struct efx_nic *efx); 19extern void tenxpress_crc_err(struct efx_nic *efx);
28 20
29/**************************************************************************** 21/****************************************************************************
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 0d27dd39bc09..0f805da4ce55 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -212,8 +212,8 @@ void efx_lro_fini(struct net_lro_mgr *lro_mgr)
212 * and populates a struct efx_rx_buffer with the relevant 212 * and populates a struct efx_rx_buffer with the relevant
213 * information. Return a negative error code or 0 on success. 213 * information. Return a negative error code or 0 on success.
214 */ 214 */
215static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, 215static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
216 struct efx_rx_buffer *rx_buf) 216 struct efx_rx_buffer *rx_buf)
217{ 217{
218 struct efx_nic *efx = rx_queue->efx; 218 struct efx_nic *efx = rx_queue->efx;
219 struct net_device *net_dev = efx->net_dev; 219 struct net_device *net_dev = efx->net_dev;
@@ -252,8 +252,8 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
252 * and populates a struct efx_rx_buffer with the relevant 252 * and populates a struct efx_rx_buffer with the relevant
253 * information. Return a negative error code or 0 on success. 253 * information. Return a negative error code or 0 on success.
254 */ 254 */
255static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, 255static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
256 struct efx_rx_buffer *rx_buf) 256 struct efx_rx_buffer *rx_buf)
257{ 257{
258 struct efx_nic *efx = rx_queue->efx; 258 struct efx_nic *efx = rx_queue->efx;
259 int bytes, space, offset; 259 int bytes, space, offset;
@@ -319,8 +319,8 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
319 * and populates a struct efx_rx_buffer with the relevant 319 * and populates a struct efx_rx_buffer with the relevant
320 * information. 320 * information.
321 */ 321 */
322static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue, 322static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
323 struct efx_rx_buffer *new_rx_buf) 323 struct efx_rx_buffer *new_rx_buf)
324{ 324{
325 int rc = 0; 325 int rc = 0;
326 326
@@ -340,8 +340,8 @@ static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
340 return rc; 340 return rc;
341} 341}
342 342
343static inline void efx_unmap_rx_buffer(struct efx_nic *efx, 343static void efx_unmap_rx_buffer(struct efx_nic *efx,
344 struct efx_rx_buffer *rx_buf) 344 struct efx_rx_buffer *rx_buf)
345{ 345{
346 if (rx_buf->page) { 346 if (rx_buf->page) {
347 EFX_BUG_ON_PARANOID(rx_buf->skb); 347 EFX_BUG_ON_PARANOID(rx_buf->skb);
@@ -357,8 +357,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
357 } 357 }
358} 358}
359 359
360static inline void efx_free_rx_buffer(struct efx_nic *efx, 360static void efx_free_rx_buffer(struct efx_nic *efx,
361 struct efx_rx_buffer *rx_buf) 361 struct efx_rx_buffer *rx_buf)
362{ 362{
363 if (rx_buf->page) { 363 if (rx_buf->page) {
364 __free_pages(rx_buf->page, efx->rx_buffer_order); 364 __free_pages(rx_buf->page, efx->rx_buffer_order);
@@ -369,8 +369,8 @@ static inline void efx_free_rx_buffer(struct efx_nic *efx,
369 } 369 }
370} 370}
371 371
372static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 372static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
373 struct efx_rx_buffer *rx_buf) 373 struct efx_rx_buffer *rx_buf)
374{ 374{
375 efx_unmap_rx_buffer(rx_queue->efx, rx_buf); 375 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
376 efx_free_rx_buffer(rx_queue->efx, rx_buf); 376 efx_free_rx_buffer(rx_queue->efx, rx_buf);
@@ -506,10 +506,10 @@ void efx_rx_work(struct work_struct *data)
506 efx_schedule_slow_fill(rx_queue, 1); 506 efx_schedule_slow_fill(rx_queue, 1);
507} 507}
508 508
509static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 509static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
510 struct efx_rx_buffer *rx_buf, 510 struct efx_rx_buffer *rx_buf,
511 int len, int *discard, 511 int len, bool *discard,
512 int *leak_packet) 512 bool *leak_packet)
513{ 513{
514 struct efx_nic *efx = rx_queue->efx; 514 struct efx_nic *efx = rx_queue->efx;
515 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; 515 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -520,7 +520,7 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
520 /* The packet must be discarded, but this is only a fatal error 520 /* The packet must be discarded, but this is only a fatal error
521 * if the caller indicated it was 521 * if the caller indicated it was
522 */ 522 */
523 *discard = 1; 523 *discard = true;
524 524
525 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { 525 if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
526 EFX_ERR_RL(efx, " RX queue %d seriously overlength " 526 EFX_ERR_RL(efx, " RX queue %d seriously overlength "
@@ -546,8 +546,8 @@ static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
546 * Handles driverlink veto, and passes the fragment up via 546 * Handles driverlink veto, and passes the fragment up via
547 * the appropriate LRO method 547 * the appropriate LRO method
548 */ 548 */
549static inline void efx_rx_packet_lro(struct efx_channel *channel, 549static void efx_rx_packet_lro(struct efx_channel *channel,
550 struct efx_rx_buffer *rx_buf) 550 struct efx_rx_buffer *rx_buf)
551{ 551{
552 struct net_lro_mgr *lro_mgr = &channel->lro_mgr; 552 struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
553 void *priv = channel; 553 void *priv = channel;
@@ -574,9 +574,9 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
574} 574}
575 575
576/* Allocate and construct an SKB around a struct page.*/ 576/* Allocate and construct an SKB around a struct page.*/
577static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, 577static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
578 struct efx_nic *efx, 578 struct efx_nic *efx,
579 int hdr_len) 579 int hdr_len)
580{ 580{
581 struct sk_buff *skb; 581 struct sk_buff *skb;
582 582
@@ -621,11 +621,11 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
621} 621}
622 622
623void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 623void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
624 unsigned int len, int checksummed, int discard) 624 unsigned int len, bool checksummed, bool discard)
625{ 625{
626 struct efx_nic *efx = rx_queue->efx; 626 struct efx_nic *efx = rx_queue->efx;
627 struct efx_rx_buffer *rx_buf; 627 struct efx_rx_buffer *rx_buf;
628 int leak_packet = 0; 628 bool leak_packet = false;
629 629
630 rx_buf = efx_rx_buffer(rx_queue, index); 630 rx_buf = efx_rx_buffer(rx_queue, index);
631 EFX_BUG_ON_PARANOID(!rx_buf->data); 631 EFX_BUG_ON_PARANOID(!rx_buf->data);
@@ -683,11 +683,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
683 683
684/* Handle a received packet. Second half: Touches packet payload. */ 684/* Handle a received packet. Second half: Touches packet payload. */
685void __efx_rx_packet(struct efx_channel *channel, 685void __efx_rx_packet(struct efx_channel *channel,
686 struct efx_rx_buffer *rx_buf, int checksummed) 686 struct efx_rx_buffer *rx_buf, bool checksummed)
687{ 687{
688 struct efx_nic *efx = channel->efx; 688 struct efx_nic *efx = channel->efx;
689 struct sk_buff *skb; 689 struct sk_buff *skb;
690 int lro = efx->net_dev->features & NETIF_F_LRO; 690 bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
691 691
692 /* If we're in loopback test, then pass the packet directly to the 692 /* If we're in loopback test, then pass the packet directly to the
693 * loopback layer, and free the rx_buf here 693 * loopback layer, and free the rx_buf here
@@ -789,27 +789,18 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
789 /* Allocate RX buffers */ 789 /* Allocate RX buffers */
790 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); 790 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
791 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 791 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
792 if (!rx_queue->buffer) { 792 if (!rx_queue->buffer)
793 rc = -ENOMEM; 793 return -ENOMEM;
794 goto fail1;
795 }
796 794
797 rc = falcon_probe_rx(rx_queue); 795 rc = falcon_probe_rx(rx_queue);
798 if (rc) 796 if (rc) {
799 goto fail2; 797 kfree(rx_queue->buffer);
800 798 rx_queue->buffer = NULL;
801 return 0; 799 }
802
803 fail2:
804 kfree(rx_queue->buffer);
805 rx_queue->buffer = NULL;
806 fail1:
807 rx_queue->used = 0;
808
809 return rc; 800 return rc;
810} 801}
811 802
812int efx_init_rx_queue(struct efx_rx_queue *rx_queue) 803void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
813{ 804{
814 struct efx_nic *efx = rx_queue->efx; 805 struct efx_nic *efx = rx_queue->efx;
815 unsigned int max_fill, trigger, limit; 806 unsigned int max_fill, trigger, limit;
@@ -833,7 +824,7 @@ int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
833 rx_queue->fast_fill_limit = limit; 824 rx_queue->fast_fill_limit = limit;
834 825
835 /* Set up RX descriptor ring */ 826 /* Set up RX descriptor ring */
836 return falcon_init_rx(rx_queue); 827 falcon_init_rx(rx_queue);
837} 828}
838 829
839void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 830void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -872,7 +863,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
872 863
873 kfree(rx_queue->buffer); 864 kfree(rx_queue->buffer);
874 rx_queue->buffer = NULL; 865 rx_queue->buffer = NULL;
875 rx_queue->used = 0;
876} 866}
877 867
878void efx_flush_lro(struct efx_channel *channel) 868void efx_flush_lro(struct efx_channel *channel)
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index f35e377bfc5f..0e88a9ddc1c6 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -14,7 +14,7 @@
14 14
15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); 16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17int efx_init_rx_queue(struct efx_rx_queue *rx_queue); 17void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19 19
20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx); 20int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
@@ -24,6 +24,6 @@ void efx_rx_strategy(struct efx_channel *channel);
24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 24void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
25void efx_rx_work(struct work_struct *data); 25void efx_rx_work(struct work_struct *data);
26void __efx_rx_packet(struct efx_channel *channel, 26void __efx_rx_packet(struct efx_channel *channel,
27 struct efx_rx_buffer *rx_buf, int checksummed); 27 struct efx_rx_buffer *rx_buf, bool checksummed);
28 28
29#endif /* EFX_RX_H */ 29#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 3b2de9fe7f27..362956e3fe17 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -27,6 +27,9 @@
27#include "boards.h" 27#include "boards.h"
28#include "workarounds.h" 28#include "workarounds.h"
29#include "mac.h" 29#include "mac.h"
30#include "spi.h"
31#include "falcon_io.h"
32#include "mdio_10g.h"
30 33
31/* 34/*
32 * Loopback test packet structure 35 * Loopback test packet structure
@@ -51,7 +54,7 @@ static const char *payload_msg =
51 "Hello world! This is an Efx loopback test in progress!"; 54 "Hello world! This is an Efx loopback test in progress!";
52 55
53/** 56/**
54 * efx_selftest_state - persistent state during a selftest 57 * efx_loopback_state - persistent state during a loopback selftest
55 * @flush: Drop all packets in efx_loopback_rx_packet 58 * @flush: Drop all packets in efx_loopback_rx_packet
56 * @packet_count: Number of packets being used in this test 59 * @packet_count: Number of packets being used in this test
57 * @skbs: An array of skbs transmitted 60 * @skbs: An array of skbs transmitted
@@ -59,10 +62,14 @@ static const char *payload_msg =
59 * @rx_bad: RX bad packet count 62 * @rx_bad: RX bad packet count
60 * @payload: Payload used in tests 63 * @payload: Payload used in tests
61 */ 64 */
62struct efx_selftest_state { 65struct efx_loopback_state {
63 int flush; 66 bool flush;
64 int packet_count; 67 int packet_count;
65 struct sk_buff **skbs; 68 struct sk_buff **skbs;
69
70 /* Checksums are being offloaded */
71 bool offload_csum;
72
66 atomic_t rx_good; 73 atomic_t rx_good;
67 atomic_t rx_bad; 74 atomic_t rx_bad;
68 struct efx_loopback_payload payload; 75 struct efx_loopback_payload payload;
@@ -70,21 +77,65 @@ struct efx_selftest_state {
70 77
71/************************************************************************** 78/**************************************************************************
72 * 79 *
73 * Configurable values 80 * MII, NVRAM and register tests
74 * 81 *
75 **************************************************************************/ 82 **************************************************************************/
76 83
77/* Level of loopback testing 84static int efx_test_mii(struct efx_nic *efx, struct efx_self_tests *tests)
78 * 85{
79 * The maximum packet burst length is 16**(n-1), i.e. 86 int rc = 0;
80 * 87 u16 physid1, physid2;
81 * - Level 0 : no packets 88 struct mii_if_info *mii = &efx->mii;
82 * - Level 1 : 1 packet 89 struct net_device *net_dev = efx->net_dev;
83 * - Level 2 : 17 packets (1 * 1 packet, 1 * 16 packets) 90
84 * - Level 3 : 273 packets (1 * 1 packet, 1 * 16 packet, 1 * 256 packets) 91 if (efx->phy_type == PHY_TYPE_NONE)
85 * 92 return 0;
86 */ 93
87static unsigned int loopback_test_level = 3; 94 mutex_lock(&efx->mac_lock);
95 tests->mii = -1;
96
97 physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1);
98 physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2);
99
100 if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
101 (physid2 == 0x0000) || (physid2 == 0xffff)) {
102 EFX_ERR(efx, "no MII PHY present with ID %d\n",
103 mii->phy_id);
104 rc = -EINVAL;
105 goto out;
106 }
107
108 rc = mdio_clause45_check_mmds(efx, efx->phy_op->mmds, 0);
109 if (rc)
110 goto out;
111
112out:
113 mutex_unlock(&efx->mac_lock);
114 tests->mii = rc ? -1 : 1;
115 return rc;
116}
117
118static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
119{
120 int rc;
121
122 rc = falcon_read_nvram(efx, NULL);
123 tests->nvram = rc ? -1 : 1;
124 return rc;
125}
126
127static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
128{
129 int rc;
130
131 /* Not supported on A-series silicon */
132 if (falcon_rev(efx) < FALCON_REV_B0)
133 return 0;
134
135 rc = falcon_test_registers(efx);
136 tests->registers = rc ? -1 : 1;
137 return rc;
138}
88 139
89/************************************************************************** 140/**************************************************************************
90 * 141 *
@@ -107,7 +158,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
107 158
108 /* ACK each interrupting event queue. Receiving an interrupt due to 159 /* ACK each interrupting event queue. Receiving an interrupt due to
109 * traffic before a test event is raised is considered a pass */ 160 * traffic before a test event is raised is considered a pass */
110 efx_for_each_channel_with_interrupt(channel, efx) { 161 efx_for_each_channel(channel, efx) {
111 if (channel->work_pending) 162 if (channel->work_pending)
112 efx_process_channel_now(channel); 163 efx_process_channel_now(channel);
113 if (efx->last_irq_cpu >= 0) 164 if (efx->last_irq_cpu >= 0)
@@ -132,41 +183,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
132 return 0; 183 return 0;
133} 184}
134 185
135/* Test generation and receipt of non-interrupting events */
136static int efx_test_eventq(struct efx_channel *channel,
137 struct efx_self_tests *tests)
138{
139 unsigned int magic;
140
141 /* Channel specific code, limited to 20 bits */
142 magic = (0x00010150 + channel->channel);
143 EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
144 channel->channel, magic);
145
146 tests->eventq_dma[channel->channel] = -1;
147 tests->eventq_int[channel->channel] = 1; /* fake pass */
148 tests->eventq_poll[channel->channel] = 1; /* fake pass */
149
150 /* Reset flag and zero magic word */
151 channel->efx->last_irq_cpu = -1;
152 channel->eventq_magic = 0;
153 smp_wmb();
154
155 falcon_generate_test_event(channel, magic);
156 udelay(1);
157
158 efx_process_channel_now(channel);
159 if (channel->eventq_magic != magic) {
160 EFX_ERR(channel->efx, "channel %d failed to see test event\n",
161 channel->channel);
162 return -ETIMEDOUT;
163 } else {
164 tests->eventq_dma[channel->channel] = 1;
165 }
166
167 return 0;
168}
169
170/* Test generation and receipt of interrupting events */ 186/* Test generation and receipt of interrupting events */
171static int efx_test_eventq_irq(struct efx_channel *channel, 187static int efx_test_eventq_irq(struct efx_channel *channel,
172 struct efx_self_tests *tests) 188 struct efx_self_tests *tests)
@@ -230,39 +246,18 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
230 return 0; 246 return 0;
231} 247}
232 248
233/************************************************************************** 249static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests)
234 *
235 * PHY testing
236 *
237 **************************************************************************/
238
239/* Check PHY presence by reading the PHY ID registers */
240static int efx_test_phy(struct efx_nic *efx,
241 struct efx_self_tests *tests)
242{ 250{
243 u16 physid1, physid2; 251 int rc;
244 struct mii_if_info *mii = &efx->mii;
245 struct net_device *net_dev = efx->net_dev;
246 252
247 if (efx->phy_type == PHY_TYPE_NONE) 253 if (!efx->phy_op->test)
248 return 0; 254 return 0;
249 255
250 EFX_LOG(efx, "testing PHY presence\n"); 256 mutex_lock(&efx->mac_lock);
251 tests->phy_ok = -1; 257 rc = efx->phy_op->test(efx);
252 258 mutex_unlock(&efx->mac_lock);
253 physid1 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID1); 259 tests->phy = rc ? -1 : 1;
254 physid2 = mii->mdio_read(net_dev, mii->phy_id, MII_PHYSID2); 260 return rc;
255
256 if ((physid1 != 0x0000) && (physid1 != 0xffff) &&
257 (physid2 != 0x0000) && (physid2 != 0xffff)) {
258 EFX_LOG(efx, "found MII PHY %d ID 0x%x:%x\n",
259 mii->phy_id, physid1, physid2);
260 tests->phy_ok = 1;
261 return 0;
262 }
263
264 EFX_ERR(efx, "no MII PHY present with ID %d\n", mii->phy_id);
265 return -ENODEV;
266} 261}
267 262
268/************************************************************************** 263/**************************************************************************
@@ -278,7 +273,7 @@ static int efx_test_phy(struct efx_nic *efx,
278void efx_loopback_rx_packet(struct efx_nic *efx, 273void efx_loopback_rx_packet(struct efx_nic *efx,
279 const char *buf_ptr, int pkt_len) 274 const char *buf_ptr, int pkt_len)
280{ 275{
281 struct efx_selftest_state *state = efx->loopback_selftest; 276 struct efx_loopback_state *state = efx->loopback_selftest;
282 struct efx_loopback_payload *received; 277 struct efx_loopback_payload *received;
283 struct efx_loopback_payload *payload; 278 struct efx_loopback_payload *payload;
284 279
@@ -289,11 +284,12 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
289 return; 284 return;
290 285
291 payload = &state->payload; 286 payload = &state->payload;
292 287
293 received = (struct efx_loopback_payload *) buf_ptr; 288 received = (struct efx_loopback_payload *) buf_ptr;
294 received->ip.saddr = payload->ip.saddr; 289 received->ip.saddr = payload->ip.saddr;
295 received->ip.check = payload->ip.check; 290 if (state->offload_csum)
296 291 received->ip.check = payload->ip.check;
292
297 /* Check that header exists */ 293 /* Check that header exists */
298 if (pkt_len < sizeof(received->header)) { 294 if (pkt_len < sizeof(received->header)) {
299 EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback " 295 EFX_ERR(efx, "saw runt RX packet (length %d) in %s loopback "
@@ -362,7 +358,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
362/* Initialise an efx_selftest_state for a new iteration */ 358/* Initialise an efx_selftest_state for a new iteration */
363static void efx_iterate_state(struct efx_nic *efx) 359static void efx_iterate_state(struct efx_nic *efx)
364{ 360{
365 struct efx_selftest_state *state = efx->loopback_selftest; 361 struct efx_loopback_state *state = efx->loopback_selftest;
366 struct net_device *net_dev = efx->net_dev; 362 struct net_device *net_dev = efx->net_dev;
367 struct efx_loopback_payload *payload = &state->payload; 363 struct efx_loopback_payload *payload = &state->payload;
368 364
@@ -395,17 +391,17 @@ static void efx_iterate_state(struct efx_nic *efx)
395 smp_wmb(); 391 smp_wmb();
396} 392}
397 393
398static int efx_tx_loopback(struct efx_tx_queue *tx_queue) 394static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
399{ 395{
400 struct efx_nic *efx = tx_queue->efx; 396 struct efx_nic *efx = tx_queue->efx;
401 struct efx_selftest_state *state = efx->loopback_selftest; 397 struct efx_loopback_state *state = efx->loopback_selftest;
402 struct efx_loopback_payload *payload; 398 struct efx_loopback_payload *payload;
403 struct sk_buff *skb; 399 struct sk_buff *skb;
404 int i, rc; 400 int i, rc;
405 401
406 /* Transmit N copies of buffer */ 402 /* Transmit N copies of buffer */
407 for (i = 0; i < state->packet_count; i++) { 403 for (i = 0; i < state->packet_count; i++) {
408 /* Allocate an skb, holding an extra reference for 404 /* Allocate an skb, holding an extra reference for
409 * transmit completion counting */ 405 * transmit completion counting */
410 skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 406 skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
411 if (!skb) 407 if (!skb)
@@ -444,11 +440,25 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
444 return 0; 440 return 0;
445} 441}
446 442
447static int efx_rx_loopback(struct efx_tx_queue *tx_queue, 443static int efx_poll_loopback(struct efx_nic *efx)
448 struct efx_loopback_self_tests *lb_tests) 444{
445 struct efx_loopback_state *state = efx->loopback_selftest;
446 struct efx_channel *channel;
447
448 /* NAPI polling is not enabled, so process channels
449 * synchronously */
450 efx_for_each_channel(channel, efx) {
451 if (channel->work_pending)
452 efx_process_channel_now(channel);
453 }
454 return atomic_read(&state->rx_good) == state->packet_count;
455}
456
457static int efx_end_loopback(struct efx_tx_queue *tx_queue,
458 struct efx_loopback_self_tests *lb_tests)
449{ 459{
450 struct efx_nic *efx = tx_queue->efx; 460 struct efx_nic *efx = tx_queue->efx;
451 struct efx_selftest_state *state = efx->loopback_selftest; 461 struct efx_loopback_state *state = efx->loopback_selftest;
452 struct sk_buff *skb; 462 struct sk_buff *skb;
453 int tx_done = 0, rx_good, rx_bad; 463 int tx_done = 0, rx_good, rx_bad;
454 int i, rc = 0; 464 int i, rc = 0;
@@ -507,11 +517,10 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
507 struct efx_loopback_self_tests *lb_tests) 517 struct efx_loopback_self_tests *lb_tests)
508{ 518{
509 struct efx_nic *efx = tx_queue->efx; 519 struct efx_nic *efx = tx_queue->efx;
510 struct efx_selftest_state *state = efx->loopback_selftest; 520 struct efx_loopback_state *state = efx->loopback_selftest;
511 struct efx_channel *channel; 521 int i, begin_rc, end_rc;
512 int i, rc = 0;
513 522
514 for (i = 0; i < loopback_test_level; i++) { 523 for (i = 0; i < 3; i++) {
515 /* Determine how many packets to send */ 524 /* Determine how many packets to send */
516 state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 525 state->packet_count = (efx->type->txd_ring_mask + 1) / 3;
517 state->packet_count = min(1 << (i << 2), state->packet_count); 526 state->packet_count = min(1 << (i << 2), state->packet_count);
@@ -519,30 +528,31 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
519 state->packet_count, GFP_KERNEL); 528 state->packet_count, GFP_KERNEL);
520 if (!state->skbs) 529 if (!state->skbs)
521 return -ENOMEM; 530 return -ENOMEM;
522 state->flush = 0; 531 state->flush = false;
523 532
524 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " 533 EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
525 "packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 534 "packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
526 state->packet_count); 535 state->packet_count);
527 536
528 efx_iterate_state(efx); 537 efx_iterate_state(efx);
529 rc = efx_tx_loopback(tx_queue); 538 begin_rc = efx_begin_loopback(tx_queue);
530 539
531 /* NAPI polling is not enabled, so process channels synchronously */ 540 /* This will normally complete very quickly, but be
532 schedule_timeout_uninterruptible(HZ / 50); 541 * prepared to wait up to 100 ms. */
533 efx_for_each_channel_with_interrupt(channel, efx) { 542 msleep(1);
534 if (channel->work_pending) 543 if (!efx_poll_loopback(efx)) {
535 efx_process_channel_now(channel); 544 msleep(100);
545 efx_poll_loopback(efx);
536 } 546 }
537 547
538 rc |= efx_rx_loopback(tx_queue, lb_tests); 548 end_rc = efx_end_loopback(tx_queue, lb_tests);
539 kfree(state->skbs); 549 kfree(state->skbs);
540 550
541 if (rc) { 551 if (begin_rc || end_rc) {
542 /* Wait a while to ensure there are no packets 552 /* Wait a while to ensure there are no packets
543 * floating around after a failure. */ 553 * floating around after a failure. */
544 schedule_timeout_uninterruptible(HZ / 10); 554 schedule_timeout_uninterruptible(HZ / 10);
545 return rc; 555 return begin_rc ? begin_rc : end_rc;
546 } 556 }
547 } 557 }
548 558
@@ -550,49 +560,36 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
550 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), 560 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
551 state->packet_count); 561 state->packet_count);
552 562
553 return rc; 563 return 0;
554} 564}
555 565
556static int efx_test_loopbacks(struct efx_nic *efx, 566static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd,
557 struct efx_self_tests *tests, 567 struct efx_self_tests *tests,
558 unsigned int loopback_modes) 568 unsigned int loopback_modes)
559{ 569{
560 struct efx_selftest_state *state = efx->loopback_selftest; 570 enum efx_loopback_mode mode;
561 struct ethtool_cmd ecmd, ecmd_loopback; 571 struct efx_loopback_state *state;
562 struct efx_tx_queue *tx_queue; 572 struct efx_tx_queue *tx_queue;
563 enum efx_loopback_mode old_mode, mode; 573 bool link_up;
564 int count, rc = 0, link_up; 574 int count, rc = 0;
565
566 rc = efx_ethtool_get_settings(efx->net_dev, &ecmd);
567 if (rc) {
568 EFX_ERR(efx, "could not get GMII settings\n");
569 return rc;
570 }
571 old_mode = efx->loopback_mode;
572
573 /* Disable autonegotiation for the purposes of loopback */
574 memcpy(&ecmd_loopback, &ecmd, sizeof(ecmd_loopback));
575 if (ecmd_loopback.autoneg == AUTONEG_ENABLE) {
576 ecmd_loopback.autoneg = AUTONEG_DISABLE;
577 ecmd_loopback.duplex = DUPLEX_FULL;
578 ecmd_loopback.speed = SPEED_10000;
579 }
580 575
581 rc = efx_ethtool_set_settings(efx->net_dev, &ecmd_loopback); 576 /* Set the port loopback_selftest member. From this point on
582 if (rc) { 577 * all received packets will be dropped. Mark the state as
583 EFX_ERR(efx, "could not disable autonegotiation\n"); 578 * "flushing" so all inflight packets are dropped */
584 goto out; 579 state = kzalloc(sizeof(*state), GFP_KERNEL);
585 } 580 if (state == NULL)
586 tests->loopback_speed = ecmd_loopback.speed; 581 return -ENOMEM;
587 tests->loopback_full_duplex = ecmd_loopback.duplex; 582 BUG_ON(efx->loopback_selftest);
583 state->flush = true;
584 efx->loopback_selftest = state;
588 585
589 /* Test all supported loopback modes */ 586 /* Test all supported loopback modes */
590 for (mode = LOOPBACK_NONE; mode < LOOPBACK_TEST_MAX; mode++) { 587 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
591 if (!(loopback_modes & (1 << mode))) 588 if (!(loopback_modes & (1 << mode)))
592 continue; 589 continue;
593 590
594 /* Move the port into the specified loopback mode. */ 591 /* Move the port into the specified loopback mode. */
595 state->flush = 1; 592 state->flush = true;
596 efx->loopback_mode = mode; 593 efx->loopback_mode = mode;
597 efx_reconfigure_port(efx); 594 efx_reconfigure_port(efx);
598 595
@@ -616,7 +613,7 @@ static int efx_test_loopbacks(struct efx_nic *efx,
616 */ 613 */
617 link_up = efx->link_up; 614 link_up = efx->link_up;
618 if (!falcon_xaui_link_ok(efx)) 615 if (!falcon_xaui_link_ok(efx))
619 link_up = 0; 616 link_up = false;
620 617
621 } while ((++count < 20) && !link_up); 618 } while ((++count < 20) && !link_up);
622 619
@@ -634,18 +631,21 @@ static int efx_test_loopbacks(struct efx_nic *efx,
634 631
635 /* Test every TX queue */ 632 /* Test every TX queue */
636 efx_for_each_tx_queue(tx_queue, efx) { 633 efx_for_each_tx_queue(tx_queue, efx) {
637 rc |= efx_test_loopback(tx_queue, 634 state->offload_csum = (tx_queue->queue ==
638 &tests->loopback[mode]); 635 EFX_TX_QUEUE_OFFLOAD_CSUM);
636 rc = efx_test_loopback(tx_queue,
637 &tests->loopback[mode]);
639 if (rc) 638 if (rc)
640 goto out; 639 goto out;
641 } 640 }
642 } 641 }
643 642
644 out: 643 out:
645 /* Take out of loopback and restore PHY settings */ 644 /* Remove the flush. The caller will remove the loopback setting */
646 state->flush = 1; 645 state->flush = true;
647 efx->loopback_mode = old_mode; 646 efx->loopback_selftest = NULL;
648 efx_ethtool_set_settings(efx->net_dev, &ecmd); 647 wmb();
648 kfree(state);
649 649
650 return rc; 650 return rc;
651} 651}
@@ -661,23 +661,27 @@ static int efx_test_loopbacks(struct efx_nic *efx,
661int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests) 661int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
662{ 662{
663 struct efx_channel *channel; 663 struct efx_channel *channel;
664 int rc = 0; 664 int rc, rc2 = 0;
665
666 rc = efx_test_mii(efx, tests);
667 if (rc && !rc2)
668 rc2 = rc;
665 669
666 EFX_LOG(efx, "performing online self-tests\n"); 670 rc = efx_test_nvram(efx, tests);
671 if (rc && !rc2)
672 rc2 = rc;
673
674 rc = efx_test_interrupts(efx, tests);
675 if (rc && !rc2)
676 rc2 = rc;
667 677
668 rc |= efx_test_interrupts(efx, tests);
669 efx_for_each_channel(channel, efx) { 678 efx_for_each_channel(channel, efx) {
670 if (channel->has_interrupt) 679 rc = efx_test_eventq_irq(channel, tests);
671 rc |= efx_test_eventq_irq(channel, tests); 680 if (rc && !rc2)
672 else 681 rc2 = rc;
673 rc |= efx_test_eventq(channel, tests);
674 } 682 }
675 rc |= efx_test_phy(efx, tests);
676
677 if (rc)
678 EFX_ERR(efx, "failed online self-tests\n");
679 683
680 return rc; 684 return rc2;
681} 685}
682 686
683/* Offline (i.e. disruptive) testing 687/* Offline (i.e. disruptive) testing
@@ -685,35 +689,66 @@ int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
685int efx_offline_test(struct efx_nic *efx, 689int efx_offline_test(struct efx_nic *efx,
686 struct efx_self_tests *tests, unsigned int loopback_modes) 690 struct efx_self_tests *tests, unsigned int loopback_modes)
687{ 691{
688 struct efx_selftest_state *state; 692 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
689 int rc = 0; 693 int phy_mode = efx->phy_mode;
690 694 struct ethtool_cmd ecmd, ecmd_test;
691 EFX_LOG(efx, "performing offline self-tests\n"); 695 int rc, rc2 = 0;
696
697 /* force the carrier state off so the kernel doesn't transmit during
698 * the loopback test, and the watchdog timeout doesn't fire. Also put
699 * falcon into loopback for the register test.
700 */
701 mutex_lock(&efx->mac_lock);
702 efx->port_inhibited = true;
703 if (efx->loopback_modes)
704 efx->loopback_mode = __ffs(efx->loopback_modes);
705 __efx_reconfigure_port(efx);
706 mutex_unlock(&efx->mac_lock);
707
708 /* free up all consumers of SRAM (including all the queues) */
709 efx_reset_down(efx, &ecmd);
710
711 rc = efx_test_chip(efx, tests);
712 if (rc && !rc2)
713 rc2 = rc;
714
715 /* reset the chip to recover from the register test */
716 rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
717
718 /* Modify the saved ecmd so that when efx_reset_up() restores the phy
719 * state, AN is disabled, and the phy is powered, and out of loopback */
720 memcpy(&ecmd_test, &ecmd, sizeof(ecmd_test));
721 if (ecmd_test.autoneg == AUTONEG_ENABLE) {
722 ecmd_test.autoneg = AUTONEG_DISABLE;
723 ecmd_test.duplex = DUPLEX_FULL;
724 ecmd_test.speed = SPEED_10000;
725 }
726 efx->loopback_mode = LOOPBACK_NONE;
692 727
693 /* Create a selftest_state structure to hold state for the test */ 728 rc = efx_reset_up(efx, &ecmd_test, rc == 0);
694 state = kzalloc(sizeof(*state), GFP_KERNEL); 729 if (rc) {
695 if (state == NULL) { 730 EFX_ERR(efx, "Unable to recover from chip test\n");
696 rc = -ENOMEM; 731 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
697 goto out; 732 return rc;
698 } 733 }
699 734
700 /* Set the port loopback_selftest member. From this point on 735 tests->loopback_speed = ecmd_test.speed;
701 * all received packets will be dropped. Mark the state as 736 tests->loopback_full_duplex = ecmd_test.duplex;
702 * "flushing" so all inflight packets are dropped */
703 BUG_ON(efx->loopback_selftest);
704 state->flush = 1;
705 efx->loopback_selftest = state;
706 737
707 rc = efx_test_loopbacks(efx, tests, loopback_modes); 738 rc = efx_test_phy(efx, tests);
739 if (rc && !rc2)
740 rc2 = rc;
708 741
709 efx->loopback_selftest = NULL; 742 rc = efx_test_loopbacks(efx, ecmd_test, tests, loopback_modes);
710 wmb(); 743 if (rc && !rc2)
711 kfree(state); 744 rc2 = rc;
712 745
713 out: 746 /* restore the PHY to the previous state */
714 if (rc) 747 efx->loopback_mode = loopback_mode;
715 EFX_ERR(efx, "failed offline self-tests\n"); 748 efx->phy_mode = phy_mode;
749 efx->port_inhibited = false;
750 efx_ethtool_set_settings(efx->net_dev, &ecmd);
716 751
717 return rc; 752 return rc2;
718} 753}
719 754
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6999c2b622d..fc15df15d766 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20struct efx_loopback_self_tests { 20struct efx_loopback_self_tests {
21 int tx_sent[EFX_MAX_TX_QUEUES]; 21 int tx_sent[EFX_TX_QUEUE_COUNT];
22 int tx_done[EFX_MAX_TX_QUEUES]; 22 int tx_done[EFX_TX_QUEUE_COUNT];
23 int rx_good; 23 int rx_good;
24 int rx_bad; 24 int rx_bad;
25}; 25};
@@ -29,14 +29,19 @@ struct efx_loopback_self_tests {
29 * indicates failure. 29 * indicates failure.
30 */ 30 */
31struct efx_self_tests { 31struct efx_self_tests {
32 /* online tests */
33 int mii;
34 int nvram;
32 int interrupt; 35 int interrupt;
33 int eventq_dma[EFX_MAX_CHANNELS]; 36 int eventq_dma[EFX_MAX_CHANNELS];
34 int eventq_int[EFX_MAX_CHANNELS]; 37 int eventq_int[EFX_MAX_CHANNELS];
35 int eventq_poll[EFX_MAX_CHANNELS]; 38 int eventq_poll[EFX_MAX_CHANNELS];
36 int phy_ok; 39 /* offline tests */
40 int registers;
41 int phy;
37 int loopback_speed; 42 int loopback_speed;
38 int loopback_full_duplex; 43 int loopback_full_duplex;
39 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX]; 44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
40}; 45};
41 46
42extern void efx_loopback_rx_packet(struct efx_nic *efx, 47extern void efx_loopback_rx_packet(struct efx_nic *efx,
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index b27849523990..fe4e3fd22330 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -13,11 +13,13 @@
13 * the PHY 13 * the PHY
14 */ 14 */
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include "net_driver.h"
16#include "efx.h" 17#include "efx.h"
17#include "phy.h" 18#include "phy.h"
18#include "boards.h" 19#include "boards.h"
19#include "falcon.h" 20#include "falcon.h"
20#include "falcon_hwdefs.h" 21#include "falcon_hwdefs.h"
22#include "falcon_io.h"
21#include "mac.h" 23#include "mac.h"
22 24
23/************************************************************************** 25/**************************************************************************
@@ -120,23 +122,144 @@ static void sfe4001_poweroff(struct efx_nic *efx)
120 i2c_smbus_read_byte_data(hwmon_client, RSL); 122 i2c_smbus_read_byte_data(hwmon_client, RSL);
121} 123}
122 124
123static void sfe4001_fini(struct efx_nic *efx) 125static int sfe4001_poweron(struct efx_nic *efx)
124{ 126{
125 EFX_INFO(efx, "%s\n", __func__); 127 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
128 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
129 unsigned int i, j;
130 int rc;
131 u8 out;
132
133 /* Clear any previous over-temperature alert */
134 rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
135 if (rc < 0)
136 return rc;
137
138 /* Enable port 0 and port 1 outputs on IO expander */
139 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
140 if (rc)
141 return rc;
142 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
143 0xff & ~(1 << P1_SPARE_LBN));
144 if (rc)
145 goto fail_on;
146
147 /* If PHY power is on, turn it all off and wait 1 second to
148 * ensure a full reset.
149 */
150 rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
151 if (rc < 0)
152 goto fail_on;
153 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
154 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
155 (0 << P0_EN_1V0X_LBN));
156 if (rc != out) {
157 EFX_INFO(efx, "power-cycling PHY\n");
158 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
159 if (rc)
160 goto fail_on;
161 schedule_timeout_uninterruptible(HZ);
162 }
126 163
164 for (i = 0; i < 20; ++i) {
165 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
166 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
167 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
168 (1 << P0_X_TRST_LBN));
169 if (efx->phy_mode & PHY_MODE_SPECIAL)
170 out |= 1 << P0_EN_3V3X_LBN;
171
172 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
173 if (rc)
174 goto fail_on;
175 msleep(10);
176
177 /* Turn on 1V power rail */
178 out &= ~(1 << P0_EN_1V0X_LBN);
179 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
180 if (rc)
181 goto fail_on;
182
183 EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
184
185 /* In flash config mode, DSP does not turn on AFE, so
186 * just wait 1 second.
187 */
188 if (efx->phy_mode & PHY_MODE_SPECIAL) {
189 schedule_timeout_uninterruptible(HZ);
190 return 0;
191 }
192
193 for (j = 0; j < 10; ++j) {
194 msleep(100);
195
196 /* Check DSP has asserted AFE power line */
197 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
198 if (rc < 0)
199 goto fail_on;
200 if (rc & (1 << P1_AFE_PWD_LBN))
201 return 0;
202 }
203 }
204
205 EFX_INFO(efx, "timed out waiting for DSP boot\n");
206 rc = -ETIMEDOUT;
207fail_on:
127 sfe4001_poweroff(efx); 208 sfe4001_poweroff(efx);
128 i2c_unregister_device(efx->board_info.ioexp_client); 209 return rc;
129 i2c_unregister_device(efx->board_info.hwmon_client);
130} 210}
131 211
132/* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected 212/* On SFE4001 rev A2 and later, we can control the FLASH_CFG_1 pin
133 * to the FLASH_CFG_1 input on the DSP. We must keep it high at power- 213 * using the 3V3X output of the IO-expander. Allow the user to set
134 * up to allow writing the flash (done through MDIO from userland). 214 * this when the device is stopped, and keep it stopped then.
135 */ 215 */
136unsigned int sfe4001_phy_flash_cfg; 216
137module_param_named(phy_flash_cfg, sfe4001_phy_flash_cfg, uint, 0444); 217static ssize_t show_phy_flash_cfg(struct device *dev,
138MODULE_PARM_DESC(phy_flash_cfg, 218 struct device_attribute *attr, char *buf)
139 "Force PHY to enter flash configuration mode"); 219{
220 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
221 return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
222}
223
224static ssize_t set_phy_flash_cfg(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t count)
227{
228 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
229 enum efx_phy_mode old_mode, new_mode;
230 int err;
231
232 rtnl_lock();
233 old_mode = efx->phy_mode;
234 if (count == 0 || *buf == '0')
235 new_mode = old_mode & ~PHY_MODE_SPECIAL;
236 else
237 new_mode = PHY_MODE_SPECIAL;
238 if (old_mode == new_mode) {
239 err = 0;
240 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
241 err = -EBUSY;
242 } else {
243 efx->phy_mode = new_mode;
244 err = sfe4001_poweron(efx);
245 efx_reconfigure_port(efx);
246 }
247 rtnl_unlock();
248
249 return err ? err : count;
250}
251
252static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
253
254static void sfe4001_fini(struct efx_nic *efx)
255{
256 EFX_INFO(efx, "%s\n", __func__);
257
258 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
259 sfe4001_poweroff(efx);
260 i2c_unregister_device(efx->board_info.ioexp_client);
261 i2c_unregister_device(efx->board_info.hwmon_client);
262}
140 263
141/* This board uses an I2C expander to provider power to the PHY, which needs to 264/* This board uses an I2C expander to provider power to the PHY, which needs to
142 * be turned on before the PHY can be used. 265 * be turned on before the PHY can be used.
@@ -144,41 +267,14 @@ MODULE_PARM_DESC(phy_flash_cfg,
144 */ 267 */
145int sfe4001_init(struct efx_nic *efx) 268int sfe4001_init(struct efx_nic *efx)
146{ 269{
147 struct i2c_client *hwmon_client, *ioexp_client; 270 struct i2c_client *hwmon_client;
148 unsigned int count;
149 int rc; 271 int rc;
150 u8 out;
151 efx_dword_t reg;
152 272
153 hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647); 273 hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647);
154 if (!hwmon_client) 274 if (!hwmon_client)
155 return -EIO; 275 return -EIO;
156 efx->board_info.hwmon_client = hwmon_client; 276 efx->board_info.hwmon_client = hwmon_client;
157 277
158 ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
159 if (!ioexp_client) {
160 rc = -EIO;
161 goto fail_hwmon;
162 }
163 efx->board_info.ioexp_client = ioexp_client;
164
165 /* 10Xpress has fixed-function LED pins, so there is no board-specific
166 * blink code. */
167 efx->board_info.blink = tenxpress_phy_blink;
168
169 /* Ensure that XGXS and XAUI SerDes are held in reset */
170 EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
171 XX_PWRDNB_EN, 1,
172 XX_RSTPLLAB_EN, 1,
173 XX_RESETA_EN, 1,
174 XX_RESETB_EN, 1,
175 XX_RSTXGXSRX_EN, 1,
176 XX_RSTXGXSTX_EN, 1);
177 falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
178 udelay(10);
179
180 efx->board_info.fini = sfe4001_fini;
181
182 /* Set DSP over-temperature alert threshold */ 278 /* Set DSP over-temperature alert threshold */
183 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature); 279 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
184 rc = i2c_smbus_write_byte_data(hwmon_client, WLHO, 280 rc = i2c_smbus_write_byte_data(hwmon_client, WLHO,
@@ -195,78 +291,34 @@ int sfe4001_init(struct efx_nic *efx)
195 goto fail_ioexp; 291 goto fail_ioexp;
196 } 292 }
197 293
198 /* Clear any previous over-temperature alert */ 294 efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
199 rc = i2c_smbus_read_byte_data(hwmon_client, RSL); 295 if (!efx->board_info.ioexp_client) {
200 if (rc < 0) 296 rc = -EIO;
201 goto fail_ioexp; 297 goto fail_hwmon;
298 }
202 299
203 /* Enable port 0 and port 1 outputs on IO expander */ 300 /* 10Xpress has fixed-function LED pins, so there is no board-specific
204 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00); 301 * blink code. */
302 efx->board_info.blink = tenxpress_phy_blink;
303
304 efx->board_info.fini = sfe4001_fini;
305
306 rc = sfe4001_poweron(efx);
205 if (rc) 307 if (rc)
206 goto fail_ioexp; 308 goto fail_ioexp;
207 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
208 0xff & ~(1 << P1_SPARE_LBN));
209 if (rc)
210 goto fail_on;
211 309
212 /* Turn all power off then wait 1 sec. This ensures PHY is reset */ 310 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
213 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
214 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
215 (0 << P0_EN_1V0X_LBN));
216 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
217 if (rc) 311 if (rc)
218 goto fail_on; 312 goto fail_on;
219 313
220 schedule_timeout_uninterruptible(HZ);
221 count = 0;
222 do {
223 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
224 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
225 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
226 (1 << P0_X_TRST_LBN));
227 if (sfe4001_phy_flash_cfg)
228 out |= 1 << P0_EN_3V3X_LBN;
229
230 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
231 if (rc)
232 goto fail_on;
233 msleep(10);
234
235 /* Turn on 1V power rail */
236 out &= ~(1 << P0_EN_1V0X_LBN);
237 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
238 if (rc)
239 goto fail_on;
240
241 EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
242
243 schedule_timeout_uninterruptible(HZ);
244
245 /* Check DSP is powered */
246 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
247 if (rc < 0)
248 goto fail_on;
249 if (rc & (1 << P1_AFE_PWD_LBN))
250 goto done;
251
252 /* DSP doesn't look powered in flash config mode */
253 if (sfe4001_phy_flash_cfg)
254 goto done;
255 } while (++count < 20);
256
257 EFX_INFO(efx, "timed out waiting for power\n");
258 rc = -ETIMEDOUT;
259 goto fail_on;
260
261done:
262 EFX_INFO(efx, "PHY is powered on\n"); 314 EFX_INFO(efx, "PHY is powered on\n");
263 return 0; 315 return 0;
264 316
265fail_on: 317fail_on:
266 sfe4001_poweroff(efx); 318 sfe4001_poweroff(efx);
267fail_ioexp: 319fail_ioexp:
268 i2c_unregister_device(ioexp_client); 320 i2c_unregister_device(efx->board_info.ioexp_client);
269fail_hwmon: 321fail_hwmon:
270 i2c_unregister_device(hwmon_client); 322 i2c_unregister_device(hwmon_client);
271 return rc; 323 return rc;
272} 324}
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 34412f3d41c9..feef61942377 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -19,53 +19,48 @@
19 * 19 *
20 *************************************************************************/ 20 *************************************************************************/
21 21
22/* 22#define SPI_WRSR 0x01 /* Write status register */
23 * Commands common to all known devices. 23#define SPI_WRITE 0x02 /* Write data to memory array */
24 * 24#define SPI_READ 0x03 /* Read data from memory array */
25#define SPI_WRDI 0x04 /* Reset write enable latch */
26#define SPI_RDSR 0x05 /* Read status register */
27#define SPI_WREN 0x06 /* Set write enable latch */
28
29#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
30#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
31#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
32#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
33#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
34#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
35
36/**
37 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
38 * @efx: The Efx controller that owns this device
39 * @device_id: Controller's id for the device
40 * @size: Size (in bytes)
41 * @addr_len: Number of address bytes in read/write commands
42 * @munge_address: Flag whether addresses should be munged.
43 * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
44 * use bit 3 of the command byte as address bit A8, rather
45 * than having a two-byte address. If this flag is set, then
46 * commands should be munged in this way.
47 * @block_size: Write block size (in bytes).
48 * Write commands are limited to blocks with this size and alignment.
49 * @read: Read function for the device
50 * @write: Write function for the device
25 */ 51 */
26 52struct efx_spi_device {
27/* Write status register */ 53 struct efx_nic *efx;
28#define SPI_WRSR 0x01 54 int device_id;
29 55 unsigned int size;
30/* Write data to memory array */ 56 unsigned int addr_len;
31#define SPI_WRITE 0x02 57 unsigned int munge_address:1;
32 58 unsigned int block_size;
33/* Read data from memory array */ 59};
34#define SPI_READ 0x03 60
35 61int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
36/* Reset write enable latch */ 62 size_t len, size_t *retlen, u8 *buffer);
37#define SPI_WRDI 0x04 63int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
38 64 size_t len, size_t *retlen, const u8 *buffer);
39/* Read status register */
40#define SPI_RDSR 0x05
41
42/* Set write enable latch */
43#define SPI_WREN 0x06
44
45/* SST: Enable write to status register */
46#define SPI_SST_EWSR 0x50
47
48/*
49 * Status register bits. Not all bits are supported on all devices.
50 *
51 */
52
53/* Write-protect pin enabled */
54#define SPI_STATUS_WPEN 0x80
55
56/* Block protection bit 2 */
57#define SPI_STATUS_BP2 0x10
58
59/* Block protection bit 1 */
60#define SPI_STATUS_BP1 0x08
61
62/* Block protection bit 0 */
63#define SPI_STATUS_BP0 0x04
64
65/* State of the write enable latch */
66#define SPI_STATUS_WEN 0x02
67
68/* Device busy flag */
69#define SPI_STATUS_NRDY 0x01
70 65
71#endif /* EFX_SPI_H */ 66#endif /* EFX_SPI_H */
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index c0146061c326..d507c93d666e 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -65,25 +65,10 @@
65#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 65#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
66 66
67 67
68/* Self test (BIST) control register */
69#define PMA_PMD_BIST_CTRL_REG (0xc014)
70#define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */
71#define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */
72#define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */
73/* Self test status register */
74#define PMA_PMD_BIST_STAT_REG (0xc015)
75#define PMA_PMD_BIST_ENX_LBN (3)
76#define PMA_PMD_BIST_PMA_LBN (2)
77#define PMA_PMD_BIST_RXD_LBN (1)
78#define PMA_PMD_BIST_AFE_LBN (0)
79
80/* Special Software reset register */ 68/* Special Software reset register */
81#define PMA_PMD_EXT_CTRL_REG 49152 69#define PMA_PMD_EXT_CTRL_REG 49152
82#define PMA_PMD_EXT_SSR_LBN 15 70#define PMA_PMD_EXT_SSR_LBN 15
83 71
84#define BIST_MAX_DELAY (1000)
85#define BIST_POLL_DELAY (10)
86
87/* Misc register defines */ 72/* Misc register defines */
88#define PCS_CLOCK_CTRL_REG 0xd801 73#define PCS_CLOCK_CTRL_REG 0xd801
89#define PLL312_RST_N_LBN 2 74#define PLL312_RST_N_LBN 2
@@ -119,27 +104,12 @@ MODULE_PARM_DESC(crc_error_reset_threshold,
119 "Max number of CRC errors before XAUI reset"); 104 "Max number of CRC errors before XAUI reset");
120 105
121struct tenxpress_phy_data { 106struct tenxpress_phy_data {
122 enum tenxpress_state state;
123 enum efx_loopback_mode loopback_mode; 107 enum efx_loopback_mode loopback_mode;
124 atomic_t bad_crc_count; 108 atomic_t bad_crc_count;
125 int tx_disabled; 109 enum efx_phy_mode phy_mode;
126 int bad_lp_tries; 110 int bad_lp_tries;
127}; 111};
128 112
129static int tenxpress_state_is(struct efx_nic *efx, int state)
130{
131 struct tenxpress_phy_data *phy_data = efx->phy_data;
132 return (phy_data != NULL) && (state == phy_data->state);
133}
134
135void tenxpress_set_state(struct efx_nic *efx,
136 enum tenxpress_state state)
137{
138 struct tenxpress_phy_data *phy_data = efx->phy_data;
139 if (phy_data != NULL)
140 phy_data->state = state;
141}
142
143void tenxpress_crc_err(struct efx_nic *efx) 113void tenxpress_crc_err(struct efx_nic *efx)
144{ 114{
145 struct tenxpress_phy_data *phy_data = efx->phy_data; 115 struct tenxpress_phy_data *phy_data = efx->phy_data;
@@ -176,8 +146,6 @@ static int tenxpress_phy_check(struct efx_nic *efx)
176 return 0; 146 return 0;
177} 147}
178 148
179static void tenxpress_reset_xaui(struct efx_nic *efx);
180
181static int tenxpress_init(struct efx_nic *efx) 149static int tenxpress_init(struct efx_nic *efx)
182{ 150{
183 int rc, reg; 151 int rc, reg;
@@ -214,15 +182,12 @@ static int tenxpress_phy_init(struct efx_nic *efx)
214 if (!phy_data) 182 if (!phy_data)
215 return -ENOMEM; 183 return -ENOMEM;
216 efx->phy_data = phy_data; 184 efx->phy_data = phy_data;
185 phy_data->phy_mode = efx->phy_mode;
217 186
218 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); 187 rc = mdio_clause45_wait_reset_mmds(efx,
219 188 TENXPRESS_REQUIRED_DEVS);
220 if (!sfe4001_phy_flash_cfg) { 189 if (rc < 0)
221 rc = mdio_clause45_wait_reset_mmds(efx, 190 goto fail;
222 TENXPRESS_REQUIRED_DEVS);
223 if (rc < 0)
224 goto fail;
225 }
226 191
227 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 192 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
228 if (rc < 0) 193 if (rc < 0)
@@ -249,7 +214,10 @@ static int tenxpress_special_reset(struct efx_nic *efx)
249{ 214{
250 int rc, reg; 215 int rc, reg;
251 216
252 EFX_TRACE(efx, "%s\n", __func__); 217 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
218 * a special software reset can glitch the XGMAC sufficiently for stats
219 * requests to fail. Since we don't ofen special_reset, just lock. */
220 spin_lock(&efx->stats_lock);
253 221
254 /* Initiate reset */ 222 /* Initiate reset */
255 reg = mdio_clause45_read(efx, efx->mii.phy_id, 223 reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -258,23 +226,25 @@ static int tenxpress_special_reset(struct efx_nic *efx)
258 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 226 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
259 PMA_PMD_EXT_CTRL_REG, reg); 227 PMA_PMD_EXT_CTRL_REG, reg);
260 228
261 msleep(200); 229 mdelay(200);
262 230
263 /* Wait for the blocks to come out of reset */ 231 /* Wait for the blocks to come out of reset */
264 rc = mdio_clause45_wait_reset_mmds(efx, 232 rc = mdio_clause45_wait_reset_mmds(efx,
265 TENXPRESS_REQUIRED_DEVS); 233 TENXPRESS_REQUIRED_DEVS);
266 if (rc < 0) 234 if (rc < 0)
267 return rc; 235 goto unlock;
268 236
269 /* Try and reconfigure the device */ 237 /* Try and reconfigure the device */
270 rc = tenxpress_init(efx); 238 rc = tenxpress_init(efx);
271 if (rc < 0) 239 if (rc < 0)
272 return rc; 240 goto unlock;
273 241
274 return 0; 242unlock:
243 spin_unlock(&efx->stats_lock);
244 return rc;
275} 245}
276 246
277static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) 247static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp)
278{ 248{
279 struct tenxpress_phy_data *pd = efx->phy_data; 249 struct tenxpress_phy_data *pd = efx->phy_data;
280 int reg; 250 int reg;
@@ -311,15 +281,15 @@ static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
311 * into a non-10GBT port and if so warn the user that they won't get 281 * into a non-10GBT port and if so warn the user that they won't get
312 * link any time soon as we are 10GBT only, unless caller specified 282 * link any time soon as we are 10GBT only, unless caller specified
313 * not to do this check (it isn't useful in loopback) */ 283 * not to do this check (it isn't useful in loopback) */
314static int tenxpress_link_ok(struct efx_nic *efx, int check_lp) 284static bool tenxpress_link_ok(struct efx_nic *efx, bool check_lp)
315{ 285{
316 int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS); 286 bool ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
317 287
318 if (ok) { 288 if (ok) {
319 tenxpress_set_bad_lp(efx, 0); 289 tenxpress_set_bad_lp(efx, false);
320 } else if (check_lp) { 290 } else if (check_lp) {
321 /* Are we plugged into the wrong sort of link? */ 291 /* Are we plugged into the wrong sort of link? */
322 int bad_lp = 0; 292 bool bad_lp = false;
323 int phy_id = efx->mii.phy_id; 293 int phy_id = efx->mii.phy_id;
324 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, 294 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
325 MDIO_AN_STATUS); 295 MDIO_AN_STATUS);
@@ -332,7 +302,7 @@ static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
332 * bit has the advantage of not clearing when autoneg 302 * bit has the advantage of not clearing when autoneg
333 * restarts. */ 303 * restarts. */
334 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) { 304 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
335 tenxpress_set_bad_lp(efx, 0); 305 tenxpress_set_bad_lp(efx, false);
336 return ok; 306 return ok;
337 } 307 }
338 308
@@ -367,16 +337,19 @@ static void tenxpress_phyxs_loopback(struct efx_nic *efx)
367static void tenxpress_phy_reconfigure(struct efx_nic *efx) 337static void tenxpress_phy_reconfigure(struct efx_nic *efx)
368{ 338{
369 struct tenxpress_phy_data *phy_data = efx->phy_data; 339 struct tenxpress_phy_data *phy_data = efx->phy_data;
370 int loop_change = LOOPBACK_OUT_OF(phy_data, efx, 340 bool loop_change = LOOPBACK_OUT_OF(phy_data, efx,
371 TENXPRESS_LOOPBACKS); 341 TENXPRESS_LOOPBACKS);
372 342
373 if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) 343 if (efx->phy_mode & PHY_MODE_SPECIAL) {
344 phy_data->phy_mode = efx->phy_mode;
374 return; 345 return;
346 }
375 347
376 /* When coming out of transmit disable, coming out of low power 348 /* When coming out of transmit disable, coming out of low power
377 * mode, or moving out of any PHY internal loopback mode, 349 * mode, or moving out of any PHY internal loopback mode,
378 * perform a special software reset */ 350 * perform a special software reset */
379 if ((phy_data->tx_disabled && !efx->tx_disabled) || 351 if ((efx->phy_mode == PHY_MODE_NORMAL &&
352 phy_data->phy_mode != PHY_MODE_NORMAL) ||
380 loop_change) { 353 loop_change) {
381 tenxpress_special_reset(efx); 354 tenxpress_special_reset(efx);
382 falcon_reset_xaui(efx); 355 falcon_reset_xaui(efx);
@@ -386,9 +359,9 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
386 mdio_clause45_phy_reconfigure(efx); 359 mdio_clause45_phy_reconfigure(efx);
387 tenxpress_phyxs_loopback(efx); 360 tenxpress_phyxs_loopback(efx);
388 361
389 phy_data->tx_disabled = efx->tx_disabled;
390 phy_data->loopback_mode = efx->loopback_mode; 362 phy_data->loopback_mode = efx->loopback_mode;
391 efx->link_up = tenxpress_link_ok(efx, 0); 363 phy_data->phy_mode = efx->phy_mode;
364 efx->link_up = tenxpress_link_ok(efx, false);
392 efx->link_options = GM_LPA_10000FULL; 365 efx->link_options = GM_LPA_10000FULL;
393} 366}
394 367
@@ -402,16 +375,14 @@ static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
402static int tenxpress_phy_check_hw(struct efx_nic *efx) 375static int tenxpress_phy_check_hw(struct efx_nic *efx)
403{ 376{
404 struct tenxpress_phy_data *phy_data = efx->phy_data; 377 struct tenxpress_phy_data *phy_data = efx->phy_data;
405 int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL); 378 bool link_ok;
406 int link_ok;
407 379
408 link_ok = phy_up && tenxpress_link_ok(efx, 1); 380 link_ok = tenxpress_link_ok(efx, true);
409 381
410 if (link_ok != efx->link_up) 382 if (link_ok != efx->link_up)
411 falcon_xmac_sim_phy_event(efx); 383 falcon_xmac_sim_phy_event(efx);
412 384
413 /* Nothing to check if we've already shut down the PHY */ 385 if (phy_data->phy_mode != PHY_MODE_NORMAL)
414 if (!phy_up)
415 return 0; 386 return 0;
416 387
417 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) { 388 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
@@ -444,7 +415,7 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
444 415
445/* Set the RX and TX LEDs and Link LED flashing. The other LEDs 416/* Set the RX and TX LEDs and Link LED flashing. The other LEDs
446 * (which probably aren't wired anyway) are left in AUTO mode */ 417 * (which probably aren't wired anyway) are left in AUTO mode */
447void tenxpress_phy_blink(struct efx_nic *efx, int blink) 418void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
448{ 419{
449 int reg; 420 int reg;
450 421
@@ -459,52 +430,10 @@ void tenxpress_phy_blink(struct efx_nic *efx, int blink)
459 PMA_PMD_LED_OVERR_REG, reg); 430 PMA_PMD_LED_OVERR_REG, reg);
460} 431}
461 432
462static void tenxpress_reset_xaui(struct efx_nic *efx) 433static int tenxpress_phy_test(struct efx_nic *efx)
463{ 434{
464 int phy = efx->mii.phy_id; 435 /* BIST is automatically run after a special software reset */
465 int clk_ctrl, test_select, soft_rst2; 436 return tenxpress_special_reset(efx);
466
467 /* Real work is done on clock_ctrl other resets are thought to be
468 * optional but make the reset more reliable
469 */
470
471 /* Read */
472 clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
473 PCS_CLOCK_CTRL_REG);
474 test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
475 PCS_TEST_SELECT_REG);
476 soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
477 PCS_SOFT_RST2_REG);
478
479 /* Put in reset */
480 test_select &= ~(1 << CLK312_EN_LBN);
481 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
482 PCS_TEST_SELECT_REG, test_select);
483
484 soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
485 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
486 PCS_SOFT_RST2_REG, soft_rst2);
487
488 clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
489 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
490 PCS_CLOCK_CTRL_REG, clk_ctrl);
491 udelay(10);
492
493 /* Remove reset */
494 clk_ctrl |= (1 << PLL312_RST_N_LBN);
495 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
496 PCS_CLOCK_CTRL_REG, clk_ctrl);
497 udelay(10);
498
499 soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
500 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
501 PCS_SOFT_RST2_REG, soft_rst2);
502 udelay(10);
503
504 test_select |= (1 << CLK312_EN_LBN);
505 mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
506 PCS_TEST_SELECT_REG, test_select);
507 udelay(10);
508} 437}
509 438
510struct efx_phy_operations falcon_tenxpress_phy_ops = { 439struct efx_phy_operations falcon_tenxpress_phy_ops = {
@@ -513,7 +442,7 @@ struct efx_phy_operations falcon_tenxpress_phy_ops = {
513 .check_hw = tenxpress_phy_check_hw, 442 .check_hw = tenxpress_phy_check_hw,
514 .fini = tenxpress_phy_fini, 443 .fini = tenxpress_phy_fini,
515 .clear_interrupt = tenxpress_phy_clear_interrupt, 444 .clear_interrupt = tenxpress_phy_clear_interrupt,
516 .reset_xaui = tenxpress_reset_xaui, 445 .test = tenxpress_phy_test,
517 .mmds = TENXPRESS_REQUIRED_DEVS, 446 .mmds = TENXPRESS_REQUIRED_DEVS,
518 .loopbacks = TENXPRESS_LOOPBACKS, 447 .loopbacks = TENXPRESS_LOOPBACKS,
519}; 448};
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5e8374ab28ee..da3e9ff339f5 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -47,7 +47,7 @@ void efx_stop_queue(struct efx_nic *efx)
47 * We want to be able to nest calls to netif_stop_queue(), since each 47 * We want to be able to nest calls to netif_stop_queue(), since each
48 * channel can have an individual stop on the queue. 48 * channel can have an individual stop on the queue.
49 */ 49 */
50inline void efx_wake_queue(struct efx_nic *efx) 50void efx_wake_queue(struct efx_nic *efx)
51{ 51{
52 local_bh_disable(); 52 local_bh_disable();
53 if (atomic_dec_and_lock(&efx->netif_stop_count, 53 if (atomic_dec_and_lock(&efx->netif_stop_count,
@@ -59,19 +59,21 @@ inline void efx_wake_queue(struct efx_nic *efx)
59 local_bh_enable(); 59 local_bh_enable();
60} 60}
61 61
62static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 62static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
63 struct efx_tx_buffer *buffer) 63 struct efx_tx_buffer *buffer)
64{ 64{
65 if (buffer->unmap_len) { 65 if (buffer->unmap_len) {
66 struct pci_dev *pci_dev = tx_queue->efx->pci_dev; 66 struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
67 dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
68 buffer->unmap_len);
67 if (buffer->unmap_single) 69 if (buffer->unmap_single)
68 pci_unmap_single(pci_dev, buffer->unmap_addr, 70 pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
69 buffer->unmap_len, PCI_DMA_TODEVICE); 71 PCI_DMA_TODEVICE);
70 else 72 else
71 pci_unmap_page(pci_dev, buffer->unmap_addr, 73 pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
72 buffer->unmap_len, PCI_DMA_TODEVICE); 74 PCI_DMA_TODEVICE);
73 buffer->unmap_len = 0; 75 buffer->unmap_len = 0;
74 buffer->unmap_single = 0; 76 buffer->unmap_single = false;
75 } 77 }
76 78
77 if (buffer->skb) { 79 if (buffer->skb) {
@@ -103,13 +105,13 @@ struct efx_tso_header {
103}; 105};
104 106
105static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 107static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
106 const struct sk_buff *skb); 108 struct sk_buff *skb);
107static void efx_fini_tso(struct efx_tx_queue *tx_queue); 109static void efx_fini_tso(struct efx_tx_queue *tx_queue);
108static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, 110static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
109 struct efx_tso_header *tsoh); 111 struct efx_tso_header *tsoh);
110 112
111static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue, 113static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
112 struct efx_tx_buffer *buffer) 114 struct efx_tx_buffer *buffer)
113{ 115{
114 if (buffer->tsoh) { 116 if (buffer->tsoh) {
115 if (likely(!buffer->tsoh->unmap_len)) { 117 if (likely(!buffer->tsoh->unmap_len)) {
@@ -136,8 +138,8 @@ static inline void efx_tsoh_free(struct efx_tx_queue *tx_queue,
136 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 138 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
137 * You must hold netif_tx_lock() to call this function. 139 * You must hold netif_tx_lock() to call this function.
138 */ 140 */
139static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, 141static int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
140 const struct sk_buff *skb) 142 struct sk_buff *skb)
141{ 143{
142 struct efx_nic *efx = tx_queue->efx; 144 struct efx_nic *efx = tx_queue->efx;
143 struct pci_dev *pci_dev = efx->pci_dev; 145 struct pci_dev *pci_dev = efx->pci_dev;
@@ -148,7 +150,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
148 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; 150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
149 dma_addr_t dma_addr, unmap_addr = 0; 151 dma_addr_t dma_addr, unmap_addr = 0;
150 unsigned int dma_len; 152 unsigned int dma_len;
151 unsigned unmap_single; 153 bool unmap_single;
152 int q_space, i = 0; 154 int q_space, i = 0;
153 int rc = NETDEV_TX_OK; 155 int rc = NETDEV_TX_OK;
154 156
@@ -167,7 +169,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
167 * since this is more efficient on machines with sparse 169 * since this is more efficient on machines with sparse
168 * memory. 170 * memory.
169 */ 171 */
170 unmap_single = 1; 172 unmap_single = true;
171 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); 173 dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
172 174
173 /* Process all fragments */ 175 /* Process all fragments */
@@ -213,7 +215,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
213 EFX_BUG_ON_PARANOID(buffer->tsoh); 215 EFX_BUG_ON_PARANOID(buffer->tsoh);
214 EFX_BUG_ON_PARANOID(buffer->skb); 216 EFX_BUG_ON_PARANOID(buffer->skb);
215 EFX_BUG_ON_PARANOID(buffer->len); 217 EFX_BUG_ON_PARANOID(buffer->len);
216 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 218 EFX_BUG_ON_PARANOID(!buffer->continuation);
217 EFX_BUG_ON_PARANOID(buffer->unmap_len); 219 EFX_BUG_ON_PARANOID(buffer->unmap_len);
218 220
219 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); 221 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
@@ -233,7 +235,6 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
233 } while (len); 235 } while (len);
234 236
235 /* Transfer ownership of the unmapping to the final buffer */ 237 /* Transfer ownership of the unmapping to the final buffer */
236 buffer->unmap_addr = unmap_addr;
237 buffer->unmap_single = unmap_single; 238 buffer->unmap_single = unmap_single;
238 buffer->unmap_len = unmap_len; 239 buffer->unmap_len = unmap_len;
239 unmap_len = 0; 240 unmap_len = 0;
@@ -247,14 +248,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
247 page_offset = fragment->page_offset; 248 page_offset = fragment->page_offset;
248 i++; 249 i++;
249 /* Map for DMA */ 250 /* Map for DMA */
250 unmap_single = 0; 251 unmap_single = false;
251 dma_addr = pci_map_page(pci_dev, page, page_offset, len, 252 dma_addr = pci_map_page(pci_dev, page, page_offset, len,
252 PCI_DMA_TODEVICE); 253 PCI_DMA_TODEVICE);
253 } 254 }
254 255
255 /* Transfer ownership of the skb to the final buffer */ 256 /* Transfer ownership of the skb to the final buffer */
256 buffer->skb = skb; 257 buffer->skb = skb;
257 buffer->continuation = 0; 258 buffer->continuation = false;
258 259
259 /* Pass off to hardware */ 260 /* Pass off to hardware */
260 falcon_push_buffers(tx_queue); 261 falcon_push_buffers(tx_queue);
@@ -287,9 +288,14 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
287 } 288 }
288 289
289 /* Free the fragment we were mid-way through pushing */ 290 /* Free the fragment we were mid-way through pushing */
290 if (unmap_len) 291 if (unmap_len) {
291 pci_unmap_page(pci_dev, unmap_addr, unmap_len, 292 if (unmap_single)
292 PCI_DMA_TODEVICE); 293 pci_unmap_single(pci_dev, unmap_addr, unmap_len,
294 PCI_DMA_TODEVICE);
295 else
296 pci_unmap_page(pci_dev, unmap_addr, unmap_len,
297 PCI_DMA_TODEVICE);
298 }
293 299
294 return rc; 300 return rc;
295} 301}
@@ -299,8 +305,8 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
299 * This removes packets from the TX queue, up to and including the 305 * This removes packets from the TX queue, up to and including the
300 * specified index. 306 * specified index.
301 */ 307 */
302static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, 308static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
303 unsigned int index) 309 unsigned int index)
304{ 310{
305 struct efx_nic *efx = tx_queue->efx; 311 struct efx_nic *efx = tx_queue->efx;
306 unsigned int stop_index, read_ptr; 312 unsigned int stop_index, read_ptr;
@@ -320,7 +326,7 @@ static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
320 } 326 }
321 327
322 efx_dequeue_buffer(tx_queue, buffer); 328 efx_dequeue_buffer(tx_queue, buffer);
323 buffer->continuation = 1; 329 buffer->continuation = true;
324 buffer->len = 0; 330 buffer->len = 0;
325 331
326 ++tx_queue->read_count; 332 ++tx_queue->read_count;
@@ -367,8 +373,15 @@ inline int efx_xmit(struct efx_nic *efx,
367 */ 373 */
368int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 374int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
369{ 375{
370 struct efx_nic *efx = net_dev->priv; 376 struct efx_nic *efx = netdev_priv(net_dev);
371 return efx_xmit(efx, &efx->tx_queue[0], skb); 377 struct efx_tx_queue *tx_queue;
378
379 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
380 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM];
381 else
382 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
383
384 return efx_xmit(efx, tx_queue, skb);
372} 385}
373 386
374void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 387void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -412,30 +425,25 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
412 /* Allocate software ring */ 425 /* Allocate software ring */
413 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); 426 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
414 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 427 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
415 if (!tx_queue->buffer) { 428 if (!tx_queue->buffer)
416 rc = -ENOMEM; 429 return -ENOMEM;
417 goto fail1;
418 }
419 for (i = 0; i <= efx->type->txd_ring_mask; ++i) 430 for (i = 0; i <= efx->type->txd_ring_mask; ++i)
420 tx_queue->buffer[i].continuation = 1; 431 tx_queue->buffer[i].continuation = true;
421 432
422 /* Allocate hardware ring */ 433 /* Allocate hardware ring */
423 rc = falcon_probe_tx(tx_queue); 434 rc = falcon_probe_tx(tx_queue);
424 if (rc) 435 if (rc)
425 goto fail2; 436 goto fail;
426 437
427 return 0; 438 return 0;
428 439
429 fail2: 440 fail:
430 kfree(tx_queue->buffer); 441 kfree(tx_queue->buffer);
431 tx_queue->buffer = NULL; 442 tx_queue->buffer = NULL;
432 fail1:
433 tx_queue->used = 0;
434
435 return rc; 443 return rc;
436} 444}
437 445
438int efx_init_tx_queue(struct efx_tx_queue *tx_queue) 446void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
439{ 447{
440 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue); 448 EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
441 449
@@ -446,7 +454,7 @@ int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
446 BUG_ON(tx_queue->stopped); 454 BUG_ON(tx_queue->stopped);
447 455
448 /* Set up TX descriptor ring */ 456 /* Set up TX descriptor ring */
449 return falcon_init_tx(tx_queue); 457 falcon_init_tx(tx_queue);
450} 458}
451 459
452void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) 460void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -461,7 +469,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
461 buffer = &tx_queue->buffer[tx_queue->read_count & 469 buffer = &tx_queue->buffer[tx_queue->read_count &
462 tx_queue->efx->type->txd_ring_mask]; 470 tx_queue->efx->type->txd_ring_mask];
463 efx_dequeue_buffer(tx_queue, buffer); 471 efx_dequeue_buffer(tx_queue, buffer);
464 buffer->continuation = 1; 472 buffer->continuation = true;
465 buffer->len = 0; 473 buffer->len = 0;
466 474
467 ++tx_queue->read_count; 475 ++tx_queue->read_count;
@@ -494,7 +502,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
494 502
495 kfree(tx_queue->buffer); 503 kfree(tx_queue->buffer);
496 tx_queue->buffer = NULL; 504 tx_queue->buffer = NULL;
497 tx_queue->used = 0;
498} 505}
499 506
500 507
@@ -509,7 +516,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
509/* Number of bytes inserted at the start of a TSO header buffer, 516/* Number of bytes inserted at the start of a TSO header buffer,
510 * similar to NET_IP_ALIGN. 517 * similar to NET_IP_ALIGN.
511 */ 518 */
512#if defined(__i386__) || defined(__x86_64__) 519#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
513#define TSOH_OFFSET 0 520#define TSOH_OFFSET 0
514#else 521#else
515#define TSOH_OFFSET NET_IP_ALIGN 522#define TSOH_OFFSET NET_IP_ALIGN
@@ -533,47 +540,37 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
533 540
534/** 541/**
535 * struct tso_state - TSO state for an SKB 542 * struct tso_state - TSO state for an SKB
536 * @remaining_len: Bytes of data we've yet to segment 543 * @out_len: Remaining length in current segment
537 * @seqnum: Current sequence number 544 * @seqnum: Current sequence number
545 * @ipv4_id: Current IPv4 ID, host endian
538 * @packet_space: Remaining space in current packet 546 * @packet_space: Remaining space in current packet
539 * @ifc: Input fragment cursor. 547 * @dma_addr: DMA address of current position
540 * Where we are in the current fragment of the incoming SKB. These 548 * @in_len: Remaining length in current SKB fragment
541 * values get updated in place when we split a fragment over 549 * @unmap_len: Length of SKB fragment
542 * multiple packets. 550 * @unmap_addr: DMA address of SKB fragment
543 * @p: Parameters. 551 * @unmap_single: DMA single vs page mapping flag
544 * These values are set once at the start of the TSO send and do 552 * @header_len: Number of bytes of header
545 * not get changed as the routine progresses. 553 * @full_packet_size: Number of bytes to put in each outgoing segment
546 * 554 *
547 * The state used during segmentation. It is put into this data structure 555 * The state used during segmentation. It is put into this data structure
548 * just to make it easy to pass into inline functions. 556 * just to make it easy to pass into inline functions.
549 */ 557 */
550struct tso_state { 558struct tso_state {
551 unsigned remaining_len; 559 /* Output position */
560 unsigned out_len;
552 unsigned seqnum; 561 unsigned seqnum;
562 unsigned ipv4_id;
553 unsigned packet_space; 563 unsigned packet_space;
554 564
555 struct { 565 /* Input position */
556 /* DMA address of current position */ 566 dma_addr_t dma_addr;
557 dma_addr_t dma_addr; 567 unsigned in_len;
558 /* Remaining length */ 568 unsigned unmap_len;
559 unsigned int len; 569 dma_addr_t unmap_addr;
560 /* DMA address and length of the whole fragment */ 570 bool unmap_single;
561 unsigned int unmap_len; 571
562 dma_addr_t unmap_addr; 572 unsigned header_len;
563 struct page *page; 573 int full_packet_size;
564 unsigned page_off;
565 } ifc;
566
567 struct {
568 /* The number of bytes of header */
569 unsigned int header_length;
570
571 /* The number of bytes to put in each outgoing segment. */
572 int full_packet_size;
573
574 /* Current IPv4 ID, host endian. */
575 unsigned ipv4_id;
576 } p;
577}; 574};
578 575
579 576
@@ -581,11 +578,24 @@ struct tso_state {
581 * Verify that our various assumptions about sk_buffs and the conditions 578 * Verify that our various assumptions about sk_buffs and the conditions
582 * under which TSO will be attempted hold true. 579 * under which TSO will be attempted hold true.
583 */ 580 */
584static inline void efx_tso_check_safe(const struct sk_buff *skb) 581static void efx_tso_check_safe(struct sk_buff *skb)
585{ 582{
586 EFX_BUG_ON_PARANOID(skb->protocol != htons(ETH_P_IP)); 583 __be16 protocol = skb->protocol;
584
587 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != 585 EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
588 skb->protocol); 586 protocol);
587 if (protocol == htons(ETH_P_8021Q)) {
588 /* Find the encapsulated protocol; reset network header
589 * and transport header based on that. */
590 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
591 protocol = veh->h_vlan_encapsulated_proto;
592 skb_set_network_header(skb, sizeof(*veh));
593 if (protocol == htons(ETH_P_IP))
594 skb_set_transport_header(skb, sizeof(*veh) +
595 4 * ip_hdr(skb)->ihl);
596 }
597
598 EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IP));
589 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); 599 EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
590 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) 600 EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data)
591 + (tcp_hdr(skb)->doff << 2u)) > 601 + (tcp_hdr(skb)->doff << 2u)) >
@@ -685,18 +695,14 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
685 * @tx_queue: Efx TX queue 695 * @tx_queue: Efx TX queue
686 * @dma_addr: DMA address of fragment 696 * @dma_addr: DMA address of fragment
687 * @len: Length of fragment 697 * @len: Length of fragment
688 * @skb: Only non-null for end of last segment 698 * @final_buffer: The final buffer inserted into the queue
689 * @end_of_packet: True if last fragment in a packet
690 * @unmap_addr: DMA address of fragment for unmapping
691 * @unmap_len: Only set this in last segment of a fragment
692 * 699 *
693 * Push descriptors onto the TX queue. Return 0 on success or 1 if 700 * Push descriptors onto the TX queue. Return 0 on success or 1 if
694 * @tx_queue full. 701 * @tx_queue full.
695 */ 702 */
696static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, 703static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
697 dma_addr_t dma_addr, unsigned len, 704 dma_addr_t dma_addr, unsigned len,
698 const struct sk_buff *skb, int end_of_packet, 705 struct efx_tx_buffer **final_buffer)
699 dma_addr_t unmap_addr, unsigned unmap_len)
700{ 706{
701 struct efx_tx_buffer *buffer; 707 struct efx_tx_buffer *buffer;
702 struct efx_nic *efx = tx_queue->efx; 708 struct efx_nic *efx = tx_queue->efx;
@@ -724,8 +730,10 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
724 fill_level = (tx_queue->insert_count 730 fill_level = (tx_queue->insert_count
725 - tx_queue->old_read_count); 731 - tx_queue->old_read_count);
726 q_space = efx->type->txd_ring_mask - 1 - fill_level; 732 q_space = efx->type->txd_ring_mask - 1 - fill_level;
727 if (unlikely(q_space-- <= 0)) 733 if (unlikely(q_space-- <= 0)) {
734 *final_buffer = NULL;
728 return 1; 735 return 1;
736 }
729 smp_mb(); 737 smp_mb();
730 --tx_queue->stopped; 738 --tx_queue->stopped;
731 } 739 }
@@ -742,7 +750,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
742 EFX_BUG_ON_PARANOID(buffer->len); 750 EFX_BUG_ON_PARANOID(buffer->len);
743 EFX_BUG_ON_PARANOID(buffer->unmap_len); 751 EFX_BUG_ON_PARANOID(buffer->unmap_len);
744 EFX_BUG_ON_PARANOID(buffer->skb); 752 EFX_BUG_ON_PARANOID(buffer->skb);
745 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 753 EFX_BUG_ON_PARANOID(!buffer->continuation);
746 EFX_BUG_ON_PARANOID(buffer->tsoh); 754 EFX_BUG_ON_PARANOID(buffer->tsoh);
747 755
748 buffer->dma_addr = dma_addr; 756 buffer->dma_addr = dma_addr;
@@ -765,10 +773,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
765 773
766 EFX_BUG_ON_PARANOID(!len); 774 EFX_BUG_ON_PARANOID(!len);
767 buffer->len = len; 775 buffer->len = len;
768 buffer->skb = skb; 776 *final_buffer = buffer;
769 buffer->continuation = !end_of_packet;
770 buffer->unmap_addr = unmap_addr;
771 buffer->unmap_len = unmap_len;
772 return 0; 777 return 0;
773} 778}
774 779
@@ -780,8 +785,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
780 * a single fragment, and we know it doesn't cross a page boundary. It 785 * a single fragment, and we know it doesn't cross a page boundary. It
781 * also allows us to not worry about end-of-packet etc. 786 * also allows us to not worry about end-of-packet etc.
782 */ 787 */
783static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue, 788static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
784 struct efx_tso_header *tsoh, unsigned len) 789 struct efx_tso_header *tsoh, unsigned len)
785{ 790{
786 struct efx_tx_buffer *buffer; 791 struct efx_tx_buffer *buffer;
787 792
@@ -791,7 +796,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
791 EFX_BUG_ON_PARANOID(buffer->len); 796 EFX_BUG_ON_PARANOID(buffer->len);
792 EFX_BUG_ON_PARANOID(buffer->unmap_len); 797 EFX_BUG_ON_PARANOID(buffer->unmap_len);
793 EFX_BUG_ON_PARANOID(buffer->skb); 798 EFX_BUG_ON_PARANOID(buffer->skb);
794 EFX_BUG_ON_PARANOID(buffer->continuation != 1); 799 EFX_BUG_ON_PARANOID(!buffer->continuation);
795 EFX_BUG_ON_PARANOID(buffer->tsoh); 800 EFX_BUG_ON_PARANOID(buffer->tsoh);
796 buffer->len = len; 801 buffer->len = len;
797 buffer->dma_addr = tsoh->dma_addr; 802 buffer->dma_addr = tsoh->dma_addr;
@@ -805,6 +810,7 @@ static inline void efx_tso_put_header(struct efx_tx_queue *tx_queue,
805static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) 810static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
806{ 811{
807 struct efx_tx_buffer *buffer; 812 struct efx_tx_buffer *buffer;
813 dma_addr_t unmap_addr;
808 814
809 /* Work backwards until we hit the original insert pointer value */ 815 /* Work backwards until we hit the original insert pointer value */
810 while (tx_queue->insert_count != tx_queue->write_count) { 816 while (tx_queue->insert_count != tx_queue->write_count) {
@@ -814,11 +820,18 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
814 efx_tsoh_free(tx_queue, buffer); 820 efx_tsoh_free(tx_queue, buffer);
815 EFX_BUG_ON_PARANOID(buffer->skb); 821 EFX_BUG_ON_PARANOID(buffer->skb);
816 buffer->len = 0; 822 buffer->len = 0;
817 buffer->continuation = 1; 823 buffer->continuation = true;
818 if (buffer->unmap_len) { 824 if (buffer->unmap_len) {
819 pci_unmap_page(tx_queue->efx->pci_dev, 825 unmap_addr = (buffer->dma_addr + buffer->len -
820 buffer->unmap_addr, 826 buffer->unmap_len);
821 buffer->unmap_len, PCI_DMA_TODEVICE); 827 if (buffer->unmap_single)
828 pci_unmap_single(tx_queue->efx->pci_dev,
829 unmap_addr, buffer->unmap_len,
830 PCI_DMA_TODEVICE);
831 else
832 pci_unmap_page(tx_queue->efx->pci_dev,
833 unmap_addr, buffer->unmap_len,
834 PCI_DMA_TODEVICE);
822 buffer->unmap_len = 0; 835 buffer->unmap_len = 0;
823 } 836 }
824 } 837 }
@@ -826,50 +839,57 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
826 839
827 840
828/* Parse the SKB header and initialise state. */ 841/* Parse the SKB header and initialise state. */
829static inline void tso_start(struct tso_state *st, const struct sk_buff *skb) 842static void tso_start(struct tso_state *st, const struct sk_buff *skb)
830{ 843{
831 /* All ethernet/IP/TCP headers combined size is TCP header size 844 /* All ethernet/IP/TCP headers combined size is TCP header size
832 * plus offset of TCP header relative to start of packet. 845 * plus offset of TCP header relative to start of packet.
833 */ 846 */
834 st->p.header_length = ((tcp_hdr(skb)->doff << 2u) 847 st->header_len = ((tcp_hdr(skb)->doff << 2u)
835 + PTR_DIFF(tcp_hdr(skb), skb->data)); 848 + PTR_DIFF(tcp_hdr(skb), skb->data));
836 st->p.full_packet_size = (st->p.header_length 849 st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
837 + skb_shinfo(skb)->gso_size);
838 850
839 st->p.ipv4_id = ntohs(ip_hdr(skb)->id); 851 st->ipv4_id = ntohs(ip_hdr(skb)->id);
840 st->seqnum = ntohl(tcp_hdr(skb)->seq); 852 st->seqnum = ntohl(tcp_hdr(skb)->seq);
841 853
842 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); 854 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
843 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); 855 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
844 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); 856 EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
845 857
846 st->packet_space = st->p.full_packet_size; 858 st->packet_space = st->full_packet_size;
847 st->remaining_len = skb->len - st->p.header_length; 859 st->out_len = skb->len - st->header_len;
860 st->unmap_len = 0;
861 st->unmap_single = false;
848} 862}
849 863
850 864static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
851/** 865 skb_frag_t *frag)
852 * tso_get_fragment - record fragment details and map for DMA
853 * @st: TSO state
854 * @efx: Efx NIC
855 * @data: Pointer to fragment data
856 * @len: Length of fragment
857 *
858 * Record fragment details and map for DMA. Return 0 on success, or
859 * -%ENOMEM if DMA mapping fails.
860 */
861static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
862 int len, struct page *page, int page_off)
863{ 866{
867 st->unmap_addr = pci_map_page(efx->pci_dev, frag->page,
868 frag->page_offset, frag->size,
869 PCI_DMA_TODEVICE);
870 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
871 st->unmap_single = false;
872 st->unmap_len = frag->size;
873 st->in_len = frag->size;
874 st->dma_addr = st->unmap_addr;
875 return 0;
876 }
877 return -ENOMEM;
878}
864 879
865 st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, 880static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
866 len, PCI_DMA_TODEVICE); 881 const struct sk_buff *skb)
867 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { 882{
868 st->ifc.unmap_len = len; 883 int hl = st->header_len;
869 st->ifc.len = len; 884 int len = skb_headlen(skb) - hl;
870 st->ifc.dma_addr = st->ifc.unmap_addr; 885
871 st->ifc.page = page; 886 st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
872 st->ifc.page_off = page_off; 887 len, PCI_DMA_TODEVICE);
888 if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
889 st->unmap_single = true;
890 st->unmap_len = len;
891 st->in_len = len;
892 st->dma_addr = st->unmap_addr;
873 return 0; 893 return 0;
874 } 894 }
875 return -ENOMEM; 895 return -ENOMEM;
@@ -886,36 +906,45 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
886 * of fragment or end-of-packet. Return 0 on success, 1 if not enough 906 * of fragment or end-of-packet. Return 0 on success, 1 if not enough
887 * space in @tx_queue. 907 * space in @tx_queue.
888 */ 908 */
889static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, 909static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
890 const struct sk_buff *skb, 910 const struct sk_buff *skb,
891 struct tso_state *st) 911 struct tso_state *st)
892{ 912{
893 913 struct efx_tx_buffer *buffer;
894 int n, end_of_packet, rc; 914 int n, end_of_packet, rc;
895 915
896 if (st->ifc.len == 0) 916 if (st->in_len == 0)
897 return 0; 917 return 0;
898 if (st->packet_space == 0) 918 if (st->packet_space == 0)
899 return 0; 919 return 0;
900 920
901 EFX_BUG_ON_PARANOID(st->ifc.len <= 0); 921 EFX_BUG_ON_PARANOID(st->in_len <= 0);
902 EFX_BUG_ON_PARANOID(st->packet_space <= 0); 922 EFX_BUG_ON_PARANOID(st->packet_space <= 0);
903 923
904 n = min(st->ifc.len, st->packet_space); 924 n = min(st->in_len, st->packet_space);
905 925
906 st->packet_space -= n; 926 st->packet_space -= n;
907 st->remaining_len -= n; 927 st->out_len -= n;
908 st->ifc.len -= n; 928 st->in_len -= n;
909 st->ifc.page_off += n; 929
910 end_of_packet = st->remaining_len == 0 || st->packet_space == 0; 930 rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
911 931 if (likely(rc == 0)) {
912 rc = efx_tx_queue_insert(tx_queue, st->ifc.dma_addr, n, 932 if (st->out_len == 0)
913 st->remaining_len ? NULL : skb, 933 /* Transfer ownership of the skb */
914 end_of_packet, st->ifc.unmap_addr, 934 buffer->skb = skb;
915 st->ifc.len ? 0 : st->ifc.unmap_len); 935
916 936 end_of_packet = st->out_len == 0 || st->packet_space == 0;
917 st->ifc.dma_addr += n; 937 buffer->continuation = !end_of_packet;
938
939 if (st->in_len == 0) {
940 /* Transfer ownership of the pci mapping */
941 buffer->unmap_len = st->unmap_len;
942 buffer->unmap_single = st->unmap_single;
943 st->unmap_len = 0;
944 }
945 }
918 946
947 st->dma_addr += n;
919 return rc; 948 return rc;
920} 949}
921 950
@@ -929,9 +958,9 @@ static inline int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
929 * Generate a new header and prepare for the new packet. Return 0 on 958 * Generate a new header and prepare for the new packet. Return 0 on
930 * success, or -1 if failed to alloc header. 959 * success, or -1 if failed to alloc header.
931 */ 960 */
932static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, 961static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
933 const struct sk_buff *skb, 962 const struct sk_buff *skb,
934 struct tso_state *st) 963 struct tso_state *st)
935{ 964{
936 struct efx_tso_header *tsoh; 965 struct efx_tso_header *tsoh;
937 struct iphdr *tsoh_iph; 966 struct iphdr *tsoh_iph;
@@ -940,7 +969,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
940 u8 *header; 969 u8 *header;
941 970
942 /* Allocate a DMA-mapped header buffer. */ 971 /* Allocate a DMA-mapped header buffer. */
943 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 972 if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) {
944 if (tx_queue->tso_headers_free == NULL) { 973 if (tx_queue->tso_headers_free == NULL) {
945 if (efx_tsoh_block_alloc(tx_queue)) 974 if (efx_tsoh_block_alloc(tx_queue))
946 return -1; 975 return -1;
@@ -951,7 +980,7 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
951 tsoh->unmap_len = 0; 980 tsoh->unmap_len = 0;
952 } else { 981 } else {
953 tx_queue->tso_long_headers++; 982 tx_queue->tso_long_headers++;
954 tsoh = efx_tsoh_heap_alloc(tx_queue, st->p.header_length); 983 tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
955 if (unlikely(!tsoh)) 984 if (unlikely(!tsoh))
956 return -1; 985 return -1;
957 } 986 }
@@ -961,33 +990,32 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
961 tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); 990 tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb));
962 991
963 /* Copy and update the headers. */ 992 /* Copy and update the headers. */
964 memcpy(header, skb->data, st->p.header_length); 993 memcpy(header, skb->data, st->header_len);
965 994
966 tsoh_th->seq = htonl(st->seqnum); 995 tsoh_th->seq = htonl(st->seqnum);
967 st->seqnum += skb_shinfo(skb)->gso_size; 996 st->seqnum += skb_shinfo(skb)->gso_size;
968 if (st->remaining_len > skb_shinfo(skb)->gso_size) { 997 if (st->out_len > skb_shinfo(skb)->gso_size) {
969 /* This packet will not finish the TSO burst. */ 998 /* This packet will not finish the TSO burst. */
970 ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb); 999 ip_length = st->full_packet_size - ETH_HDR_LEN(skb);
971 tsoh_th->fin = 0; 1000 tsoh_th->fin = 0;
972 tsoh_th->psh = 0; 1001 tsoh_th->psh = 0;
973 } else { 1002 } else {
974 /* This packet will be the last in the TSO burst. */ 1003 /* This packet will be the last in the TSO burst. */
975 ip_length = (st->p.header_length - ETH_HDR_LEN(skb) 1004 ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len;
976 + st->remaining_len);
977 tsoh_th->fin = tcp_hdr(skb)->fin; 1005 tsoh_th->fin = tcp_hdr(skb)->fin;
978 tsoh_th->psh = tcp_hdr(skb)->psh; 1006 tsoh_th->psh = tcp_hdr(skb)->psh;
979 } 1007 }
980 tsoh_iph->tot_len = htons(ip_length); 1008 tsoh_iph->tot_len = htons(ip_length);
981 1009
982 /* Linux leaves suitable gaps in the IP ID space for us to fill. */ 1010 /* Linux leaves suitable gaps in the IP ID space for us to fill. */
983 tsoh_iph->id = htons(st->p.ipv4_id); 1011 tsoh_iph->id = htons(st->ipv4_id);
984 st->p.ipv4_id++; 1012 st->ipv4_id++;
985 1013
986 st->packet_space = skb_shinfo(skb)->gso_size; 1014 st->packet_space = skb_shinfo(skb)->gso_size;
987 ++tx_queue->tso_packets; 1015 ++tx_queue->tso_packets;
988 1016
989 /* Form a descriptor for this header. */ 1017 /* Form a descriptor for this header. */
990 efx_tso_put_header(tx_queue, tsoh, st->p.header_length); 1018 efx_tso_put_header(tx_queue, tsoh, st->header_len);
991 1019
992 return 0; 1020 return 0;
993} 1021}
@@ -1005,11 +1033,11 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
1005 * %NETDEV_TX_OK or %NETDEV_TX_BUSY. 1033 * %NETDEV_TX_OK or %NETDEV_TX_BUSY.
1006 */ 1034 */
1007static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, 1035static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1008 const struct sk_buff *skb) 1036 struct sk_buff *skb)
1009{ 1037{
1038 struct efx_nic *efx = tx_queue->efx;
1010 int frag_i, rc, rc2 = NETDEV_TX_OK; 1039 int frag_i, rc, rc2 = NETDEV_TX_OK;
1011 struct tso_state state; 1040 struct tso_state state;
1012 skb_frag_t *f;
1013 1041
1014 /* Verify TSO is safe - these checks should never fail. */ 1042 /* Verify TSO is safe - these checks should never fail. */
1015 efx_tso_check_safe(skb); 1043 efx_tso_check_safe(skb);
@@ -1021,29 +1049,16 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1021 /* Assume that skb header area contains exactly the headers, and 1049 /* Assume that skb header area contains exactly the headers, and
1022 * all payload is in the frag list. 1050 * all payload is in the frag list.
1023 */ 1051 */
1024 if (skb_headlen(skb) == state.p.header_length) { 1052 if (skb_headlen(skb) == state.header_len) {
1025 /* Grab the first payload fragment. */ 1053 /* Grab the first payload fragment. */
1026 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); 1054 EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
1027 frag_i = 0; 1055 frag_i = 0;
1028 f = &skb_shinfo(skb)->frags[frag_i]; 1056 rc = tso_get_fragment(&state, efx,
1029 rc = tso_get_fragment(&state, tx_queue->efx, 1057 skb_shinfo(skb)->frags + frag_i);
1030 f->size, f->page, f->page_offset);
1031 if (rc) 1058 if (rc)
1032 goto mem_err; 1059 goto mem_err;
1033 } else { 1060 } else {
1034 /* It may look like this code fragment assumes that the 1061 rc = tso_get_head_fragment(&state, efx, skb);
1035 * skb->data portion does not cross a page boundary, but
1036 * that is not the case. It is guaranteed to be direct
1037 * mapped memory, and therefore is physically contiguous,
1038 * and so DMA will work fine. kmap_atomic() on this region
1039 * will just return the direct mapping, so that will work
1040 * too.
1041 */
1042 int page_off = (unsigned long)skb->data & (PAGE_SIZE - 1);
1043 int hl = state.p.header_length;
1044 rc = tso_get_fragment(&state, tx_queue->efx,
1045 skb_headlen(skb) - hl,
1046 virt_to_page(skb->data), page_off + hl);
1047 if (rc) 1062 if (rc)
1048 goto mem_err; 1063 goto mem_err;
1049 frag_i = -1; 1064 frag_i = -1;
@@ -1058,13 +1073,12 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1058 goto stop; 1073 goto stop;
1059 1074
1060 /* Move onto the next fragment? */ 1075 /* Move onto the next fragment? */
1061 if (state.ifc.len == 0) { 1076 if (state.in_len == 0) {
1062 if (++frag_i >= skb_shinfo(skb)->nr_frags) 1077 if (++frag_i >= skb_shinfo(skb)->nr_frags)
1063 /* End of payload reached. */ 1078 /* End of payload reached. */
1064 break; 1079 break;
1065 f = &skb_shinfo(skb)->frags[frag_i]; 1080 rc = tso_get_fragment(&state, efx,
1066 rc = tso_get_fragment(&state, tx_queue->efx, 1081 skb_shinfo(skb)->frags + frag_i);
1067 f->size, f->page, f->page_offset);
1068 if (rc) 1082 if (rc)
1069 goto mem_err; 1083 goto mem_err;
1070 } 1084 }
@@ -1082,8 +1096,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1082 return NETDEV_TX_OK; 1096 return NETDEV_TX_OK;
1083 1097
1084 mem_err: 1098 mem_err:
1085 EFX_ERR(tx_queue->efx, "Out of memory for TSO headers, or PCI mapping" 1099 EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
1086 " error\n");
1087 dev_kfree_skb_any((struct sk_buff *)skb); 1100 dev_kfree_skb_any((struct sk_buff *)skb);
1088 goto unwind; 1101 goto unwind;
1089 1102
@@ -1092,9 +1105,19 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1092 1105
1093 /* Stop the queue if it wasn't stopped before. */ 1106 /* Stop the queue if it wasn't stopped before. */
1094 if (tx_queue->stopped == 1) 1107 if (tx_queue->stopped == 1)
1095 efx_stop_queue(tx_queue->efx); 1108 efx_stop_queue(efx);
1096 1109
1097 unwind: 1110 unwind:
1111 /* Free the DMA mapping we were in the process of writing out */
1112 if (state.unmap_len) {
1113 if (state.unmap_single)
1114 pci_unmap_single(efx->pci_dev, state.unmap_addr,
1115 state.unmap_len, PCI_DMA_TODEVICE);
1116 else
1117 pci_unmap_page(efx->pci_dev, state.unmap_addr,
1118 state.unmap_len, PCI_DMA_TODEVICE);
1119 }
1120
1098 efx_enqueue_unwind(tx_queue); 1121 efx_enqueue_unwind(tx_queue);
1099 return rc2; 1122 return rc2;
1100} 1123}
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
index 1526a73b4b51..5e1cc234e42f 100644
--- a/drivers/net/sfc/tx.h
+++ b/drivers/net/sfc/tx.h
@@ -15,7 +15,7 @@
15 15
16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); 16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); 17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
18int efx_init_tx_queue(struct efx_tx_queue *tx_queue); 18void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); 19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
20 20
21int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); 21int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 35ab19c27f8d..fa7b49d69288 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -20,14 +20,10 @@
20 20
21/* XAUI resets if link not detected */ 21/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
23/* SNAP frames have TOBE_DISC set */
24#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
25/* RX PCIe double split performance issue */ 23/* RX PCIe double split performance issue */
26#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS 24#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
27/* TX pkt parser problem with <= 16 byte TXes */ 25/* TX pkt parser problem with <= 16 byte TXes */
28#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS 26#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
29/* XGXS and XAUI reset sequencing in SW */
30#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
31/* Low rate CRC errors require XAUI reset */ 27/* Low rate CRC errors require XAUI reset */
32#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS 28#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
33/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor 29/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index f3684ad28887..276151df3a70 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -40,7 +40,7 @@ void xfp_set_led(struct efx_nic *p, int led, int mode)
40} 40}
41 41
42struct xfp_phy_data { 42struct xfp_phy_data {
43 int tx_disabled; 43 enum efx_phy_mode phy_mode;
44}; 44};
45 45
46#define XFP_MAX_RESET_TIME 500 46#define XFP_MAX_RESET_TIME 500
@@ -93,7 +93,7 @@ static int xfp_phy_init(struct efx_nic *efx)
93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), 93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
94 MDIO_ID_REV(devid)); 94 MDIO_ID_REV(devid));
95 95
96 phy_data->tx_disabled = efx->tx_disabled; 96 phy_data->phy_mode = efx->phy_mode;
97 97
98 rc = xfp_reset_phy(efx); 98 rc = xfp_reset_phy(efx);
99 99
@@ -136,13 +136,14 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
136 struct xfp_phy_data *phy_data = efx->phy_data; 136 struct xfp_phy_data *phy_data = efx->phy_data;
137 137
138 /* Reset the PHY when moving from tx off to tx on */ 138 /* Reset the PHY when moving from tx off to tx on */
139 if (phy_data->tx_disabled && !efx->tx_disabled) 139 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
140 (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
140 xfp_reset_phy(efx); 141 xfp_reset_phy(efx);
141 142
142 mdio_clause45_transmit_disable(efx); 143 mdio_clause45_transmit_disable(efx);
143 mdio_clause45_phy_reconfigure(efx); 144 mdio_clause45_phy_reconfigure(efx);
144 145
145 phy_data->tx_disabled = efx->tx_disabled; 146 phy_data->phy_mode = efx->phy_mode;
146 efx->link_up = xfp_link_ok(efx); 147 efx->link_up = xfp_link_ok(efx);
147 efx->link_options = GM_LPA_10000FULL; 148 efx->link_options = GM_LPA_10000FULL;
148} 149}
@@ -151,7 +152,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
151static void xfp_phy_fini(struct efx_nic *efx) 152static void xfp_phy_fini(struct efx_nic *efx)
152{ 153{
153 /* Clobber the LED if it was blinking */ 154 /* Clobber the LED if it was blinking */
154 efx->board_info.blink(efx, 0); 155 efx->board_info.blink(efx, false);
155 156
156 /* Free the context block */ 157 /* Free the context block */
157 kfree(efx->phy_data); 158 kfree(efx->phy_data);
@@ -164,7 +165,6 @@ struct efx_phy_operations falcon_xfp_phy_ops = {
164 .check_hw = xfp_phy_check_hw, 165 .check_hw = xfp_phy_check_hw,
165 .fini = xfp_phy_fini, 166 .fini = xfp_phy_fini,
166 .clear_interrupt = xfp_phy_clear_interrupt, 167 .clear_interrupt = xfp_phy_clear_interrupt,
167 .reset_xaui = efx_port_dummy_op_void,
168 .mmds = XFP_REQUIRED_DEVS, 168 .mmds = XFP_REQUIRED_DEVS,
169 .loopbacks = XFP_LOOPBACKS, 169 .loopbacks = XFP_LOOPBACKS,
170}; 170};
diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c
index ea85de918233..79e665e0853d 100644
--- a/drivers/net/skfp/pmf.c
+++ b/drivers/net/skfp/pmf.c
@@ -44,17 +44,10 @@ static SMbuf *smt_build_pmf_response(struct s_smc *smc, struct smt_header *req,
44 int set, int local); 44 int set, int local);
45static int port_to_mib(struct s_smc *smc, int p); 45static int port_to_mib(struct s_smc *smc, int p);
46 46
47#define MOFFSS(e) ((int)&(((struct fddi_mib *)0)->e)) 47#define MOFFSS(e) offsetof(struct fddi_mib, e)
48#define MOFFSA(e) ((int) (((struct fddi_mib *)0)->e)) 48#define MOFFMS(e) offsetof(struct fddi_mib_m, e)
49 49#define MOFFAS(e) offsetof(struct fddi_mib_a, e)
50#define MOFFMS(e) ((int)&(((struct fddi_mib_m *)0)->e)) 50#define MOFFPS(e) offsetof(struct fddi_mib_p, e)
51#define MOFFMA(e) ((int) (((struct fddi_mib_m *)0)->e))
52
53#define MOFFAS(e) ((int)&(((struct fddi_mib_a *)0)->e))
54#define MOFFAA(e) ((int) (((struct fddi_mib_a *)0)->e))
55
56#define MOFFPS(e) ((int)&(((struct fddi_mib_p *)0)->e))
57#define MOFFPA(e) ((int) (((struct fddi_mib_p *)0)->e))
58 51
59 52
60#define AC_G 0x01 /* Get */ 53#define AC_G 0x01 /* Get */
@@ -87,8 +80,8 @@ static const struct s_p_tab {
87 { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } , 80 { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } ,
88 { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } , 81 { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } ,
89 { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } , 82 { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } ,
90 { SMT_P1010,AC_G, MOFFSA(fddiSMTManufacturerData), "D" } , 83 { SMT_P1010,AC_G, MOFFSS(fddiSMTManufacturerData), "D" } ,
91 { SMT_P1011,AC_GR, MOFFSA(fddiSMTUserData), "D" } , 84 { SMT_P1011,AC_GR, MOFFSS(fddiSMTUserData), "D" } ,
92 { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } , 85 { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } ,
93 86
94 /* StationConfigGrp */ 87 /* StationConfigGrp */
@@ -103,7 +96,7 @@ static const struct s_p_tab {
103 { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } , 96 { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } ,
104 { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } , 97 { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } ,
105 { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } , 98 { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } ,
106 { SMT_P1020,AC_G, MOFFSA(fddiSMTPORTIndexes), "II" } , 99 { SMT_P1020,AC_G, MOFFSS(fddiSMTPORTIndexes), "II" } ,
107 { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } , 100 { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } ,
108 { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } , 101 { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } ,
109 102
@@ -117,8 +110,8 @@ static const struct s_p_tab {
117 110
118 /* MIBOperationGrp */ 111 /* MIBOperationGrp */
119 { SMT_P1032,AC_GROUP } , 112 { SMT_P1032,AC_GROUP } ,
120 { SMT_P1033,AC_G, MOFFSA(fddiSMTTimeStamp),"P" } , 113 { SMT_P1033,AC_G, MOFFSS(fddiSMTTimeStamp),"P" } ,
121 { SMT_P1034,AC_G, MOFFSA(fddiSMTTransitionTimeStamp),"P" } , 114 { SMT_P1034,AC_G, MOFFSS(fddiSMTTransitionTimeStamp),"P" } ,
122 /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */ 115 /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */
123 { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } , 116 { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } ,
124 { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } , 117 { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } ,
@@ -129,7 +122,7 @@ static const struct s_p_tab {
129 * PRIVATE EXTENSIONS 122 * PRIVATE EXTENSIONS
130 * only accessible locally to get/set passwd 123 * only accessible locally to get/set passwd
131 */ 124 */
132 { SMT_P10F0,AC_GR, MOFFSA(fddiPRPMFPasswd), "8" } , 125 { SMT_P10F0,AC_GR, MOFFSS(fddiPRPMFPasswd), "8" } ,
133 { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } , 126 { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } ,
134#ifdef ESS 127#ifdef ESS
135 { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } , 128 { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } ,
@@ -245,7 +238,7 @@ static const struct s_p_tab {
245 { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } , 238 { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } ,
246 { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } , 239 { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } ,
247 { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } , 240 { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } ,
248 { SMT_P4011,AC_GR, MOFFPA(fddiPORTRequestedPaths), "l4" } , 241 { SMT_P4011,AC_GR, MOFFPS(fddiPORTRequestedPaths), "l4" } ,
249 { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } , 242 { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } ,
250 { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } , 243 { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } ,
251 { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } , 244 { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } ,
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index e24b25ca1c69..3805b9318be7 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3732,27 +3732,63 @@ static int sky2_get_eeprom_len(struct net_device *dev)
3732 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); 3732 return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
3733} 3733}
3734 3734
3735static u32 sky2_vpd_read(struct sky2_hw *hw, int cap, u16 offset) 3735static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
3736{ 3736{
3737 u32 val; 3737 unsigned long start = jiffies;
3738 3738
3739 sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset); 3739 while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
3740 /* Can take up to 10.6 ms for write */
3741 if (time_after(jiffies, start + HZ/4)) {
3742 dev_err(&hw->pdev->dev, PFX "VPD cycle timed out");
3743 return -ETIMEDOUT;
3744 }
3745 mdelay(1);
3746 }
3740 3747
3741 do { 3748 return 0;
3742 offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); 3749}
3743 } while (!(offset & PCI_VPD_ADDR_F)); 3750
3751static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
3752 u16 offset, size_t length)
3753{
3754 int rc = 0;
3755
3756 while (length > 0) {
3757 u32 val;
3758
3759 sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
3760 rc = sky2_vpd_wait(hw, cap, 0);
3761 if (rc)
3762 break;
3744 3763
3745 val = sky2_pci_read32(hw, cap + PCI_VPD_DATA); 3764 val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
3746 return val; 3765
3766 memcpy(data, &val, min(sizeof(val), length));
3767 offset += sizeof(u32);
3768 data += sizeof(u32);
3769 length -= sizeof(u32);
3770 }
3771
3772 return rc;
3747} 3773}
3748 3774
3749static void sky2_vpd_write(struct sky2_hw *hw, int cap, u16 offset, u32 val) 3775static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
3776 u16 offset, unsigned int length)
3750{ 3777{
3751 sky2_pci_write16(hw, cap + PCI_VPD_DATA, val); 3778 unsigned int i;
3752 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F); 3779 int rc = 0;
3753 do { 3780
3754 offset = sky2_pci_read16(hw, cap + PCI_VPD_ADDR); 3781 for (i = 0; i < length; i += sizeof(u32)) {
3755 } while (offset & PCI_VPD_ADDR_F); 3782 u32 val = *(u32 *)(data + i);
3783
3784 sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
3785 sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
3786
3787 rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
3788 if (rc)
3789 break;
3790 }
3791 return rc;
3756} 3792}
3757 3793
3758static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 3794static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@ -3760,24 +3796,13 @@ static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
3760{ 3796{
3761 struct sky2_port *sky2 = netdev_priv(dev); 3797 struct sky2_port *sky2 = netdev_priv(dev);
3762 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); 3798 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3763 int length = eeprom->len;
3764 u16 offset = eeprom->offset;
3765 3799
3766 if (!cap) 3800 if (!cap)
3767 return -EINVAL; 3801 return -EINVAL;
3768 3802
3769 eeprom->magic = SKY2_EEPROM_MAGIC; 3803 eeprom->magic = SKY2_EEPROM_MAGIC;
3770 3804
3771 while (length > 0) { 3805 return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
3772 u32 val = sky2_vpd_read(sky2->hw, cap, offset);
3773 int n = min_t(int, length, sizeof(val));
3774
3775 memcpy(data, &val, n);
3776 length -= n;
3777 data += n;
3778 offset += n;
3779 }
3780 return 0;
3781} 3806}
3782 3807
3783static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 3808static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
@@ -3785,8 +3810,6 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
3785{ 3810{
3786 struct sky2_port *sky2 = netdev_priv(dev); 3811 struct sky2_port *sky2 = netdev_priv(dev);
3787 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD); 3812 int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3788 int length = eeprom->len;
3789 u16 offset = eeprom->offset;
3790 3813
3791 if (!cap) 3814 if (!cap)
3792 return -EINVAL; 3815 return -EINVAL;
@@ -3794,21 +3817,11 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
3794 if (eeprom->magic != SKY2_EEPROM_MAGIC) 3817 if (eeprom->magic != SKY2_EEPROM_MAGIC)
3795 return -EINVAL; 3818 return -EINVAL;
3796 3819
3797 while (length > 0) { 3820 /* Partial writes not supported */
3798 u32 val; 3821 if ((eeprom->offset & 3) || (eeprom->len & 3))
3799 int n = min_t(int, length, sizeof(val)); 3822 return -EINVAL;
3800
3801 if (n < sizeof(val))
3802 val = sky2_vpd_read(sky2->hw, cap, offset);
3803 memcpy(&val, data, n);
3804
3805 sky2_vpd_write(sky2->hw, cap, offset, val);
3806 3823
3807 length -= n; 3824 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
3808 data += n;
3809 offset += n;
3810 }
3811 return 0;
3812} 3825}
3813 3826
3814 3827
@@ -4178,6 +4191,69 @@ static int __devinit pci_wake_enabled(struct pci_dev *dev)
4178 return value & PCI_PM_CTRL_PME_ENABLE; 4191 return value & PCI_PM_CTRL_PME_ENABLE;
4179} 4192}
4180 4193
4194/*
4195 * Read and parse the first part of Vital Product Data
4196 */
4197#define VPD_SIZE 128
4198#define VPD_MAGIC 0x82
4199
4200static void __devinit sky2_vpd_info(struct sky2_hw *hw)
4201{
4202 int cap = pci_find_capability(hw->pdev, PCI_CAP_ID_VPD);
4203 const u8 *p;
4204 u8 *vpd_buf = NULL;
4205 u16 len;
4206 static struct vpd_tag {
4207 char tag[2];
4208 char *label;
4209 } vpd_tags[] = {
4210 { "PN", "Part Number" },
4211 { "EC", "Engineering Level" },
4212 { "MN", "Manufacturer" },
4213 };
4214
4215 if (!cap)
4216 goto out;
4217
4218 vpd_buf = kmalloc(VPD_SIZE, GFP_KERNEL);
4219 if (!vpd_buf)
4220 goto out;
4221
4222 if (sky2_vpd_read(hw, cap, vpd_buf, 0, VPD_SIZE))
4223 goto out;
4224
4225 if (vpd_buf[0] != VPD_MAGIC)
4226 goto out;
4227 len = vpd_buf[1];
4228 if (len == 0 || len > VPD_SIZE - 4)
4229 goto out;
4230 p = vpd_buf + 3;
4231 dev_info(&hw->pdev->dev, "%.*s\n", len, p);
4232 p += len;
4233
4234 while (p < vpd_buf + VPD_SIZE - 4) {
4235 int i;
4236
4237 if (!memcmp("RW", p, 2)) /* end marker */
4238 break;
4239
4240 len = p[2];
4241 if (len > (p - vpd_buf) - 4)
4242 break;
4243
4244 for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
4245 if (!memcmp(vpd_tags[i].tag, p, 2)) {
4246 printk(KERN_DEBUG " %s: %.*s\n",
4247 vpd_tags[i].label, len, p + 3);
4248 break;
4249 }
4250 }
4251 p += len + 3;
4252 }
4253out:
4254 kfree(vpd_buf);
4255}
4256
4181/* This driver supports yukon2 chipset only */ 4257/* This driver supports yukon2 chipset only */
4182static const char *sky2_name(u8 chipid, char *buf, int sz) 4258static const char *sky2_name(u8 chipid, char *buf, int sz)
4183{ 4259{
@@ -4276,13 +4352,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4276 if (err) 4352 if (err)
4277 goto err_out_iounmap; 4353 goto err_out_iounmap;
4278 4354
4279 dev_info(&pdev->dev, "v%s addr 0x%llx irq %d Yukon-2 %s rev %d\n", 4355 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
4280 DRV_VERSION, (unsigned long long)pci_resource_start(pdev, 0), 4356 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
4281 pdev->irq, sky2_name(hw->chip_id, buf1, sizeof(buf1)),
4282 hw->chip_rev);
4283 4357
4284 sky2_reset(hw); 4358 sky2_reset(hw);
4285 4359
4360 sky2_vpd_info(hw);
4361
4286 dev = sky2_init_netdev(hw, 0, using_dac, wol_default); 4362 dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
4287 if (!dev) { 4363 if (!dev) {
4288 err = -ENOMEM; 4364 err = -ENOMEM;
@@ -4533,6 +4609,8 @@ static struct pci_driver sky2_driver = {
4533 4609
4534static int __init sky2_init_module(void) 4610static int __init sky2_init_module(void)
4535{ 4611{
4612 pr_info(PFX "driver version " DRV_VERSION "\n");
4613
4536 sky2_debug_init(); 4614 sky2_debug_init();
4537 return pci_register_driver(&sky2_driver); 4615 return pci_register_driver(&sky2_driver);
4538} 4616}
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index c5871624f972..02cc064c2c8b 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -183,7 +183,7 @@ static void smc911x_reset(struct net_device *dev)
183 unsigned int reg, timeout=0, resets=1; 183 unsigned int reg, timeout=0, resets=1;
184 unsigned long flags; 184 unsigned long flags;
185 185
186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 186 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
187 187
188 /* Take out of PM setting first */ 188 /* Take out of PM setting first */
189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { 189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) {
@@ -272,7 +272,7 @@ static void smc911x_enable(struct net_device *dev)
272 unsigned mask, cfg, cr; 272 unsigned mask, cfg, cr;
273 unsigned long flags; 273 unsigned long flags;
274 274
275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
276 276
277 SMC_SET_MAC_ADDR(lp, dev->dev_addr); 277 SMC_SET_MAC_ADDR(lp, dev->dev_addr);
278 278
@@ -329,7 +329,7 @@ static void smc911x_shutdown(struct net_device *dev)
329 unsigned cr; 329 unsigned cr;
330 unsigned long flags; 330 unsigned long flags;
331 331
332 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__); 332 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__);
333 333
334 /* Disable IRQ's */ 334 /* Disable IRQ's */
335 SMC_SET_INT_EN(lp, 0); 335 SMC_SET_INT_EN(lp, 0);
@@ -348,7 +348,7 @@ static inline void smc911x_drop_pkt(struct net_device *dev)
348 struct smc911x_local *lp = netdev_priv(dev); 348 struct smc911x_local *lp = netdev_priv(dev);
349 unsigned int fifo_count, timeout, reg; 349 unsigned int fifo_count, timeout, reg;
350 350
351 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__); 351 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__);
352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; 352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF;
353 if (fifo_count <= 4) { 353 if (fifo_count <= 4) {
354 /* Manually dump the packet data */ 354 /* Manually dump the packet data */
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
382 unsigned char *data; 382 unsigned char *data;
383 383
384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", 384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
385 dev->name, __FUNCTION__); 385 dev->name, __func__);
386 status = SMC_GET_RX_STS_FIFO(lp); 386 status = SMC_GET_RX_STS_FIFO(lp);
387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", 387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); 388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
@@ -460,7 +460,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
460 unsigned char *buf; 460 unsigned char *buf;
461 unsigned long flags; 461 unsigned long flags;
462 462
463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__); 463 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
464 BUG_ON(lp->pending_tx_skb == NULL); 464 BUG_ON(lp->pending_tx_skb == NULL);
465 465
466 skb = lp->pending_tx_skb; 466 skb = lp->pending_tx_skb;
@@ -524,7 +524,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
524 unsigned long flags; 524 unsigned long flags;
525 525
526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
527 dev->name, __FUNCTION__); 527 dev->name, __func__);
528 528
529 BUG_ON(lp->pending_tx_skb != NULL); 529 BUG_ON(lp->pending_tx_skb != NULL);
530 530
@@ -596,7 +596,7 @@ static void smc911x_tx(struct net_device *dev)
596 unsigned int tx_status; 596 unsigned int tx_status;
597 597
598 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 598 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
599 dev->name, __FUNCTION__); 599 dev->name, __func__);
600 600
601 /* Collect the TX status */ 601 /* Collect the TX status */
602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { 602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
@@ -647,7 +647,7 @@ static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
647 SMC_GET_MII(lp, phyreg, phyaddr, phydata); 647 SMC_GET_MII(lp, phyreg, phyaddr, phydata);
648 648
649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", 649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
650 __FUNCTION__, phyaddr, phyreg, phydata); 650 __func__, phyaddr, phyreg, phydata);
651 return phydata; 651 return phydata;
652} 652}
653 653
@@ -661,7 +661,7 @@ static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
661 struct smc911x_local *lp = netdev_priv(dev); 661 struct smc911x_local *lp = netdev_priv(dev);
662 662
663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
664 __FUNCTION__, phyaddr, phyreg, phydata); 664 __func__, phyaddr, phyreg, phydata);
665 665
666 SMC_SET_MII(lp, phyreg, phyaddr, phydata); 666 SMC_SET_MII(lp, phyreg, phyaddr, phydata);
667} 667}
@@ -676,7 +676,7 @@ static void smc911x_phy_detect(struct net_device *dev)
676 int phyaddr; 676 int phyaddr;
677 unsigned int cfg, id1, id2; 677 unsigned int cfg, id1, id2;
678 678
679 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 679 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
680 680
681 lp->phy_type = 0; 681 lp->phy_type = 0;
682 682
@@ -746,7 +746,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
746 int phyaddr = lp->mii.phy_id; 746 int phyaddr = lp->mii.phy_id;
747 int bmcr; 747 int bmcr;
748 748
749 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 749 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
750 750
751 /* Enter Link Disable state */ 751 /* Enter Link Disable state */
752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); 752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr);
@@ -793,7 +793,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
793 unsigned long flags; 793 unsigned long flags;
794 unsigned int reg; 794 unsigned int reg;
795 795
796 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 796 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
797 797
798 spin_lock_irqsave(&lp->lock, flags); 798 spin_lock_irqsave(&lp->lock, flags);
799 reg = SMC_GET_PMT_CTRL(lp); 799 reg = SMC_GET_PMT_CTRL(lp);
@@ -852,7 +852,7 @@ static void smc911x_phy_check_media(struct net_device *dev, int init)
852 int phyaddr = lp->mii.phy_id; 852 int phyaddr = lp->mii.phy_id;
853 unsigned int bmcr, cr; 853 unsigned int bmcr, cr;
854 854
855 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 855 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
856 856
857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { 857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
858 /* duplex state has changed */ 858 /* duplex state has changed */
@@ -892,7 +892,7 @@ static void smc911x_phy_configure(struct work_struct *work)
892 int status; 892 int status;
893 unsigned long flags; 893 unsigned long flags;
894 894
895 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 895 DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__);
896 896
897 /* 897 /*
898 * We should not be called if phy_type is zero. 898 * We should not be called if phy_type is zero.
@@ -985,7 +985,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
985 int phyaddr = lp->mii.phy_id; 985 int phyaddr = lp->mii.phy_id;
986 int status; 986 int status;
987 987
988 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 988 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
989 989
990 if (lp->phy_type == 0) 990 if (lp->phy_type == 0)
991 return; 991 return;
@@ -1013,7 +1013,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1013 unsigned int rx_overrun=0, cr, pkts; 1013 unsigned int rx_overrun=0, cr, pkts;
1014 unsigned long flags; 1014 unsigned long flags;
1015 1015
1016 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1016 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1017 1017
1018 spin_lock_irqsave(&lp->lock, flags); 1018 spin_lock_irqsave(&lp->lock, flags);
1019 1019
@@ -1174,8 +1174,6 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1174 1174
1175 spin_unlock_irqrestore(&lp->lock, flags); 1175 spin_unlock_irqrestore(&lp->lock, flags);
1176 1176
1177 DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout);
1178
1179 return IRQ_HANDLED; 1177 return IRQ_HANDLED;
1180} 1178}
1181 1179
@@ -1188,7 +1186,7 @@ smc911x_tx_dma_irq(int dma, void *data)
1188 struct sk_buff *skb = lp->current_tx_skb; 1186 struct sk_buff *skb = lp->current_tx_skb;
1189 unsigned long flags; 1187 unsigned long flags;
1190 1188
1191 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1189 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1192 1190
1193 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name); 1191 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
1194 /* Clear the DMA interrupt sources */ 1192 /* Clear the DMA interrupt sources */
@@ -1224,7 +1222,7 @@ smc911x_rx_dma_irq(int dma, void *data)
1224 unsigned long flags; 1222 unsigned long flags;
1225 unsigned int pkts; 1223 unsigned int pkts;
1226 1224
1227 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1225 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1228 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name); 1226 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
1229 /* Clear the DMA interrupt sources */ 1227 /* Clear the DMA interrupt sources */
1230 SMC_DMA_ACK_IRQ(dev, dma); 1228 SMC_DMA_ACK_IRQ(dev, dma);
@@ -1272,7 +1270,7 @@ static void smc911x_timeout(struct net_device *dev)
1272 int status, mask; 1270 int status, mask;
1273 unsigned long flags; 1271 unsigned long flags;
1274 1272
1275 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1273 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1276 1274
1277 spin_lock_irqsave(&lp->lock, flags); 1275 spin_lock_irqsave(&lp->lock, flags);
1278 status = SMC_GET_INT(lp); 1276 status = SMC_GET_INT(lp);
@@ -1310,7 +1308,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1310 unsigned int mcr, update_multicast = 0; 1308 unsigned int mcr, update_multicast = 0;
1311 unsigned long flags; 1309 unsigned long flags;
1312 1310
1313 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1311 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1314 1312
1315 spin_lock_irqsave(&lp->lock, flags); 1313 spin_lock_irqsave(&lp->lock, flags);
1316 SMC_GET_MAC_CR(lp, mcr); 1314 SMC_GET_MAC_CR(lp, mcr);
@@ -1412,7 +1410,7 @@ smc911x_open(struct net_device *dev)
1412{ 1410{
1413 struct smc911x_local *lp = netdev_priv(dev); 1411 struct smc911x_local *lp = netdev_priv(dev);
1414 1412
1415 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1413 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1416 1414
1417 /* 1415 /*
1418 * Check that the address is valid. If its not, refuse 1416 * Check that the address is valid. If its not, refuse
@@ -1420,7 +1418,7 @@ smc911x_open(struct net_device *dev)
1420 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1418 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1421 */ 1419 */
1422 if (!is_valid_ether_addr(dev->dev_addr)) { 1420 if (!is_valid_ether_addr(dev->dev_addr)) {
1423 PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1421 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1424 return -EINVAL; 1422 return -EINVAL;
1425 } 1423 }
1426 1424
@@ -1449,7 +1447,7 @@ static int smc911x_close(struct net_device *dev)
1449{ 1447{
1450 struct smc911x_local *lp = netdev_priv(dev); 1448 struct smc911x_local *lp = netdev_priv(dev);
1451 1449
1452 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1450 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1453 1451
1454 netif_stop_queue(dev); 1452 netif_stop_queue(dev);
1455 netif_carrier_off(dev); 1453 netif_carrier_off(dev);
@@ -1483,7 +1481,7 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1483 int ret, status; 1481 int ret, status;
1484 unsigned long flags; 1482 unsigned long flags;
1485 1483
1486 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1484 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1487 cmd->maxtxpkt = 1; 1485 cmd->maxtxpkt = 1;
1488 cmd->maxrxpkt = 1; 1486 cmd->maxrxpkt = 1;
1489 1487
@@ -1621,7 +1619,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1621 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { 1619 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
1622 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { 1620 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
1623 PRINTK("%s: %s timeout waiting for EEPROM to respond\n", 1621 PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
1624 dev->name, __FUNCTION__); 1622 dev->name, __func__);
1625 return -EFAULT; 1623 return -EFAULT;
1626 } 1624 }
1627 mdelay(1); 1625 mdelay(1);
@@ -1629,7 +1627,7 @@ static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
1629 } 1627 }
1630 if (timeout == 0) { 1628 if (timeout == 0) {
1631 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n", 1629 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
1632 dev->name, __FUNCTION__); 1630 dev->name, __func__);
1633 return -ETIMEDOUT; 1631 return -ETIMEDOUT;
1634 } 1632 }
1635 return 0; 1633 return 0;
@@ -1742,7 +1740,7 @@ static int __init smc911x_findirq(struct net_device *dev)
1742 int timeout = 20; 1740 int timeout = 20;
1743 unsigned long cookie; 1741 unsigned long cookie;
1744 1742
1745 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 1743 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
1746 1744
1747 cookie = probe_irq_on(); 1745 cookie = probe_irq_on();
1748 1746
@@ -1808,7 +1806,7 @@ static int __init smc911x_probe(struct net_device *dev)
1808 const char *version_string; 1806 const char *version_string;
1809 unsigned long irq_flags; 1807 unsigned long irq_flags;
1810 1808
1811 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1809 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1812 1810
1813 /* First, see if the endian word is recognized */ 1811 /* First, see if the endian word is recognized */
1814 val = SMC_GET_BYTE_TEST(lp); 1812 val = SMC_GET_BYTE_TEST(lp);
@@ -2058,7 +2056,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
2058 unsigned int *addr; 2056 unsigned int *addr;
2059 int ret; 2057 int ret;
2060 2058
2061 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2059 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2062 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2060 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2063 if (!res) { 2061 if (!res) {
2064 ret = -ENODEV; 2062 ret = -ENODEV;
@@ -2129,7 +2127,7 @@ static int smc911x_drv_remove(struct platform_device *pdev)
2129 struct smc911x_local *lp = netdev_priv(ndev); 2127 struct smc911x_local *lp = netdev_priv(ndev);
2130 struct resource *res; 2128 struct resource *res;
2131 2129
2132 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2130 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2133 platform_set_drvdata(pdev, NULL); 2131 platform_set_drvdata(pdev, NULL);
2134 2132
2135 unregister_netdev(ndev); 2133 unregister_netdev(ndev);
@@ -2159,7 +2157,7 @@ static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
2159 struct net_device *ndev = platform_get_drvdata(dev); 2157 struct net_device *ndev = platform_get_drvdata(dev);
2160 struct smc911x_local *lp = netdev_priv(ndev); 2158 struct smc911x_local *lp = netdev_priv(ndev);
2161 2159
2162 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2160 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2163 if (ndev) { 2161 if (ndev) {
2164 if (netif_running(ndev)) { 2162 if (netif_running(ndev)) {
2165 netif_device_detach(ndev); 2163 netif_device_detach(ndev);
@@ -2177,7 +2175,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
2177{ 2175{
2178 struct net_device *ndev = platform_get_drvdata(dev); 2176 struct net_device *ndev = platform_get_drvdata(dev);
2179 2177
2180 DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2178 DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
2181 if (ndev) { 2179 if (ndev) {
2182 struct smc911x_local *lp = netdev_priv(ndev); 2180 struct smc911x_local *lp = netdev_priv(ndev);
2183 2181
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 24768c10cadb..ef5ce8845c9d 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -270,7 +270,7 @@ static void smc_reset(struct net_device *dev)
270 unsigned int ctl, cfg; 270 unsigned int ctl, cfg;
271 struct sk_buff *pending_skb; 271 struct sk_buff *pending_skb;
272 272
273 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 273 DBG(2, "%s: %s\n", dev->name, __func__);
274 274
275 /* Disable all interrupts, block TX tasklet */ 275 /* Disable all interrupts, block TX tasklet */
276 spin_lock_irq(&lp->lock); 276 spin_lock_irq(&lp->lock);
@@ -363,7 +363,7 @@ static void smc_enable(struct net_device *dev)
363 void __iomem *ioaddr = lp->base; 363 void __iomem *ioaddr = lp->base;
364 int mask; 364 int mask;
365 365
366 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 366 DBG(2, "%s: %s\n", dev->name, __func__);
367 367
368 /* see the header file for options in TCR/RCR DEFAULT */ 368 /* see the header file for options in TCR/RCR DEFAULT */
369 SMC_SELECT_BANK(lp, 0); 369 SMC_SELECT_BANK(lp, 0);
@@ -397,7 +397,7 @@ static void smc_shutdown(struct net_device *dev)
397 void __iomem *ioaddr = lp->base; 397 void __iomem *ioaddr = lp->base;
398 struct sk_buff *pending_skb; 398 struct sk_buff *pending_skb;
399 399
400 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 400 DBG(2, "%s: %s\n", CARDNAME, __func__);
401 401
402 /* no more interrupts for me */ 402 /* no more interrupts for me */
403 spin_lock_irq(&lp->lock); 403 spin_lock_irq(&lp->lock);
@@ -430,7 +430,7 @@ static inline void smc_rcv(struct net_device *dev)
430 void __iomem *ioaddr = lp->base; 430 void __iomem *ioaddr = lp->base;
431 unsigned int packet_number, status, packet_len; 431 unsigned int packet_number, status, packet_len;
432 432
433 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 433 DBG(3, "%s: %s\n", dev->name, __func__);
434 434
435 packet_number = SMC_GET_RXFIFO(lp); 435 packet_number = SMC_GET_RXFIFO(lp);
436 if (unlikely(packet_number & RXFIFO_REMPTY)) { 436 if (unlikely(packet_number & RXFIFO_REMPTY)) {
@@ -577,7 +577,7 @@ static void smc_hardware_send_pkt(unsigned long data)
577 unsigned int packet_no, len; 577 unsigned int packet_no, len;
578 unsigned char *buf; 578 unsigned char *buf;
579 579
580 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 580 DBG(3, "%s: %s\n", dev->name, __func__);
581 581
582 if (!smc_special_trylock(&lp->lock)) { 582 if (!smc_special_trylock(&lp->lock)) {
583 netif_stop_queue(dev); 583 netif_stop_queue(dev);
@@ -662,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
662 void __iomem *ioaddr = lp->base; 662 void __iomem *ioaddr = lp->base;
663 unsigned int numPages, poll_count, status; 663 unsigned int numPages, poll_count, status;
664 664
665 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 665 DBG(3, "%s: %s\n", dev->name, __func__);
666 666
667 BUG_ON(lp->pending_tx_skb != NULL); 667 BUG_ON(lp->pending_tx_skb != NULL);
668 668
@@ -734,7 +734,7 @@ static void smc_tx(struct net_device *dev)
734 void __iomem *ioaddr = lp->base; 734 void __iomem *ioaddr = lp->base;
735 unsigned int saved_packet, packet_no, tx_status, pkt_len; 735 unsigned int saved_packet, packet_no, tx_status, pkt_len;
736 736
737 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 737 DBG(3, "%s: %s\n", dev->name, __func__);
738 738
739 /* If the TX FIFO is empty then nothing to do */ 739 /* If the TX FIFO is empty then nothing to do */
740 packet_no = SMC_GET_TXFIFO(lp); 740 packet_no = SMC_GET_TXFIFO(lp);
@@ -856,7 +856,7 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
856 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 856 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
857 857
858 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 858 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
859 __FUNCTION__, phyaddr, phyreg, phydata); 859 __func__, phyaddr, phyreg, phydata);
860 860
861 SMC_SELECT_BANK(lp, 2); 861 SMC_SELECT_BANK(lp, 2);
862 return phydata; 862 return phydata;
@@ -883,7 +883,7 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
883 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 883 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
884 884
885 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 885 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
886 __FUNCTION__, phyaddr, phyreg, phydata); 886 __func__, phyaddr, phyreg, phydata);
887 887
888 SMC_SELECT_BANK(lp, 2); 888 SMC_SELECT_BANK(lp, 2);
889} 889}
@@ -896,7 +896,7 @@ static void smc_phy_detect(struct net_device *dev)
896 struct smc_local *lp = netdev_priv(dev); 896 struct smc_local *lp = netdev_priv(dev);
897 int phyaddr; 897 int phyaddr;
898 898
899 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 899 DBG(2, "%s: %s\n", dev->name, __func__);
900 900
901 lp->phy_type = 0; 901 lp->phy_type = 0;
902 902
@@ -935,7 +935,7 @@ static int smc_phy_fixed(struct net_device *dev)
935 int phyaddr = lp->mii.phy_id; 935 int phyaddr = lp->mii.phy_id;
936 int bmcr, cfg1; 936 int bmcr, cfg1;
937 937
938 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 938 DBG(3, "%s: %s\n", dev->name, __func__);
939 939
940 /* Enter Link Disable state */ 940 /* Enter Link Disable state */
941 cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); 941 cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG);
@@ -1168,7 +1168,7 @@ static void smc_phy_interrupt(struct net_device *dev)
1168 int phyaddr = lp->mii.phy_id; 1168 int phyaddr = lp->mii.phy_id;
1169 int phy18; 1169 int phy18;
1170 1170
1171 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1171 DBG(2, "%s: %s\n", dev->name, __func__);
1172 1172
1173 if (lp->phy_type == 0) 1173 if (lp->phy_type == 0)
1174 return; 1174 return;
@@ -1236,7 +1236,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1236 int status, mask, timeout, card_stats; 1236 int status, mask, timeout, card_stats;
1237 int saved_pointer; 1237 int saved_pointer;
1238 1238
1239 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 1239 DBG(3, "%s: %s\n", dev->name, __func__);
1240 1240
1241 spin_lock(&lp->lock); 1241 spin_lock(&lp->lock);
1242 1242
@@ -1358,7 +1358,7 @@ static void smc_timeout(struct net_device *dev)
1358 void __iomem *ioaddr = lp->base; 1358 void __iomem *ioaddr = lp->base;
1359 int status, mask, eph_st, meminfo, fifo; 1359 int status, mask, eph_st, meminfo, fifo;
1360 1360
1361 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1361 DBG(2, "%s: %s\n", dev->name, __func__);
1362 1362
1363 spin_lock_irq(&lp->lock); 1363 spin_lock_irq(&lp->lock);
1364 status = SMC_GET_INT(lp); 1364 status = SMC_GET_INT(lp);
@@ -1402,7 +1402,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1402 unsigned char multicast_table[8]; 1402 unsigned char multicast_table[8];
1403 int update_multicast = 0; 1403 int update_multicast = 0;
1404 1404
1405 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1405 DBG(2, "%s: %s\n", dev->name, __func__);
1406 1406
1407 if (dev->flags & IFF_PROMISC) { 1407 if (dev->flags & IFF_PROMISC) {
1408 DBG(2, "%s: RCR_PRMS\n", dev->name); 1408 DBG(2, "%s: RCR_PRMS\n", dev->name);
@@ -1505,7 +1505,7 @@ smc_open(struct net_device *dev)
1505{ 1505{
1506 struct smc_local *lp = netdev_priv(dev); 1506 struct smc_local *lp = netdev_priv(dev);
1507 1507
1508 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1508 DBG(2, "%s: %s\n", dev->name, __func__);
1509 1509
1510 /* 1510 /*
1511 * Check that the address is valid. If its not, refuse 1511 * Check that the address is valid. If its not, refuse
@@ -1513,7 +1513,7 @@ smc_open(struct net_device *dev)
1513 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1513 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1514 */ 1514 */
1515 if (!is_valid_ether_addr(dev->dev_addr)) { 1515 if (!is_valid_ether_addr(dev->dev_addr)) {
1516 PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1516 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1517 return -EINVAL; 1517 return -EINVAL;
1518 } 1518 }
1519 1519
@@ -1557,7 +1557,7 @@ static int smc_close(struct net_device *dev)
1557{ 1557{
1558 struct smc_local *lp = netdev_priv(dev); 1558 struct smc_local *lp = netdev_priv(dev);
1559 1559
1560 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1560 DBG(2, "%s: %s\n", dev->name, __func__);
1561 1561
1562 netif_stop_queue(dev); 1562 netif_stop_queue(dev);
1563 netif_carrier_off(dev); 1563 netif_carrier_off(dev);
@@ -1700,7 +1700,7 @@ static int __init smc_findirq(struct smc_local *lp)
1700 int timeout = 20; 1700 int timeout = 20;
1701 unsigned long cookie; 1701 unsigned long cookie;
1702 1702
1703 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1703 DBG(2, "%s: %s\n", CARDNAME, __func__);
1704 1704
1705 cookie = probe_irq_on(); 1705 cookie = probe_irq_on();
1706 1706
@@ -1778,7 +1778,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1778 const char *version_string; 1778 const char *version_string;
1779 DECLARE_MAC_BUF(mac); 1779 DECLARE_MAC_BUF(mac);
1780 1780
1781 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1781 DBG(2, "%s: %s\n", CARDNAME, __func__);
1782 1782
1783 /* First, see if the high byte is 0x33 */ 1783 /* First, see if the high byte is 0x33 */
1784 val = SMC_CURRENT_BANK(lp); 1784 val = SMC_CURRENT_BANK(lp);
@@ -1961,7 +1961,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1961 if (dev->dma != (unsigned char)-1) 1961 if (dev->dma != (unsigned char)-1)
1962 printk(" DMA %d", dev->dma); 1962 printk(" DMA %d", dev->dma);
1963 1963
1964 printk("%s%s\n", nowait ? " [nowait]" : "", 1964 printk("%s%s\n",
1965 lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "",
1965 THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); 1966 THROTTLE_TX_PKTS ? " [throttle_tx]" : "");
1966 1967
1967 if (!is_valid_ether_addr(dev->dev_addr)) { 1968 if (!is_valid_ether_addr(dev->dev_addr)) {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 997e7f1d5c6e..edea0732f145 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -446,6 +446,8 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
446#define SMC_CAN_USE_32BIT 1 446#define SMC_CAN_USE_32BIT 1
447#define SMC_NOWAIT 1 447#define SMC_NOWAIT 1
448 448
449#define SMC_IO_SHIFT (lp->io_shift)
450
449#define SMC_inb(a, r) readb((a) + (r)) 451#define SMC_inb(a, r) readb((a) + (r))
450#define SMC_inw(a, r) readw((a) + (r)) 452#define SMC_inw(a, r) readw((a) + (r))
451#define SMC_inl(a, r) readl((a) + (r)) 453#define SMC_inl(a, r) readl((a) + (r))
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 7d5561b8241c..f860ea150395 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -409,6 +409,7 @@ static int change_mtu(struct net_device *dev, int new_mtu);
409static int eeprom_read(void __iomem *ioaddr, int location); 409static int eeprom_read(void __iomem *ioaddr, int location);
410static int mdio_read(struct net_device *dev, int phy_id, int location); 410static int mdio_read(struct net_device *dev, int phy_id, int location);
411static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 411static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
412static int mdio_wait_link(struct net_device *dev, int wait);
412static int netdev_open(struct net_device *dev); 413static int netdev_open(struct net_device *dev);
413static void check_duplex(struct net_device *dev); 414static void check_duplex(struct net_device *dev);
414static void netdev_timer(unsigned long data); 415static void netdev_timer(unsigned long data);
@@ -785,6 +786,24 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
785 return; 786 return;
786} 787}
787 788
789static int mdio_wait_link(struct net_device *dev, int wait)
790{
791 int bmsr;
792 int phy_id;
793 struct netdev_private *np;
794
795 np = netdev_priv(dev);
796 phy_id = np->phys[0];
797
798 do {
799 bmsr = mdio_read(dev, phy_id, MII_BMSR);
800 if (bmsr & 0x0004)
801 return 0;
802 mdelay(1);
803 } while (--wait > 0);
804 return -1;
805}
806
788static int netdev_open(struct net_device *dev) 807static int netdev_open(struct net_device *dev)
789{ 808{
790 struct netdev_private *np = netdev_priv(dev); 809 struct netdev_private *np = netdev_priv(dev);
@@ -1393,41 +1412,51 @@ static void netdev_error(struct net_device *dev, int intr_status)
1393 int speed; 1412 int speed;
1394 1413
1395 if (intr_status & LinkChange) { 1414 if (intr_status & LinkChange) {
1396 if (np->an_enable) { 1415 if (mdio_wait_link(dev, 10) == 0) {
1397 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE); 1416 printk(KERN_INFO "%s: Link up\n", dev->name);
1398 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA); 1417 if (np->an_enable) {
1399 mii_advertise &= mii_lpa; 1418 mii_advertise = mdio_read(dev, np->phys[0],
1400 printk (KERN_INFO "%s: Link changed: ", dev->name); 1419 MII_ADVERTISE);
1401 if (mii_advertise & ADVERTISE_100FULL) { 1420 mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1402 np->speed = 100; 1421 mii_advertise &= mii_lpa;
1403 printk ("100Mbps, full duplex\n"); 1422 printk(KERN_INFO "%s: Link changed: ",
1404 } else if (mii_advertise & ADVERTISE_100HALF) { 1423 dev->name);
1405 np->speed = 100; 1424 if (mii_advertise & ADVERTISE_100FULL) {
1406 printk ("100Mbps, half duplex\n"); 1425 np->speed = 100;
1407 } else if (mii_advertise & ADVERTISE_10FULL) { 1426 printk("100Mbps, full duplex\n");
1408 np->speed = 10; 1427 } else if (mii_advertise & ADVERTISE_100HALF) {
1409 printk ("10Mbps, full duplex\n"); 1428 np->speed = 100;
1410 } else if (mii_advertise & ADVERTISE_10HALF) { 1429 printk("100Mbps, half duplex\n");
1411 np->speed = 10; 1430 } else if (mii_advertise & ADVERTISE_10FULL) {
1412 printk ("10Mbps, half duplex\n"); 1431 np->speed = 10;
1413 } else 1432 printk("10Mbps, full duplex\n");
1414 printk ("\n"); 1433 } else if (mii_advertise & ADVERTISE_10HALF) {
1434 np->speed = 10;
1435 printk("10Mbps, half duplex\n");
1436 } else
1437 printk("\n");
1415 1438
1439 } else {
1440 mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1441 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1442 np->speed = speed;
1443 printk(KERN_INFO "%s: Link changed: %dMbps ,",
1444 dev->name, speed);
1445 printk("%s duplex.\n",
1446 (mii_ctl & BMCR_FULLDPLX) ?
1447 "full" : "half");
1448 }
1449 check_duplex(dev);
1450 if (np->flowctrl && np->mii_if.full_duplex) {
1451 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1452 ioaddr + MulticastFilter1+2);
1453 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1454 ioaddr + MACCtrl0);
1455 }
1456 netif_carrier_on(dev);
1416 } else { 1457 } else {
1417 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR); 1458 printk(KERN_INFO "%s: Link down\n", dev->name);
1418 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; 1459 netif_carrier_off(dev);
1419 np->speed = speed;
1420 printk (KERN_INFO "%s: Link changed: %dMbps ,",
1421 dev->name, speed);
1422 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1423 "full" : "half");
1424 }
1425 check_duplex (dev);
1426 if (np->flowctrl && np->mii_if.full_duplex) {
1427 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1428 ioaddr + MulticastFilter1+2);
1429 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1430 ioaddr + MACCtrl0);
1431 } 1460 }
1432 } 1461 }
1433 if (intr_status & StatsMax) { 1462 if (intr_status & StatsMax) {
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 7db48f1cd949..efaf84d9757d 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -539,22 +539,22 @@ struct txd_desc {
539 539
540#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args) 540#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args)
541#define DBG2(fmt, args...) \ 541#define DBG2(fmt, args...) \
542 printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 542 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
543 543
544#define BDX_ASSERT(x) BUG_ON(x) 544#define BDX_ASSERT(x) BUG_ON(x)
545 545
546#ifdef DEBUG 546#ifdef DEBUG
547 547
548#define ENTER do { \ 548#define ENTER do { \
549 printk(KERN_ERR "%s:%-5d: ENTER\n", __FUNCTION__, __LINE__); \ 549 printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \
550} while (0) 550} while (0)
551 551
552#define RET(args...) do { \ 552#define RET(args...) do { \
553 printk(KERN_ERR "%s:%-5d: RETURN\n", __FUNCTION__, __LINE__); \ 553 printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \
554return args; } while (0) 554return args; } while (0)
555 555
556#define DBG(fmt, args...) \ 556#define DBG(fmt, args...) \
557 printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 557 printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
558#else 558#else
559#define ENTER do { } while (0) 559#define ENTER do { } while (0)
560#define RET(args...) return args 560#define RET(args...) return args
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 71d2c5cfdad9..123920759efd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3861,10 +3861,7 @@ static void tg3_tx(struct tg3 *tp)
3861 return; 3861 return;
3862 } 3862 }
3863 3863
3864 pci_unmap_single(tp->pdev, 3864 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
3865 pci_unmap_addr(ri, mapping),
3866 skb_headlen(skb),
3867 PCI_DMA_TODEVICE);
3868 3865
3869 ri->skb = NULL; 3866 ri->skb = NULL;
3870 3867
@@ -3874,12 +3871,6 @@ static void tg3_tx(struct tg3 *tp)
3874 ri = &tp->tx_buffers[sw_idx]; 3871 ri = &tp->tx_buffers[sw_idx];
3875 if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) 3872 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3876 tx_bug = 1; 3873 tx_bug = 1;
3877
3878 pci_unmap_page(tp->pdev,
3879 pci_unmap_addr(ri, mapping),
3880 skb_shinfo(skb)->frags[i].size,
3881 PCI_DMA_TODEVICE);
3882
3883 sw_idx = NEXT_TX(sw_idx); 3874 sw_idx = NEXT_TX(sw_idx);
3884 } 3875 }
3885 3876
@@ -4633,12 +4624,16 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4633 } else { 4624 } else {
4634 /* New SKB is guaranteed to be linear. */ 4625 /* New SKB is guaranteed to be linear. */
4635 entry = *start; 4626 entry = *start;
4636 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, 4627 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4637 PCI_DMA_TODEVICE); 4628 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4629
4638 /* Make sure new skb does not cross any 4G boundaries. 4630 /* Make sure new skb does not cross any 4G boundaries.
4639 * Drop the packet if it does. 4631 * Drop the packet if it does.
4640 */ 4632 */
4641 if (tg3_4g_overflow_test(new_addr, new_skb->len)) { 4633 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4634 if (!ret)
4635 skb_dma_unmap(&tp->pdev->dev, new_skb,
4636 DMA_TO_DEVICE);
4642 ret = -1; 4637 ret = -1;
4643 dev_kfree_skb(new_skb); 4638 dev_kfree_skb(new_skb);
4644 new_skb = NULL; 4639 new_skb = NULL;
@@ -4652,18 +4647,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4652 /* Now clean up the sw ring entries. */ 4647 /* Now clean up the sw ring entries. */
4653 i = 0; 4648 i = 0;
4654 while (entry != last_plus_one) { 4649 while (entry != last_plus_one) {
4655 int len;
4656
4657 if (i == 0)
4658 len = skb_headlen(skb);
4659 else
4660 len = skb_shinfo(skb)->frags[i-1].size;
4661 pci_unmap_single(tp->pdev,
4662 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4663 len, PCI_DMA_TODEVICE);
4664 if (i == 0) { 4650 if (i == 0) {
4665 tp->tx_buffers[entry].skb = new_skb; 4651 tp->tx_buffers[entry].skb = new_skb;
4666 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4667 } else { 4652 } else {
4668 tp->tx_buffers[entry].skb = NULL; 4653 tp->tx_buffers[entry].skb = NULL;
4669 } 4654 }
@@ -4671,6 +4656,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4671 i++; 4656 i++;
4672 } 4657 }
4673 4658
4659 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4674 dev_kfree_skb(skb); 4660 dev_kfree_skb(skb);
4675 4661
4676 return ret; 4662 return ret;
@@ -4705,8 +4691,9 @@ static void tg3_set_txd(struct tg3 *tp, int entry,
4705static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) 4691static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4706{ 4692{
4707 struct tg3 *tp = netdev_priv(dev); 4693 struct tg3 *tp = netdev_priv(dev);
4708 dma_addr_t mapping;
4709 u32 len, entry, base_flags, mss; 4694 u32 len, entry, base_flags, mss;
4695 struct skb_shared_info *sp;
4696 dma_addr_t mapping;
4710 4697
4711 len = skb_headlen(skb); 4698 len = skb_headlen(skb);
4712 4699
@@ -4765,11 +4752,16 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4765 (vlan_tx_tag_get(skb) << 16)); 4752 (vlan_tx_tag_get(skb) << 16));
4766#endif 4753#endif
4767 4754
4768 /* Queue skb data, a.k.a. the main skb fragment. */ 4755 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4769 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 4756 dev_kfree_skb(skb);
4757 goto out_unlock;
4758 }
4759
4760 sp = skb_shinfo(skb);
4761
4762 mapping = sp->dma_maps[0];
4770 4763
4771 tp->tx_buffers[entry].skb = skb; 4764 tp->tx_buffers[entry].skb = skb;
4772 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4773 4765
4774 tg3_set_txd(tp, entry, mapping, len, base_flags, 4766 tg3_set_txd(tp, entry, mapping, len, base_flags,
4775 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); 4767 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
@@ -4785,13 +4777,8 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4785 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4777 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4786 4778
4787 len = frag->size; 4779 len = frag->size;
4788 mapping = pci_map_page(tp->pdev, 4780 mapping = sp->dma_maps[i + 1];
4789 frag->page,
4790 frag->page_offset,
4791 len, PCI_DMA_TODEVICE);
4792
4793 tp->tx_buffers[entry].skb = NULL; 4781 tp->tx_buffers[entry].skb = NULL;
4794 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4795 4782
4796 tg3_set_txd(tp, entry, mapping, len, 4783 tg3_set_txd(tp, entry, mapping, len,
4797 base_flags, (i == last) | (mss << 1)); 4784 base_flags, (i == last) | (mss << 1));
@@ -4859,9 +4846,10 @@ tg3_tso_bug_end:
4859static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) 4846static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4860{ 4847{
4861 struct tg3 *tp = netdev_priv(dev); 4848 struct tg3 *tp = netdev_priv(dev);
4862 dma_addr_t mapping;
4863 u32 len, entry, base_flags, mss; 4849 u32 len, entry, base_flags, mss;
4850 struct skb_shared_info *sp;
4864 int would_hit_hwbug; 4851 int would_hit_hwbug;
4852 dma_addr_t mapping;
4865 4853
4866 len = skb_headlen(skb); 4854 len = skb_headlen(skb);
4867 4855
@@ -4942,11 +4930,16 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4942 (vlan_tx_tag_get(skb) << 16)); 4930 (vlan_tx_tag_get(skb) << 16));
4943#endif 4931#endif
4944 4932
4945 /* Queue skb data, a.k.a. the main skb fragment. */ 4933 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4946 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 4934 dev_kfree_skb(skb);
4935 goto out_unlock;
4936 }
4937
4938 sp = skb_shinfo(skb);
4939
4940 mapping = sp->dma_maps[0];
4947 4941
4948 tp->tx_buffers[entry].skb = skb; 4942 tp->tx_buffers[entry].skb = skb;
4949 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4950 4943
4951 would_hit_hwbug = 0; 4944 would_hit_hwbug = 0;
4952 4945
@@ -4969,13 +4962,9 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4969 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4962 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4970 4963
4971 len = frag->size; 4964 len = frag->size;
4972 mapping = pci_map_page(tp->pdev, 4965 mapping = sp->dma_maps[i + 1];
4973 frag->page,
4974 frag->page_offset,
4975 len, PCI_DMA_TODEVICE);
4976 4966
4977 tp->tx_buffers[entry].skb = NULL; 4967 tp->tx_buffers[entry].skb = NULL;
4978 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4979 4968
4980 if (tg3_4g_overflow_test(mapping, len)) 4969 if (tg3_4g_overflow_test(mapping, len))
4981 would_hit_hwbug = 1; 4970 would_hit_hwbug = 1;
@@ -5128,7 +5117,6 @@ static void tg3_free_rings(struct tg3 *tp)
5128 for (i = 0; i < TG3_TX_RING_SIZE; ) { 5117 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5129 struct tx_ring_info *txp; 5118 struct tx_ring_info *txp;
5130 struct sk_buff *skb; 5119 struct sk_buff *skb;
5131 int j;
5132 5120
5133 txp = &tp->tx_buffers[i]; 5121 txp = &tp->tx_buffers[i];
5134 skb = txp->skb; 5122 skb = txp->skb;
@@ -5138,22 +5126,11 @@ static void tg3_free_rings(struct tg3 *tp)
5138 continue; 5126 continue;
5139 } 5127 }
5140 5128
5141 pci_unmap_single(tp->pdev, 5129 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5142 pci_unmap_addr(txp, mapping),
5143 skb_headlen(skb),
5144 PCI_DMA_TODEVICE);
5145 txp->skb = NULL;
5146 5130
5147 i++; 5131 txp->skb = NULL;
5148 5132
5149 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { 5133 i += skb_shinfo(skb)->nr_frags + 1;
5150 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
5151 pci_unmap_page(tp->pdev,
5152 pci_unmap_addr(txp, mapping),
5153 skb_shinfo(skb)->frags[j].size,
5154 PCI_DMA_TODEVICE);
5155 i++;
5156 }
5157 5134
5158 dev_kfree_skb_any(skb); 5135 dev_kfree_skb_any(skb);
5159 } 5136 }
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index f5b8cab8d4b5..6c7b5e303dbb 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2197,7 +2197,6 @@ struct ring_info {
2197 2197
2198struct tx_ring_info { 2198struct tx_ring_info {
2199 struct sk_buff *skb; 2199 struct sk_buff *skb;
2200 DECLARE_PCI_UNMAP_ADDR(mapping)
2201 u32 prev_vlan_tag; 2200 u32 prev_vlan_tag;
2202}; 2201};
2203 2202
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 43fde99b24ac..eb1da6f0b086 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
263 return; 263 return;
264 udelay(10); 264 udelay(10);
265 } 265 }
266 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 266 printk(KERN_ERR "%s function time out \n", __func__);
267} 267}
268 268
269static int mii_speed(struct mii_if_info *mii) 269static int mii_speed(struct mii_if_info *mii)
@@ -1059,7 +1059,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
1059 return; 1059 return;
1060 udelay(10); 1060 udelay(10);
1061 } 1061 }
1062 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1062 printk(KERN_ERR "%s function time out \n", __func__);
1063} 1063}
1064 1064
1065static void tsi108_reset_ether(struct tsi108_prv_data * data) 1065static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1244,7 +1244,7 @@ static void tsi108_init_phy(struct net_device *dev)
1244 udelay(10); 1244 udelay(10);
1245 } 1245 }
1246 if (i == 0) 1246 if (i == 0)
1247 printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1247 printk(KERN_ERR "%s function time out \n", __func__);
1248 1248
1249 if (data->phy_type == TSI108_PHY_BCM54XX) { 1249 if (data->phy_type == TSI108_PHY_BCM54XX) {
1250 tsi108_write_mii(data, 0x09, 0x0300); 1250 tsi108_write_mii(data, 0x09, 0x0300);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 9281d06d5aaa..f54c45049d50 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1418,7 +1418,6 @@ static int de_close (struct net_device *dev)
1418 1418
1419 de_free_rings(de); 1419 de_free_rings(de);
1420 de_adapter_sleep(de); 1420 de_adapter_sleep(de);
1421 pci_disable_device(de->pdev);
1422 return 0; 1421 return 0;
1423} 1422}
1424 1423
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 617ef41bdfea..6444cbec0bdc 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -832,7 +832,7 @@ struct de4x5_private {
832 s32 csr14; /* Saved SIA TX/RX Register */ 832 s32 csr14; /* Saved SIA TX/RX Register */
833 s32 csr15; /* Saved SIA General Register */ 833 s32 csr15; /* Saved SIA General Register */
834 int save_cnt; /* Flag if state already saved */ 834 int save_cnt; /* Flag if state already saved */
835 struct sk_buff *skb; /* Save the (re-ordered) skb's */ 835 struct sk_buff_head queue; /* Save the (re-ordered) skb's */
836 } cache; 836 } cache;
837 struct de4x5_srom srom; /* A copy of the SROM */ 837 struct de4x5_srom srom; /* A copy of the SROM */
838 int cfrv; /* Card CFRV copy */ 838 int cfrv; /* Card CFRV copy */
@@ -1128,6 +1128,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1128 printk(" which has an Ethernet PROM CRC error.\n"); 1128 printk(" which has an Ethernet PROM CRC error.\n");
1129 return -ENXIO; 1129 return -ENXIO;
1130 } else { 1130 } else {
1131 skb_queue_head_init(&lp->cache.queue);
1131 lp->cache.gepc = GEP_INIT; 1132 lp->cache.gepc = GEP_INIT;
1132 lp->asBit = GEP_SLNK; 1133 lp->asBit = GEP_SLNK;
1133 lp->asPolarity = GEP_SLNK; 1134 lp->asPolarity = GEP_SLNK;
@@ -1487,7 +1488,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1487 } 1488 }
1488 } else if (skb->len > 0) { 1489 } else if (skb->len > 0) {
1489 /* If we already have stuff queued locally, use that first */ 1490 /* If we already have stuff queued locally, use that first */
1490 if (lp->cache.skb && !lp->interrupt) { 1491 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1491 de4x5_put_cache(dev, skb); 1492 de4x5_put_cache(dev, skb);
1492 skb = de4x5_get_cache(dev); 1493 skb = de4x5_get_cache(dev);
1493 } 1494 }
@@ -1580,7 +1581,7 @@ de4x5_interrupt(int irq, void *dev_id)
1580 1581
1581 /* Load the TX ring with any locally stored packets */ 1582 /* Load the TX ring with any locally stored packets */
1582 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { 1583 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1583 while (lp->cache.skb && !netif_queue_stopped(dev) && lp->tx_enable) { 1584 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1584 de4x5_queue_pkt(de4x5_get_cache(dev), dev); 1585 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1585 } 1586 }
1586 lp->cache.lock = 0; 1587 lp->cache.lock = 0;
@@ -3679,11 +3680,7 @@ de4x5_free_tx_buffs(struct net_device *dev)
3679 } 3680 }
3680 3681
3681 /* Unload the locally queued packets */ 3682 /* Unload the locally queued packets */
3682 while (lp->cache.skb) { 3683 __skb_queue_purge(&lp->cache.queue);
3683 dev_kfree_skb(de4x5_get_cache(dev));
3684 }
3685
3686 return;
3687} 3684}
3688 3685
3689/* 3686/*
@@ -3781,43 +3778,24 @@ static void
3781de4x5_put_cache(struct net_device *dev, struct sk_buff *skb) 3778de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3782{ 3779{
3783 struct de4x5_private *lp = netdev_priv(dev); 3780 struct de4x5_private *lp = netdev_priv(dev);
3784 struct sk_buff *p;
3785
3786 if (lp->cache.skb) {
3787 for (p=lp->cache.skb; p->next; p=p->next);
3788 p->next = skb;
3789 } else {
3790 lp->cache.skb = skb;
3791 }
3792 skb->next = NULL;
3793 3781
3794 return; 3782 __skb_queue_tail(&lp->cache.queue, skb);
3795} 3783}
3796 3784
3797static void 3785static void
3798de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb) 3786de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3799{ 3787{
3800 struct de4x5_private *lp = netdev_priv(dev); 3788 struct de4x5_private *lp = netdev_priv(dev);
3801 struct sk_buff *p = lp->cache.skb;
3802
3803 lp->cache.skb = skb;
3804 skb->next = p;
3805 3789
3806 return; 3790 __skb_queue_head(&lp->cache.queue, skb);
3807} 3791}
3808 3792
3809static struct sk_buff * 3793static struct sk_buff *
3810de4x5_get_cache(struct net_device *dev) 3794de4x5_get_cache(struct net_device *dev)
3811{ 3795{
3812 struct de4x5_private *lp = netdev_priv(dev); 3796 struct de4x5_private *lp = netdev_priv(dev);
3813 struct sk_buff *p = lp->cache.skb;
3814 3797
3815 if (p) { 3798 return __skb_dequeue(&lp->cache.queue);
3816 lp->cache.skb = p->next;
3817 p->next = NULL;
3818 }
3819
3820 return p;
3821} 3799}
3822 3800
3823/* 3801/*
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 8f944e57fd55..c87747bb24c5 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -400,7 +400,7 @@ static struct enet_addr_container *get_enet_addr_container(void)
400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL); 400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
401 if (!enet_addr_cont) { 401 if (!enet_addr_cont) {
402 ugeth_err("%s: No memory for enet_addr_container object.", 402 ugeth_err("%s: No memory for enet_addr_container object.",
403 __FUNCTION__); 403 __func__);
404 return NULL; 404 return NULL;
405 } 405 }
406 406
@@ -427,7 +427,7 @@ static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
428 428
429 if (!(paddr_num < NUM_OF_PADDRS)) { 429 if (!(paddr_num < NUM_OF_PADDRS)) {
430 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); 430 ugeth_warn("%s: Illegal paddr_num.", __func__);
431 return -EINVAL; 431 return -EINVAL;
432 } 432 }
433 433
@@ -447,7 +447,7 @@ static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
448 448
449 if (!(paddr_num < NUM_OF_PADDRS)) { 449 if (!(paddr_num < NUM_OF_PADDRS)) {
450 ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); 450 ugeth_warn("%s: Illagel paddr_num.", __func__);
451 return -EINVAL; 451 return -EINVAL;
452 } 452 }
453 453
@@ -1441,7 +1441,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1441 u32 upsmr, maccfg2, tbiBaseAddress; 1441 u32 upsmr, maccfg2, tbiBaseAddress;
1442 u16 value; 1442 u16 value;
1443 1443
1444 ugeth_vdbg("%s: IN", __FUNCTION__); 1444 ugeth_vdbg("%s: IN", __func__);
1445 1445
1446 ug_info = ugeth->ug_info; 1446 ug_info = ugeth->ug_info;
1447 ug_regs = ugeth->ug_regs; 1447 ug_regs = ugeth->ug_regs;
@@ -1504,7 +1504,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1504 if (ret_val != 0) { 1504 if (ret_val != 0) {
1505 if (netif_msg_probe(ugeth)) 1505 if (netif_msg_probe(ugeth))
1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
1507 __FUNCTION__); 1507 __func__);
1508 return ret_val; 1508 return ret_val;
1509 } 1509 }
1510 1510
@@ -1744,7 +1744,7 @@ static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1744 /* check if the UCC number is in range. */ 1744 /* check if the UCC number is in range. */
1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1746 if (netif_msg_probe(ugeth)) 1746 if (netif_msg_probe(ugeth))
1747 ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1747 ugeth_err("%s: ucc_num out of range.", __func__);
1748 return -EINVAL; 1748 return -EINVAL;
1749 } 1749 }
1750 1750
@@ -1773,7 +1773,7 @@ static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
1773 /* check if the UCC number is in range. */ 1773 /* check if the UCC number is in range. */
1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1775 if (netif_msg_probe(ugeth)) 1775 if (netif_msg_probe(ugeth))
1776 ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1776 ugeth_err("%s: ucc_num out of range.", __func__);
1777 return -EINVAL; 1777 return -EINVAL;
1778 } 1778 }
1779 1779
@@ -2062,7 +2062,7 @@ static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth
2062 ugeth_warn 2062 ugeth_warn
2063 ("%s: multicast address added to paddr will have no " 2063 ("%s: multicast address added to paddr will have no "
2064 "effect - is this what you wanted?", 2064 "effect - is this what you wanted?",
2065 __FUNCTION__); 2065 __func__);
2066 2066
2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ 2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2068 /* store address in our database */ 2068 /* store address in our database */
@@ -2278,7 +2278,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2278 struct phy_device *phydev = ugeth->phydev; 2278 struct phy_device *phydev = ugeth->phydev;
2279 u32 tempval; 2279 u32 tempval;
2280 2280
2281 ugeth_vdbg("%s: IN", __FUNCTION__); 2281 ugeth_vdbg("%s: IN", __func__);
2282 2282
2283 /* Disable the controller */ 2283 /* Disable the controller */
2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
@@ -2315,7 +2315,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2316 if (netif_msg_probe(ugeth)) 2316 if (netif_msg_probe(ugeth))
2317 ugeth_err("%s: Bad memory partition value.", 2317 ugeth_err("%s: Bad memory partition value.",
2318 __FUNCTION__); 2318 __func__);
2319 return -EINVAL; 2319 return -EINVAL;
2320 } 2320 }
2321 2321
@@ -2327,7 +2327,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2327 if (netif_msg_probe(ugeth)) 2327 if (netif_msg_probe(ugeth))
2328 ugeth_err 2328 ugeth_err
2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
2330 __FUNCTION__); 2330 __func__);
2331 return -EINVAL; 2331 return -EINVAL;
2332 } 2332 }
2333 } 2333 }
@@ -2338,7 +2338,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2338 if (netif_msg_probe(ugeth)) 2338 if (netif_msg_probe(ugeth))
2339 ugeth_err 2339 ugeth_err
2340 ("%s: Tx BD ring length must be no smaller than 2.", 2340 ("%s: Tx BD ring length must be no smaller than 2.",
2341 __FUNCTION__); 2341 __func__);
2342 return -EINVAL; 2342 return -EINVAL;
2343 } 2343 }
2344 } 2344 }
@@ -2349,21 +2349,21 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2349 if (netif_msg_probe(ugeth)) 2349 if (netif_msg_probe(ugeth))
2350 ugeth_err 2350 ugeth_err
2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2352 __FUNCTION__); 2352 __func__);
2353 return -EINVAL; 2353 return -EINVAL;
2354 } 2354 }
2355 2355
2356 /* num Tx queues */ 2356 /* num Tx queues */
2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2358 if (netif_msg_probe(ugeth)) 2358 if (netif_msg_probe(ugeth))
2359 ugeth_err("%s: number of tx queues too large.", __FUNCTION__); 2359 ugeth_err("%s: number of tx queues too large.", __func__);
2360 return -EINVAL; 2360 return -EINVAL;
2361 } 2361 }
2362 2362
2363 /* num Rx queues */ 2363 /* num Rx queues */
2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2365 if (netif_msg_probe(ugeth)) 2365 if (netif_msg_probe(ugeth))
2366 ugeth_err("%s: number of rx queues too large.", __FUNCTION__); 2366 ugeth_err("%s: number of rx queues too large.", __func__);
2367 return -EINVAL; 2367 return -EINVAL;
2368 } 2368 }
2369 2369
@@ -2374,7 +2374,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2374 ugeth_err 2374 ugeth_err
2375 ("%s: VLAN priority table entry must not be" 2375 ("%s: VLAN priority table entry must not be"
2376 " larger than number of Rx queues.", 2376 " larger than number of Rx queues.",
2377 __FUNCTION__); 2377 __func__);
2378 return -EINVAL; 2378 return -EINVAL;
2379 } 2379 }
2380 } 2380 }
@@ -2386,7 +2386,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2386 ugeth_err 2386 ugeth_err
2387 ("%s: IP priority table entry must not be" 2387 ("%s: IP priority table entry must not be"
2388 " larger than number of Rx queues.", 2388 " larger than number of Rx queues.",
2389 __FUNCTION__); 2389 __func__);
2390 return -EINVAL; 2390 return -EINVAL;
2391 } 2391 }
2392 } 2392 }
@@ -2394,7 +2394,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2394 if (ug_info->cam && !ug_info->ecamptr) { 2394 if (ug_info->cam && !ug_info->ecamptr) {
2395 if (netif_msg_probe(ugeth)) 2395 if (netif_msg_probe(ugeth))
2396 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2396 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2397 __FUNCTION__); 2397 __func__);
2398 return -EINVAL; 2398 return -EINVAL;
2399 } 2399 }
2400 2400
@@ -2404,7 +2404,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2404 if (netif_msg_probe(ugeth)) 2404 if (netif_msg_probe(ugeth))
2405 ugeth_err("%s: Number of station addresses greater than 1 " 2405 ugeth_err("%s: Number of station addresses greater than 1 "
2406 "not allowed in extended parsing mode.", 2406 "not allowed in extended parsing mode.",
2407 __FUNCTION__); 2407 __func__);
2408 return -EINVAL; 2408 return -EINVAL;
2409 } 2409 }
2410 2410
@@ -2418,7 +2418,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2418 /* Initialize the general fast UCC block. */ 2418 /* Initialize the general fast UCC block. */
2419 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2419 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2420 if (netif_msg_probe(ugeth)) 2420 if (netif_msg_probe(ugeth))
2421 ugeth_err("%s: Failed to init uccf.", __FUNCTION__); 2421 ugeth_err("%s: Failed to init uccf.", __func__);
2422 ucc_geth_memclean(ugeth); 2422 ucc_geth_memclean(ugeth);
2423 return -ENOMEM; 2423 return -ENOMEM;
2424 } 2424 }
@@ -2448,7 +2448,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2448 u8 __iomem *endOfRing; 2448 u8 __iomem *endOfRing;
2449 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2449 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2450 2450
2451 ugeth_vdbg("%s: IN", __FUNCTION__); 2451 ugeth_vdbg("%s: IN", __func__);
2452 uccf = ugeth->uccf; 2452 uccf = ugeth->uccf;
2453 ug_info = ugeth->ug_info; 2453 ug_info = ugeth->ug_info;
2454 uf_info = &ug_info->uf_info; 2454 uf_info = &ug_info->uf_info;
@@ -2474,7 +2474,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2474 default: 2474 default:
2475 if (netif_msg_ifup(ugeth)) 2475 if (netif_msg_ifup(ugeth))
2476 ugeth_err("%s: Bad number of Rx threads value.", 2476 ugeth_err("%s: Bad number of Rx threads value.",
2477 __FUNCTION__); 2477 __func__);
2478 ucc_geth_memclean(ugeth); 2478 ucc_geth_memclean(ugeth);
2479 return -EINVAL; 2479 return -EINVAL;
2480 break; 2480 break;
@@ -2499,7 +2499,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2499 default: 2499 default:
2500 if (netif_msg_ifup(ugeth)) 2500 if (netif_msg_ifup(ugeth))
2501 ugeth_err("%s: Bad number of Tx threads value.", 2501 ugeth_err("%s: Bad number of Tx threads value.",
2502 __FUNCTION__); 2502 __func__);
2503 ucc_geth_memclean(ugeth); 2503 ucc_geth_memclean(ugeth);
2504 return -EINVAL; 2504 return -EINVAL;
2505 break; 2505 break;
@@ -2553,7 +2553,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2553 if (ret_val != 0) { 2553 if (ret_val != 0) {
2554 if (netif_msg_ifup(ugeth)) 2554 if (netif_msg_ifup(ugeth))
2555 ugeth_err("%s: IPGIFG initialization parameter too large.", 2555 ugeth_err("%s: IPGIFG initialization parameter too large.",
2556 __FUNCTION__); 2556 __func__);
2557 ucc_geth_memclean(ugeth); 2557 ucc_geth_memclean(ugeth);
2558 return ret_val; 2558 return ret_val;
2559 } 2559 }
@@ -2571,7 +2571,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2571 if (ret_val != 0) { 2571 if (ret_val != 0) {
2572 if (netif_msg_ifup(ugeth)) 2572 if (netif_msg_ifup(ugeth))
2573 ugeth_err("%s: Half Duplex initialization parameter too large.", 2573 ugeth_err("%s: Half Duplex initialization parameter too large.",
2574 __FUNCTION__); 2574 __func__);
2575 ucc_geth_memclean(ugeth); 2575 ucc_geth_memclean(ugeth);
2576 return ret_val; 2576 return ret_val;
2577 } 2577 }
@@ -2626,7 +2626,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2626 if (netif_msg_ifup(ugeth)) 2626 if (netif_msg_ifup(ugeth))
2627 ugeth_err 2627 ugeth_err
2628 ("%s: Can not allocate memory for Tx bd rings.", 2628 ("%s: Can not allocate memory for Tx bd rings.",
2629 __FUNCTION__); 2629 __func__);
2630 ucc_geth_memclean(ugeth); 2630 ucc_geth_memclean(ugeth);
2631 return -ENOMEM; 2631 return -ENOMEM;
2632 } 2632 }
@@ -2662,7 +2662,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2662 if (netif_msg_ifup(ugeth)) 2662 if (netif_msg_ifup(ugeth))
2663 ugeth_err 2663 ugeth_err
2664 ("%s: Can not allocate memory for Rx bd rings.", 2664 ("%s: Can not allocate memory for Rx bd rings.",
2665 __FUNCTION__); 2665 __func__);
2666 ucc_geth_memclean(ugeth); 2666 ucc_geth_memclean(ugeth);
2667 return -ENOMEM; 2667 return -ENOMEM;
2668 } 2668 }
@@ -2678,7 +2678,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2678 if (ugeth->tx_skbuff[j] == NULL) { 2678 if (ugeth->tx_skbuff[j] == NULL) {
2679 if (netif_msg_ifup(ugeth)) 2679 if (netif_msg_ifup(ugeth))
2680 ugeth_err("%s: Could not allocate tx_skbuff", 2680 ugeth_err("%s: Could not allocate tx_skbuff",
2681 __FUNCTION__); 2681 __func__);
2682 ucc_geth_memclean(ugeth); 2682 ucc_geth_memclean(ugeth);
2683 return -ENOMEM; 2683 return -ENOMEM;
2684 } 2684 }
@@ -2710,7 +2710,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2710 if (ugeth->rx_skbuff[j] == NULL) { 2710 if (ugeth->rx_skbuff[j] == NULL) {
2711 if (netif_msg_ifup(ugeth)) 2711 if (netif_msg_ifup(ugeth))
2712 ugeth_err("%s: Could not allocate rx_skbuff", 2712 ugeth_err("%s: Could not allocate rx_skbuff",
2713 __FUNCTION__); 2713 __func__);
2714 ucc_geth_memclean(ugeth); 2714 ucc_geth_memclean(ugeth);
2715 return -ENOMEM; 2715 return -ENOMEM;
2716 } 2716 }
@@ -2744,7 +2744,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2744 if (netif_msg_ifup(ugeth)) 2744 if (netif_msg_ifup(ugeth))
2745 ugeth_err 2745 ugeth_err
2746 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2746 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2747 __FUNCTION__); 2747 __func__);
2748 ucc_geth_memclean(ugeth); 2748 ucc_geth_memclean(ugeth);
2749 return -ENOMEM; 2749 return -ENOMEM;
2750 } 2750 }
@@ -2767,7 +2767,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2767 if (netif_msg_ifup(ugeth)) 2767 if (netif_msg_ifup(ugeth))
2768 ugeth_err 2768 ugeth_err
2769 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2769 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2770 __FUNCTION__); 2770 __func__);
2771 ucc_geth_memclean(ugeth); 2771 ucc_geth_memclean(ugeth);
2772 return -ENOMEM; 2772 return -ENOMEM;
2773 } 2773 }
@@ -2797,7 +2797,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2797 if (netif_msg_ifup(ugeth)) 2797 if (netif_msg_ifup(ugeth))
2798 ugeth_err 2798 ugeth_err
2799 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2799 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2800 __FUNCTION__); 2800 __func__);
2801 ucc_geth_memclean(ugeth); 2801 ucc_geth_memclean(ugeth);
2802 return -ENOMEM; 2802 return -ENOMEM;
2803 } 2803 }
@@ -2841,7 +2841,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2841 if (netif_msg_ifup(ugeth)) 2841 if (netif_msg_ifup(ugeth))
2842 ugeth_err 2842 ugeth_err
2843 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2843 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2844 __FUNCTION__); 2844 __func__);
2845 ucc_geth_memclean(ugeth); 2845 ucc_geth_memclean(ugeth);
2846 return -ENOMEM; 2846 return -ENOMEM;
2847 } 2847 }
@@ -2892,7 +2892,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2892 ugeth_err 2892 ugeth_err
2893 ("%s: Can not allocate DPRAM memory for" 2893 ("%s: Can not allocate DPRAM memory for"
2894 " p_tx_fw_statistics_pram.", 2894 " p_tx_fw_statistics_pram.",
2895 __FUNCTION__); 2895 __func__);
2896 ucc_geth_memclean(ugeth); 2896 ucc_geth_memclean(ugeth);
2897 return -ENOMEM; 2897 return -ENOMEM;
2898 } 2898 }
@@ -2932,7 +2932,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2932 if (netif_msg_ifup(ugeth)) 2932 if (netif_msg_ifup(ugeth))
2933 ugeth_err 2933 ugeth_err
2934 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2934 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2935 __FUNCTION__); 2935 __func__);
2936 ucc_geth_memclean(ugeth); 2936 ucc_geth_memclean(ugeth);
2937 return -ENOMEM; 2937 return -ENOMEM;
2938 } 2938 }
@@ -2954,7 +2954,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2954 if (netif_msg_ifup(ugeth)) 2954 if (netif_msg_ifup(ugeth))
2955 ugeth_err 2955 ugeth_err
2956 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2956 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2957 __FUNCTION__); 2957 __func__);
2958 ucc_geth_memclean(ugeth); 2958 ucc_geth_memclean(ugeth);
2959 return -ENOMEM; 2959 return -ENOMEM;
2960 } 2960 }
@@ -2978,7 +2978,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2978 if (netif_msg_ifup(ugeth)) 2978 if (netif_msg_ifup(ugeth))
2979 ugeth_err 2979 ugeth_err
2980 ("%s: Can not allocate DPRAM memory for" 2980 ("%s: Can not allocate DPRAM memory for"
2981 " p_rx_fw_statistics_pram.", __FUNCTION__); 2981 " p_rx_fw_statistics_pram.", __func__);
2982 ucc_geth_memclean(ugeth); 2982 ucc_geth_memclean(ugeth);
2983 return -ENOMEM; 2983 return -ENOMEM;
2984 } 2984 }
@@ -3001,7 +3001,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3001 if (netif_msg_ifup(ugeth)) 3001 if (netif_msg_ifup(ugeth))
3002 ugeth_err 3002 ugeth_err
3003 ("%s: Can not allocate DPRAM memory for" 3003 ("%s: Can not allocate DPRAM memory for"
3004 " p_rx_irq_coalescing_tbl.", __FUNCTION__); 3004 " p_rx_irq_coalescing_tbl.", __func__);
3005 ucc_geth_memclean(ugeth); 3005 ucc_geth_memclean(ugeth);
3006 return -ENOMEM; 3006 return -ENOMEM;
3007 } 3007 }
@@ -3070,7 +3070,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3070 if (netif_msg_ifup(ugeth)) 3070 if (netif_msg_ifup(ugeth))
3071 ugeth_err 3071 ugeth_err
3072 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 3072 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3073 __FUNCTION__); 3073 __func__);
3074 ucc_geth_memclean(ugeth); 3074 ucc_geth_memclean(ugeth);
3075 return -ENOMEM; 3075 return -ENOMEM;
3076 } 3076 }
@@ -3147,7 +3147,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3147 if (!ug_info->extendedFilteringChainPointer) { 3147 if (!ug_info->extendedFilteringChainPointer) {
3148 if (netif_msg_ifup(ugeth)) 3148 if (netif_msg_ifup(ugeth))
3149 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 3149 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3150 __FUNCTION__); 3150 __func__);
3151 ucc_geth_memclean(ugeth); 3151 ucc_geth_memclean(ugeth);
3152 return -EINVAL; 3152 return -EINVAL;
3153 } 3153 }
@@ -3161,7 +3161,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3161 if (netif_msg_ifup(ugeth)) 3161 if (netif_msg_ifup(ugeth))
3162 ugeth_err 3162 ugeth_err
3163 ("%s: Can not allocate DPRAM memory for" 3163 ("%s: Can not allocate DPRAM memory for"
3164 " p_exf_glbl_param.", __FUNCTION__); 3164 " p_exf_glbl_param.", __func__);
3165 ucc_geth_memclean(ugeth); 3165 ucc_geth_memclean(ugeth);
3166 return -ENOMEM; 3166 return -ENOMEM;
3167 } 3167 }
@@ -3209,7 +3209,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3209 if (netif_msg_ifup(ugeth)) 3209 if (netif_msg_ifup(ugeth))
3210 ugeth_err 3210 ugeth_err
3211 ("%s: Can not allocate memory for" 3211 ("%s: Can not allocate memory for"
3212 " p_UccInitEnetParamShadows.", __FUNCTION__); 3212 " p_UccInitEnetParamShadows.", __func__);
3213 ucc_geth_memclean(ugeth); 3213 ucc_geth_memclean(ugeth);
3214 return -ENOMEM; 3214 return -ENOMEM;
3215 } 3215 }
@@ -3244,7 +3244,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3244 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3244 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3245 if (netif_msg_ifup(ugeth)) 3245 if (netif_msg_ifup(ugeth))
3246 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3246 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3247 __FUNCTION__); 3247 __func__);
3248 ucc_geth_memclean(ugeth); 3248 ucc_geth_memclean(ugeth);
3249 return -EINVAL; 3249 return -EINVAL;
3250 } 3250 }
@@ -3271,7 +3271,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3271 ug_info->riscRx, 1)) != 0) { 3271 ug_info->riscRx, 1)) != 0) {
3272 if (netif_msg_ifup(ugeth)) 3272 if (netif_msg_ifup(ugeth))
3273 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3273 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3274 __FUNCTION__); 3274 __func__);
3275 ucc_geth_memclean(ugeth); 3275 ucc_geth_memclean(ugeth);
3276 return ret_val; 3276 return ret_val;
3277 } 3277 }
@@ -3287,7 +3287,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3287 ug_info->riscTx, 0)) != 0) { 3287 ug_info->riscTx, 0)) != 0) {
3288 if (netif_msg_ifup(ugeth)) 3288 if (netif_msg_ifup(ugeth))
3289 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3289 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3290 __FUNCTION__); 3290 __func__);
3291 ucc_geth_memclean(ugeth); 3291 ucc_geth_memclean(ugeth);
3292 return ret_val; 3292 return ret_val;
3293 } 3293 }
@@ -3297,7 +3297,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3297 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3297 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3298 if (netif_msg_ifup(ugeth)) 3298 if (netif_msg_ifup(ugeth))
3299 ugeth_err("%s: Can not fill Rx bds with buffers.", 3299 ugeth_err("%s: Can not fill Rx bds with buffers.",
3300 __FUNCTION__); 3300 __func__);
3301 ucc_geth_memclean(ugeth); 3301 ucc_geth_memclean(ugeth);
3302 return ret_val; 3302 return ret_val;
3303 } 3303 }
@@ -3309,7 +3309,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3309 if (netif_msg_ifup(ugeth)) 3309 if (netif_msg_ifup(ugeth))
3310 ugeth_err 3310 ugeth_err
3311 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3311 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3312 __FUNCTION__); 3312 __func__);
3313 ucc_geth_memclean(ugeth); 3313 ucc_geth_memclean(ugeth);
3314 return -ENOMEM; 3314 return -ENOMEM;
3315 } 3315 }
@@ -3360,7 +3360,7 @@ static void ucc_geth_timeout(struct net_device *dev)
3360{ 3360{
3361 struct ucc_geth_private *ugeth = netdev_priv(dev); 3361 struct ucc_geth_private *ugeth = netdev_priv(dev);
3362 3362
3363 ugeth_vdbg("%s: IN", __FUNCTION__); 3363 ugeth_vdbg("%s: IN", __func__);
3364 3364
3365 dev->stats.tx_errors++; 3365 dev->stats.tx_errors++;
3366 3366
@@ -3386,7 +3386,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3386 u32 bd_status; 3386 u32 bd_status;
3387 u8 txQ = 0; 3387 u8 txQ = 0;
3388 3388
3389 ugeth_vdbg("%s: IN", __FUNCTION__); 3389 ugeth_vdbg("%s: IN", __func__);
3390 3390
3391 spin_lock_irq(&ugeth->lock); 3391 spin_lock_irq(&ugeth->lock);
3392 3392
@@ -3459,7 +3459,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3459 u8 *bdBuffer; 3459 u8 *bdBuffer;
3460 struct net_device *dev; 3460 struct net_device *dev;
3461 3461
3462 ugeth_vdbg("%s: IN", __FUNCTION__); 3462 ugeth_vdbg("%s: IN", __func__);
3463 3463
3464 dev = ugeth->dev; 3464 dev = ugeth->dev;
3465 3465
@@ -3481,7 +3481,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3481 (bd_status & R_ERRORS_FATAL)) { 3481 (bd_status & R_ERRORS_FATAL)) {
3482 if (netif_msg_rx_err(ugeth)) 3482 if (netif_msg_rx_err(ugeth))
3483 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3483 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
3484 __FUNCTION__, __LINE__, (u32) skb); 3484 __func__, __LINE__, (u32) skb);
3485 if (skb) 3485 if (skb)
3486 dev_kfree_skb_any(skb); 3486 dev_kfree_skb_any(skb);
3487 3487
@@ -3507,7 +3507,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3507 skb = get_new_skb(ugeth, bd); 3507 skb = get_new_skb(ugeth, bd);
3508 if (!skb) { 3508 if (!skb) {
3509 if (netif_msg_rx_err(ugeth)) 3509 if (netif_msg_rx_err(ugeth))
3510 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); 3510 ugeth_warn("%s: No Rx Data Buffer", __func__);
3511 dev->stats.rx_dropped++; 3511 dev->stats.rx_dropped++;
3512 break; 3512 break;
3513 } 3513 }
@@ -3613,7 +3613,7 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3613 register u32 tx_mask; 3613 register u32 tx_mask;
3614 u8 i; 3614 u8 i;
3615 3615
3616 ugeth_vdbg("%s: IN", __FUNCTION__); 3616 ugeth_vdbg("%s: IN", __func__);
3617 3617
3618 uccf = ugeth->uccf; 3618 uccf = ugeth->uccf;
3619 ug_info = ugeth->ug_info; 3619 ug_info = ugeth->ug_info;
@@ -3683,13 +3683,13 @@ static int ucc_geth_open(struct net_device *dev)
3683 struct ucc_geth_private *ugeth = netdev_priv(dev); 3683 struct ucc_geth_private *ugeth = netdev_priv(dev);
3684 int err; 3684 int err;
3685 3685
3686 ugeth_vdbg("%s: IN", __FUNCTION__); 3686 ugeth_vdbg("%s: IN", __func__);
3687 3687
3688 /* Test station address */ 3688 /* Test station address */
3689 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3689 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3690 if (netif_msg_ifup(ugeth)) 3690 if (netif_msg_ifup(ugeth))
3691 ugeth_err("%s: Multicast address used for station address" 3691 ugeth_err("%s: Multicast address used for station address"
3692 " - is this what you wanted?", __FUNCTION__); 3692 " - is this what you wanted?", __func__);
3693 return -EINVAL; 3693 return -EINVAL;
3694 } 3694 }
3695 3695
@@ -3772,7 +3772,7 @@ static int ucc_geth_close(struct net_device *dev)
3772{ 3772{
3773 struct ucc_geth_private *ugeth = netdev_priv(dev); 3773 struct ucc_geth_private *ugeth = netdev_priv(dev);
3774 3774
3775 ugeth_vdbg("%s: IN", __FUNCTION__); 3775 ugeth_vdbg("%s: IN", __func__);
3776 3776
3777 napi_disable(&ugeth->napi); 3777 napi_disable(&ugeth->napi);
3778 3778
@@ -3840,7 +3840,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3840 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, 3840 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3841 }; 3841 };
3842 3842
3843 ugeth_vdbg("%s: IN", __FUNCTION__); 3843 ugeth_vdbg("%s: IN", __func__);
3844 3844
3845 prop = of_get_property(np, "cell-index", NULL); 3845 prop = of_get_property(np, "cell-index", NULL);
3846 if (!prop) { 3846 if (!prop) {
@@ -3857,7 +3857,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3857 if (ug_info == NULL) { 3857 if (ug_info == NULL) {
3858 if (netif_msg_probe(&debug)) 3858 if (netif_msg_probe(&debug))
3859 ugeth_err("%s: [%d] Missing additional data!", 3859 ugeth_err("%s: [%d] Missing additional data!",
3860 __FUNCTION__, ucc_num); 3860 __func__, ucc_num);
3861 return -ENODEV; 3861 return -ENODEV;
3862 } 3862 }
3863 3863
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 6e42b5a8c22b..1164c52e2c0a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -92,9 +92,6 @@
92 92
93#define HSO_NET_TX_TIMEOUT (HZ*10) 93#define HSO_NET_TX_TIMEOUT (HZ*10)
94 94
95/* Serial port defines and structs. */
96#define HSO_SERIAL_FLAG_RX_SENT 0
97
98#define HSO_SERIAL_MAGIC 0x48534f31 95#define HSO_SERIAL_MAGIC 0x48534f31
99 96
100/* Number of ttys to handle */ 97/* Number of ttys to handle */
@@ -179,6 +176,12 @@ struct hso_net {
179 unsigned long flags; 176 unsigned long flags;
180}; 177};
181 178
179enum rx_ctrl_state{
180 RX_IDLE,
181 RX_SENT,
182 RX_PENDING
183};
184
182struct hso_serial { 185struct hso_serial {
183 struct hso_device *parent; 186 struct hso_device *parent;
184 int magic; 187 int magic;
@@ -205,7 +208,7 @@ struct hso_serial {
205 struct usb_endpoint_descriptor *in_endp; 208 struct usb_endpoint_descriptor *in_endp;
206 struct usb_endpoint_descriptor *out_endp; 209 struct usb_endpoint_descriptor *out_endp;
207 210
208 unsigned long flags; 211 enum rx_ctrl_state rx_state;
209 u8 rts_state; 212 u8 rts_state;
210 u8 dtr_state; 213 u8 dtr_state;
211 unsigned tx_urb_used:1; 214 unsigned tx_urb_used:1;
@@ -216,6 +219,15 @@ struct hso_serial {
216 spinlock_t serial_lock; 219 spinlock_t serial_lock;
217 220
218 int (*write_data) (struct hso_serial *serial); 221 int (*write_data) (struct hso_serial *serial);
222 /* Hacks required to get flow control
223 * working on the serial receive buffers
224 * so as not to drop characters on the floor.
225 */
226 int curr_rx_urb_idx;
227 u16 curr_rx_urb_offset;
228 u8 rx_urb_filled[MAX_RX_URBS];
229 struct tasklet_struct unthrottle_tasklet;
230 struct work_struct retry_unthrottle_workqueue;
219}; 231};
220 232
221struct hso_device { 233struct hso_device {
@@ -271,7 +283,7 @@ struct hso_device {
271static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, 283static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
272 unsigned int set, unsigned int clear); 284 unsigned int set, unsigned int clear);
273static void ctrl_callback(struct urb *urb); 285static void ctrl_callback(struct urb *urb);
274static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial); 286static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial);
275static void hso_kick_transmit(struct hso_serial *serial); 287static void hso_kick_transmit(struct hso_serial *serial);
276/* Helper functions */ 288/* Helper functions */
277static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, 289static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
@@ -287,6 +299,8 @@ static int hso_start_net_device(struct hso_device *hso_dev);
287static void hso_free_shared_int(struct hso_shared_int *shared_int); 299static void hso_free_shared_int(struct hso_shared_int *shared_int);
288static int hso_stop_net_device(struct hso_device *hso_dev); 300static int hso_stop_net_device(struct hso_device *hso_dev);
289static void hso_serial_ref_free(struct kref *ref); 301static void hso_serial_ref_free(struct kref *ref);
302static void hso_std_serial_read_bulk_callback(struct urb *urb);
303static int hso_mux_serial_read(struct hso_serial *serial);
290static void async_get_intf(struct work_struct *data); 304static void async_get_intf(struct work_struct *data);
291static void async_put_intf(struct work_struct *data); 305static void async_put_intf(struct work_struct *data);
292static int hso_put_activity(struct hso_device *hso_dev); 306static int hso_put_activity(struct hso_device *hso_dev);
@@ -458,6 +472,17 @@ static ssize_t hso_sysfs_show_porttype(struct device *dev,
458} 472}
459static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL); 473static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL);
460 474
475static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb)
476{
477 int idx;
478
479 for (idx = 0; idx < serial->num_rx_urbs; idx++)
480 if (serial->rx_urb[idx] == urb)
481 return idx;
482 dev_err(serial->parent->dev, "hso_urb_to_index failed\n");
483 return -1;
484}
485
461/* converts mux value to a port spec value */ 486/* converts mux value to a port spec value */
462static u32 hso_mux_to_port(int mux) 487static u32 hso_mux_to_port(int mux)
463{ 488{
@@ -1039,6 +1064,158 @@ static void _hso_serial_set_termios(struct tty_struct *tty,
1039 return; 1064 return;
1040} 1065}
1041 1066
1067static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb)
1068{
1069 int result;
1070#ifdef CONFIG_HSO_AUTOPM
1071 usb_mark_last_busy(urb->dev);
1072#endif
1073 /* We are done with this URB, resubmit it. Prep the USB to wait for
1074 * another frame */
1075 usb_fill_bulk_urb(urb, serial->parent->usb,
1076 usb_rcvbulkpipe(serial->parent->usb,
1077 serial->in_endp->
1078 bEndpointAddress & 0x7F),
1079 urb->transfer_buffer, serial->rx_data_length,
1080 hso_std_serial_read_bulk_callback, serial);
1081 /* Give this to the USB subsystem so it can tell us when more data
1082 * arrives. */
1083 result = usb_submit_urb(urb, GFP_ATOMIC);
1084 if (result) {
1085 dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n",
1086 __func__, result);
1087 }
1088}
1089
1090
1091
1092
1093static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial)
1094{
1095 int count;
1096 struct urb *curr_urb;
1097
1098 while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) {
1099 curr_urb = serial->rx_urb[serial->curr_rx_urb_idx];
1100 count = put_rxbuf_data(curr_urb, serial);
1101 if (count == -1)
1102 return;
1103 if (count == 0) {
1104 serial->curr_rx_urb_idx++;
1105 if (serial->curr_rx_urb_idx >= serial->num_rx_urbs)
1106 serial->curr_rx_urb_idx = 0;
1107 hso_resubmit_rx_bulk_urb(serial, curr_urb);
1108 }
1109 }
1110}
1111
1112static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
1113{
1114 int count = 0;
1115 struct urb *urb;
1116
1117 urb = serial->rx_urb[0];
1118 if (serial->open_count > 0) {
1119 count = put_rxbuf_data(urb, serial);
1120 if (count == -1)
1121 return;
1122 }
1123 /* Re issue a read as long as we receive data. */
1124
1125 if (count == 0 && ((urb->actual_length != 0) ||
1126 (serial->rx_state == RX_PENDING))) {
1127 serial->rx_state = RX_SENT;
1128 hso_mux_serial_read(serial);
1129 } else
1130 serial->rx_state = RX_IDLE;
1131}
1132
1133
1134/* read callback for Diag and CS port */
1135static void hso_std_serial_read_bulk_callback(struct urb *urb)
1136{
1137 struct hso_serial *serial = urb->context;
1138 int status = urb->status;
1139
1140 /* sanity check */
1141 if (!serial) {
1142 D1("serial == NULL");
1143 return;
1144 } else if (status) {
1145 log_usb_status(status, __func__);
1146 return;
1147 }
1148
1149 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1150 D1("Actual length = %d\n", urb->actual_length);
1151 DUMP1(urb->transfer_buffer, urb->actual_length);
1152
1153 /* Anyone listening? */
1154 if (serial->open_count == 0)
1155 return;
1156
1157 if (status == 0) {
1158 if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
1159 u32 rest;
1160 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1161 rest =
1162 urb->actual_length %
1163 serial->in_endp->wMaxPacketSize;
1164 if (((rest == 5) || (rest == 6))
1165 && !memcmp(((u8 *) urb->transfer_buffer) +
1166 urb->actual_length - 4, crc_check, 4)) {
1167 urb->actual_length -= 4;
1168 }
1169 }
1170 /* Valid data, handle RX data */
1171 spin_lock(&serial->serial_lock);
1172 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
1173 put_rxbuf_data_and_resubmit_bulk_urb(serial);
1174 spin_unlock(&serial->serial_lock);
1175 } else if (status == -ENOENT || status == -ECONNRESET) {
1176 /* Unlinked - check for throttled port. */
1177 D2("Port %d, successfully unlinked urb", serial->minor);
1178 spin_lock(&serial->serial_lock);
1179 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
1180 hso_resubmit_rx_bulk_urb(serial, urb);
1181 spin_unlock(&serial->serial_lock);
1182 } else {
1183 D2("Port %d, status = %d for read urb", serial->minor, status);
1184 return;
1185 }
1186}
1187
1188/*
1189 * This needs to be a tasklet otherwise we will
1190 * end up recursively calling this function.
1191 */
1192void hso_unthrottle_tasklet(struct hso_serial *serial)
1193{
1194 unsigned long flags;
1195
1196 spin_lock_irqsave(&serial->serial_lock, flags);
1197 if ((serial->parent->port_spec & HSO_INTF_MUX))
1198 put_rxbuf_data_and_resubmit_ctrl_urb(serial);
1199 else
1200 put_rxbuf_data_and_resubmit_bulk_urb(serial);
1201 spin_unlock_irqrestore(&serial->serial_lock, flags);
1202}
1203
1204static void hso_unthrottle(struct tty_struct *tty)
1205{
1206 struct hso_serial *serial = get_serial_by_tty(tty);
1207
1208 tasklet_hi_schedule(&serial->unthrottle_tasklet);
1209}
1210
1211void hso_unthrottle_workfunc(struct work_struct *work)
1212{
1213 struct hso_serial *serial =
1214 container_of(work, struct hso_serial,
1215 retry_unthrottle_workqueue);
1216 hso_unthrottle_tasklet(serial);
1217}
1218
1042/* open the requested serial port */ 1219/* open the requested serial port */
1043static int hso_serial_open(struct tty_struct *tty, struct file *filp) 1220static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1044{ 1221{
@@ -1064,13 +1241,18 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1064 tty->driver_data = serial; 1241 tty->driver_data = serial;
1065 serial->tty = tty; 1242 serial->tty = tty;
1066 1243
1067 /* check for port allready opened, if not set the termios */ 1244 /* check for port already opened, if not set the termios */
1068 serial->open_count++; 1245 serial->open_count++;
1069 if (serial->open_count == 1) { 1246 if (serial->open_count == 1) {
1070 tty->low_latency = 1; 1247 tty->low_latency = 1;
1071 serial->flags = 0; 1248 serial->rx_state = RX_IDLE;
1072 /* Force default termio settings */ 1249 /* Force default termio settings */
1073 _hso_serial_set_termios(tty, NULL); 1250 _hso_serial_set_termios(tty, NULL);
1251 tasklet_init(&serial->unthrottle_tasklet,
1252 (void (*)(unsigned long))hso_unthrottle_tasklet,
1253 (unsigned long)serial);
1254 INIT_WORK(&serial->retry_unthrottle_workqueue,
1255 hso_unthrottle_workfunc);
1074 result = hso_start_serial_device(serial->parent, GFP_KERNEL); 1256 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
1075 if (result) { 1257 if (result) {
1076 hso_stop_serial_device(serial->parent); 1258 hso_stop_serial_device(serial->parent);
@@ -1117,9 +1299,13 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1117 } 1299 }
1118 if (!usb_gone) 1300 if (!usb_gone)
1119 hso_stop_serial_device(serial->parent); 1301 hso_stop_serial_device(serial->parent);
1302 tasklet_kill(&serial->unthrottle_tasklet);
1303 cancel_work_sync(&serial->retry_unthrottle_workqueue);
1120 } 1304 }
1305
1121 if (!usb_gone) 1306 if (!usb_gone)
1122 usb_autopm_put_interface(serial->parent->interface); 1307 usb_autopm_put_interface(serial->parent->interface);
1308
1123 mutex_unlock(&serial->parent->mutex); 1309 mutex_unlock(&serial->parent->mutex);
1124} 1310}
1125 1311
@@ -1422,15 +1608,21 @@ static void intr_callback(struct urb *urb)
1422 (1 << i)); 1608 (1 << i));
1423 if (serial != NULL) { 1609 if (serial != NULL) {
1424 D1("Pending read interrupt on port %d\n", i); 1610 D1("Pending read interrupt on port %d\n", i);
1425 if (!test_and_set_bit(HSO_SERIAL_FLAG_RX_SENT, 1611 spin_lock(&serial->serial_lock);
1426 &serial->flags)) { 1612 if (serial->rx_state == RX_IDLE) {
1427 /* Setup and send a ctrl req read on 1613 /* Setup and send a ctrl req read on
1428 * port i */ 1614 * port i */
1429 hso_mux_serial_read(serial); 1615 if (!serial->rx_urb_filled[0]) {
1616 serial->rx_state = RX_SENT;
1617 hso_mux_serial_read(serial);
1618 } else
1619 serial->rx_state = RX_PENDING;
1620
1430 } else { 1621 } else {
1431 D1("Already pending a read on " 1622 D1("Already pending a read on "
1432 "port %d\n", i); 1623 "port %d\n", i);
1433 } 1624 }
1625 spin_unlock(&serial->serial_lock);
1434 } 1626 }
1435 } 1627 }
1436 } 1628 }
@@ -1532,16 +1724,10 @@ static void ctrl_callback(struct urb *urb)
1532 if (req->bRequestType == 1724 if (req->bRequestType ==
1533 (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { 1725 (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) {
1534 /* response to a read command */ 1726 /* response to a read command */
1535 if (serial->open_count > 0) { 1727 serial->rx_urb_filled[0] = 1;
1536 /* handle RX data the normal way */ 1728 spin_lock(&serial->serial_lock);
1537 put_rxbuf_data(urb, serial); 1729 put_rxbuf_data_and_resubmit_ctrl_urb(serial);
1538 } 1730 spin_unlock(&serial->serial_lock);
1539
1540 /* Re issue a read as long as we receive data. */
1541 if (urb->actual_length != 0)
1542 hso_mux_serial_read(serial);
1543 else
1544 clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags);
1545 } else { 1731 } else {
1546 hso_put_activity(serial->parent); 1732 hso_put_activity(serial->parent);
1547 if (serial->tty) 1733 if (serial->tty)
@@ -1552,91 +1738,42 @@ static void ctrl_callback(struct urb *urb)
1552} 1738}
1553 1739
1554/* handle RX data for serial port */ 1740/* handle RX data for serial port */
1555static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial) 1741static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
1556{ 1742{
1557 struct tty_struct *tty = serial->tty; 1743 struct tty_struct *tty = serial->tty;
1558 1744 int write_length_remaining = 0;
1745 int curr_write_len;
1559 /* Sanity check */ 1746 /* Sanity check */
1560 if (urb == NULL || serial == NULL) { 1747 if (urb == NULL || serial == NULL) {
1561 D1("serial = NULL"); 1748 D1("serial = NULL");
1562 return; 1749 return -2;
1563 } 1750 }
1564 1751
1565 /* Push data to tty */ 1752 /* Push data to tty */
1566 if (tty && urb->actual_length) { 1753 if (tty) {
1754 write_length_remaining = urb->actual_length -
1755 serial->curr_rx_urb_offset;
1567 D1("data to push to tty"); 1756 D1("data to push to tty");
1568 tty_insert_flip_string(tty, urb->transfer_buffer, 1757 while (write_length_remaining) {
1569 urb->actual_length); 1758 if (test_bit(TTY_THROTTLED, &tty->flags))
1570 tty_flip_buffer_push(tty); 1759 return -1;
1571 } 1760 curr_write_len = tty_insert_flip_string
1572} 1761 (tty, urb->transfer_buffer +
1573 1762 serial->curr_rx_urb_offset,
1574/* read callback for Diag and CS port */ 1763 write_length_remaining);
1575static void hso_std_serial_read_bulk_callback(struct urb *urb) 1764 serial->curr_rx_urb_offset += curr_write_len;
1576{ 1765 write_length_remaining -= curr_write_len;
1577 struct hso_serial *serial = urb->context; 1766 tty_flip_buffer_push(tty);
1578 int result;
1579 int status = urb->status;
1580
1581 /* sanity check */
1582 if (!serial) {
1583 D1("serial == NULL");
1584 return;
1585 } else if (status) {
1586 log_usb_status(status, __func__);
1587 return;
1588 }
1589
1590 D4("\n--- Got serial_read_bulk callback %02x ---", status);
1591 D1("Actual length = %d\n", urb->actual_length);
1592 DUMP1(urb->transfer_buffer, urb->actual_length);
1593
1594 /* Anyone listening? */
1595 if (serial->open_count == 0)
1596 return;
1597
1598 if (status == 0) {
1599 if (serial->parent->port_spec & HSO_INFO_CRC_BUG) {
1600 u32 rest;
1601 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1602 rest =
1603 urb->actual_length %
1604 serial->in_endp->wMaxPacketSize;
1605 if (((rest == 5) || (rest == 6))
1606 && !memcmp(((u8 *) urb->transfer_buffer) +
1607 urb->actual_length - 4, crc_check, 4)) {
1608 urb->actual_length -= 4;
1609 }
1610 } 1767 }
1611 /* Valid data, handle RX data */
1612 put_rxbuf_data(urb, serial);
1613 } else if (status == -ENOENT || status == -ECONNRESET) {
1614 /* Unlinked - check for throttled port. */
1615 D2("Port %d, successfully unlinked urb", serial->minor);
1616 } else {
1617 D2("Port %d, status = %d for read urb", serial->minor, status);
1618 return;
1619 } 1768 }
1620 1769 if (write_length_remaining == 0) {
1621 usb_mark_last_busy(urb->dev); 1770 serial->curr_rx_urb_offset = 0;
1622 1771 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
1623 /* We are done with this URB, resubmit it. Prep the USB to wait for
1624 * another frame */
1625 usb_fill_bulk_urb(urb, serial->parent->usb,
1626 usb_rcvbulkpipe(serial->parent->usb,
1627 serial->in_endp->
1628 bEndpointAddress & 0x7F),
1629 urb->transfer_buffer, serial->rx_data_length,
1630 hso_std_serial_read_bulk_callback, serial);
1631 /* Give this to the USB subsystem so it can tell us when more data
1632 * arrives. */
1633 result = usb_submit_urb(urb, GFP_ATOMIC);
1634 if (result) {
1635 dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d",
1636 __func__, result);
1637 } 1772 }
1773 return write_length_remaining;
1638} 1774}
1639 1775
1776
1640/* Base driver functions */ 1777/* Base driver functions */
1641 1778
1642static void hso_log_port(struct hso_device *hso_dev) 1779static void hso_log_port(struct hso_device *hso_dev)
@@ -1794,9 +1931,13 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
1794 return -ENODEV; 1931 return -ENODEV;
1795 1932
1796 for (i = 0; i < serial->num_rx_urbs; i++) { 1933 for (i = 0; i < serial->num_rx_urbs; i++) {
1797 if (serial->rx_urb[i]) 1934 if (serial->rx_urb[i]) {
1798 usb_kill_urb(serial->rx_urb[i]); 1935 usb_kill_urb(serial->rx_urb[i]);
1936 serial->rx_urb_filled[i] = 0;
1937 }
1799 } 1938 }
1939 serial->curr_rx_urb_idx = 0;
1940 serial->curr_rx_urb_offset = 0;
1800 1941
1801 if (serial->tx_urb) 1942 if (serial->tx_urb)
1802 usb_kill_urb(serial->tx_urb); 1943 usb_kill_urb(serial->tx_urb);
@@ -2211,14 +2352,14 @@ static struct hso_device *hso_create_bulk_serial_device(
2211 USB_DIR_IN); 2352 USB_DIR_IN);
2212 if (!serial->in_endp) { 2353 if (!serial->in_endp) {
2213 dev_err(&interface->dev, "Failed to find BULK IN ep\n"); 2354 dev_err(&interface->dev, "Failed to find BULK IN ep\n");
2214 goto exit; 2355 goto exit2;
2215 } 2356 }
2216 2357
2217 if (! 2358 if (!
2218 (serial->out_endp = 2359 (serial->out_endp =
2219 hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) { 2360 hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) {
2220 dev_err(&interface->dev, "Failed to find BULK IN ep\n"); 2361 dev_err(&interface->dev, "Failed to find BULK IN ep\n");
2221 goto exit; 2362 goto exit2;
2222 } 2363 }
2223 2364
2224 serial->write_data = hso_std_serial_write_data; 2365 serial->write_data = hso_std_serial_write_data;
@@ -2231,9 +2372,10 @@ static struct hso_device *hso_create_bulk_serial_device(
2231 2372
2232 /* done, return it */ 2373 /* done, return it */
2233 return hso_dev; 2374 return hso_dev;
2375
2376exit2:
2377 hso_serial_common_free(serial);
2234exit: 2378exit:
2235 if (hso_dev && serial)
2236 hso_serial_common_free(serial);
2237 kfree(serial); 2379 kfree(serial);
2238 hso_free_device(hso_dev); 2380 hso_free_device(hso_dev);
2239 return NULL; 2381 return NULL;
@@ -2740,6 +2882,7 @@ static const struct tty_operations hso_serial_ops = {
2740 .chars_in_buffer = hso_serial_chars_in_buffer, 2882 .chars_in_buffer = hso_serial_chars_in_buffer,
2741 .tiocmget = hso_serial_tiocmget, 2883 .tiocmget = hso_serial_tiocmget,
2742 .tiocmset = hso_serial_tiocmset, 2884 .tiocmset = hso_serial_tiocmset,
2885 .unthrottle = hso_unthrottle
2743}; 2886};
2744 2887
2745static struct usb_driver hso_driver = { 2888static struct usb_driver hso_driver = {
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index ca9d00c1194e..b5143509e8be 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -118,7 +118,7 @@ static void mcs7830_async_cmd_callback(struct urb *urb)
118 118
119 if (urb->status < 0) 119 if (urb->status < 0)
120 printk(KERN_DEBUG "%s() failed with %d\n", 120 printk(KERN_DEBUG "%s() failed with %d\n",
121 __FUNCTION__, urb->status); 121 __func__, urb->status);
122 122
123 kfree(req); 123 kfree(req);
124 usb_free_urb(urb); 124 usb_free_urb(urb);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8c19307e5040..38b90e7a7ed3 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -119,7 +119,7 @@ static void ctrl_callback(struct urb *urb)
119 default: 119 default:
120 if (netif_msg_drv(pegasus) && printk_ratelimit()) 120 if (netif_msg_drv(pegasus) && printk_ratelimit())
121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n", 121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
122 __FUNCTION__, urb->status); 122 __func__, urb->status);
123 } 123 }
124 pegasus->flags &= ~ETH_REGS_CHANGED; 124 pegasus->flags &= ~ETH_REGS_CHANGED;
125 wake_up(&pegasus->ctrl_wait); 125 wake_up(&pegasus->ctrl_wait);
@@ -136,7 +136,7 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
136 if (!buffer) { 136 if (!buffer) {
137 if (netif_msg_drv(pegasus)) 137 if (netif_msg_drv(pegasus))
138 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 138 dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
139 __FUNCTION__); 139 __func__);
140 return -ENOMEM; 140 return -ENOMEM;
141 } 141 }
142 add_wait_queue(&pegasus->ctrl_wait, &wait); 142 add_wait_queue(&pegasus->ctrl_wait, &wait);
@@ -224,7 +224,7 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
224 netif_device_detach(pegasus->net); 224 netif_device_detach(pegasus->net);
225 if (netif_msg_drv(pegasus)) 225 if (netif_msg_drv(pegasus))
226 dev_err(&pegasus->intf->dev, "%s, status %d\n", 226 dev_err(&pegasus->intf->dev, "%s, status %d\n",
227 __FUNCTION__, ret); 227 __func__, ret);
228 goto out; 228 goto out;
229 } 229 }
230 230
@@ -246,7 +246,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
246 if (!tmp) { 246 if (!tmp) {
247 if (netif_msg_drv(pegasus)) 247 if (netif_msg_drv(pegasus))
248 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 248 dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
249 __FUNCTION__); 249 __func__);
250 return -ENOMEM; 250 return -ENOMEM;
251 } 251 }
252 memcpy(tmp, &data, 1); 252 memcpy(tmp, &data, 1);
@@ -277,7 +277,7 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
277 netif_device_detach(pegasus->net); 277 netif_device_detach(pegasus->net);
278 if (netif_msg_drv(pegasus) && printk_ratelimit()) 278 if (netif_msg_drv(pegasus) && printk_ratelimit())
279 dev_err(&pegasus->intf->dev, "%s, status %d\n", 279 dev_err(&pegasus->intf->dev, "%s, status %d\n",
280 __FUNCTION__, ret); 280 __func__, ret);
281 goto out; 281 goto out;
282 } 282 }
283 283
@@ -310,7 +310,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
310 netif_device_detach(pegasus->net); 310 netif_device_detach(pegasus->net);
311 if (netif_msg_drv(pegasus)) 311 if (netif_msg_drv(pegasus))
312 dev_err(&pegasus->intf->dev, "%s, status %d\n", 312 dev_err(&pegasus->intf->dev, "%s, status %d\n",
313 __FUNCTION__, ret); 313 __func__, ret);
314 } 314 }
315 315
316 return ret; 316 return ret;
@@ -341,7 +341,7 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
341 } 341 }
342fail: 342fail:
343 if (netif_msg_drv(pegasus)) 343 if (netif_msg_drv(pegasus))
344 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 344 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
345 345
346 return ret; 346 return ret;
347} 347}
@@ -378,7 +378,7 @@ static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
378 378
379fail: 379fail:
380 if (netif_msg_drv(pegasus)) 380 if (netif_msg_drv(pegasus))
381 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 381 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
382 return -ETIMEDOUT; 382 return -ETIMEDOUT;
383} 383}
384 384
@@ -415,7 +415,7 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
415 415
416fail: 416fail:
417 if (netif_msg_drv(pegasus)) 417 if (netif_msg_drv(pegasus))
418 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 418 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
419 return -ETIMEDOUT; 419 return -ETIMEDOUT;
420} 420}
421 421
@@ -463,7 +463,7 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
463 return ret; 463 return ret;
464fail: 464fail:
465 if (netif_msg_drv(pegasus)) 465 if (netif_msg_drv(pegasus))
466 dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 466 dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
467 return -ETIMEDOUT; 467 return -ETIMEDOUT;
468} 468}
469#endif /* PEGASUS_WRITE_EEPROM */ 469#endif /* PEGASUS_WRITE_EEPROM */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8463efb9e0b1..02d25c743994 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -512,14 +512,13 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
512 int count = 0; 512 int count = 0;
513 513
514 spin_lock_irqsave (&q->lock, flags); 514 spin_lock_irqsave (&q->lock, flags);
515 for (skb = q->next; skb != (struct sk_buff *) q; skb = skbnext) { 515 skb_queue_walk_safe(q, skb, skbnext) {
516 struct skb_data *entry; 516 struct skb_data *entry;
517 struct urb *urb; 517 struct urb *urb;
518 int retval; 518 int retval;
519 519
520 entry = (struct skb_data *) skb->cb; 520 entry = (struct skb_data *) skb->cb;
521 urb = entry->urb; 521 urb = entry->urb;
522 skbnext = skb->next;
523 522
524 // during some PM-driven resume scenarios, 523 // during some PM-driven resume scenarios,
525 // these (async) unlinks complete immediately 524 // these (async) unlinks complete immediately
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 96dff04334b8..5b7870080c56 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -914,7 +914,7 @@ static void alloc_rbufs(struct net_device *dev)
914 914
915 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 915 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
916 for (i = 0; i < RX_RING_SIZE; i++) { 916 for (i = 0; i < RX_RING_SIZE; i++) {
917 struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz); 917 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
918 rp->rx_skbuff[i] = skb; 918 rp->rx_skbuff[i] = skb;
919 if (skb == NULL) 919 if (skb == NULL)
920 break; 920 break;
@@ -1473,8 +1473,8 @@ static int rhine_rx(struct net_device *dev, int limit)
1473 /* Check if the packet is long enough to accept without 1473 /* Check if the packet is long enough to accept without
1474 copying to a minimally-sized skbuff. */ 1474 copying to a minimally-sized skbuff. */
1475 if (pkt_len < rx_copybreak && 1475 if (pkt_len < rx_copybreak &&
1476 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1476 (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
1477 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1477 skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */
1478 pci_dma_sync_single_for_cpu(rp->pdev, 1478 pci_dma_sync_single_for_cpu(rp->pdev,
1479 rp->rx_skbuff_dma[entry], 1479 rp->rx_skbuff_dma[entry],
1480 rp->rx_buf_sz, 1480 rp->rx_buf_sz,
@@ -1518,7 +1518,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1518 struct sk_buff *skb; 1518 struct sk_buff *skb;
1519 entry = rp->dirty_rx % RX_RING_SIZE; 1519 entry = rp->dirty_rx % RX_RING_SIZE;
1520 if (rp->rx_skbuff[entry] == NULL) { 1520 if (rp->rx_skbuff[entry] == NULL) {
1521 skb = dev_alloc_skb(rp->rx_buf_sz); 1521 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1522 rp->rx_skbuff[entry] = skb; 1522 rp->rx_skbuff[entry] = skb;
1523 if (skb == NULL) 1523 if (skb == NULL)
1524 break; /* Better luck next round. */ 1524 break; /* Better luck next round. */
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 1b95b04c9257..29a33090d3d4 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1381,7 +1381,7 @@ enum velocity_msg_level {
1381#define ASSERT(x) { \ 1381#define ASSERT(x) { \
1382 if (!(x)) { \ 1382 if (!(x)) { \
1383 printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\ 1383 printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\
1384 __FUNCTION__, __LINE__);\ 1384 __func__, __LINE__);\
1385 BUG(); \ 1385 BUG(); \
1386 }\ 1386 }\
1387} 1387}
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index d14e6678deed..a5ddc6c8963e 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -407,7 +407,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
407 if (cfm->version != CFM_VERSION) { 407 if (cfm->version != CFM_VERSION) {
408 printk(KERN_ERR "%s:%s: firmware format %u rejected! " 408 printk(KERN_ERR "%s:%s: firmware format %u rejected! "
409 "Expecting %u.\n", 409 "Expecting %u.\n",
410 modname, __FUNCTION__, cfm->version, CFM_VERSION); 410 modname, __func__, cfm->version, CFM_VERSION);
411 return -EINVAL; 411 return -EINVAL;
412 } 412 }
413 413
@@ -420,7 +420,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
420*/ 420*/
421 if (cksum != cfm->checksum) { 421 if (cksum != cfm->checksum) {
422 printk(KERN_ERR "%s:%s: firmware corrupted!\n", 422 printk(KERN_ERR "%s:%s: firmware corrupted!\n",
423 modname, __FUNCTION__); 423 modname, __func__);
424 printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n", 424 printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
425 len - (int)sizeof(struct cycx_firmware) - 1, 425 len - (int)sizeof(struct cycx_firmware) - 1,
426 cfm->info.codesize); 426 cfm->info.codesize);
@@ -432,7 +432,7 @@ static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
432 /* If everything is ok, set reset, data and code pointers */ 432 /* If everything is ok, set reset, data and code pointers */
433 img_hdr = (struct cycx_fw_header *)&cfm->image; 433 img_hdr = (struct cycx_fw_header *)&cfm->image;
434#ifdef FIRMWARE_DEBUG 434#ifdef FIRMWARE_DEBUG
435 printk(KERN_INFO "%s:%s: image sizes\n", __FUNCTION__, modname); 435 printk(KERN_INFO "%s:%s: image sizes\n", __func__, modname);
436 printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size); 436 printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
437 printk(KERN_INFO " data=%lu\n", img_hdr->data_size); 437 printk(KERN_INFO " data=%lu\n", img_hdr->data_size);
438 printk(KERN_INFO " code=%lu\n", img_hdr->code_size); 438 printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index d3b28b01b9f9..5a7303dc0965 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -874,7 +874,7 @@ static void cycx_x25_irq_connect(struct cycx_device *card,
874 nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1); 874 nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
875 875
876 dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n", 876 dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
877 __FUNCTION__, lcn, loc, rem); 877 __func__, lcn, loc, rem);
878 878
879 dev = cycx_x25_get_dev_by_dte_addr(wandev, rem); 879 dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
880 if (!dev) { 880 if (!dev) {
@@ -902,7 +902,7 @@ static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
902 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 902 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
903 cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key)); 903 cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
904 dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n", 904 dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
905 card->devname, __FUNCTION__, lcn, key); 905 card->devname, __func__, lcn, key);
906 906
907 dev = cycx_x25_get_dev_by_lcn(wandev, -key); 907 dev = cycx_x25_get_dev_by_lcn(wandev, -key);
908 if (!dev) { 908 if (!dev) {
@@ -929,7 +929,7 @@ static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
929 929
930 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 930 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
931 dprintk(1, KERN_INFO "%s: %s:lcn=%d\n", 931 dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
932 card->devname, __FUNCTION__, lcn); 932 card->devname, __func__, lcn);
933 dev = cycx_x25_get_dev_by_lcn(wandev, lcn); 933 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
934 if (!dev) { 934 if (!dev) {
935 /* Invalid channel, discard packet */ 935 /* Invalid channel, discard packet */
@@ -950,7 +950,7 @@ static void cycx_x25_irq_disconnect(struct cycx_device *card,
950 u8 lcn; 950 u8 lcn;
951 951
952 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 952 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
953 dprintk(1, KERN_INFO "%s:lcn=%d\n", __FUNCTION__, lcn); 953 dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn);
954 954
955 dev = cycx_x25_get_dev_by_lcn(wandev, lcn); 955 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
956 if (dev) { 956 if (dev) {
@@ -1381,7 +1381,7 @@ static void cycx_x25_chan_timer(unsigned long d)
1381 cycx_x25_chan_disconnect(dev); 1381 cycx_x25_chan_disconnect(dev);
1382 else 1382 else
1383 printk(KERN_ERR "%s: %s for svc (%s) not connected!\n", 1383 printk(KERN_ERR "%s: %s for svc (%s) not connected!\n",
1384 chan->card->devname, __FUNCTION__, dev->name); 1384 chan->card->devname, __func__, dev->name);
1385} 1385}
1386 1386
1387/* Set logical channel state. */ 1387/* Set logical channel state. */
@@ -1485,7 +1485,7 @@ static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
1485 unsigned char *ptr; 1485 unsigned char *ptr;
1486 1486
1487 if ((skb = dev_alloc_skb(1)) == NULL) { 1487 if ((skb = dev_alloc_skb(1)) == NULL) {
1488 printk(KERN_ERR "%s: out of memory\n", __FUNCTION__); 1488 printk(KERN_ERR "%s: out of memory\n", __func__);
1489 return; 1489 return;
1490 } 1490 }
1491 1491
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index f5d55ad02267..5f1ccb2b08b1 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -647,7 +647,7 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
647 647
648 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; 648 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
649 if (!skb) { 649 if (!skb) {
650 printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__); 650 printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__);
651 goto refill; 651 goto refill;
652 } 652 }
653 pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2)); 653 pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index 8b7e5d2e2ac9..cbcbf6f0414c 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -163,15 +163,17 @@ static void x25_close(struct net_device *dev)
163 163
164static int x25_rx(struct sk_buff *skb) 164static int x25_rx(struct sk_buff *skb)
165{ 165{
166 struct net_device *dev = skb->dev;
167
166 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 168 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
167 skb->dev->stats.rx_dropped++; 169 dev->stats.rx_dropped++;
168 return NET_RX_DROP; 170 return NET_RX_DROP;
169 } 171 }
170 172
171 if (lapb_data_received(skb->dev, skb) == LAPB_OK) 173 if (lapb_data_received(dev, skb) == LAPB_OK)
172 return NET_RX_SUCCESS; 174 return NET_RX_SUCCESS;
173 175
174 skb->dev->stats.rx_errors++; 176 dev->stats.rx_errors++;
175 dev_kfree_skb_any(skb); 177 dev_kfree_skb_any(skb);
176 return NET_RX_DROP; 178 return NET_RX_DROP;
177} 179}
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4518d0aa2480..4917a94943bd 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -548,7 +548,7 @@ static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
548{ 548{
549 st_cpc_tty_area *cpc_tty; 549 st_cpc_tty_area *cpc_tty;
550 550
551 CPC_TTY_DBG("%s: set:%x clear:%x\n", __FUNCTION__, set, clear); 551 CPC_TTY_DBG("%s: set:%x clear:%x\n", __func__, set, clear);
552 552
553 if (!tty || !tty->driver_data ) { 553 if (!tty || !tty->driver_data ) {
554 CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n"); 554 CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 6596cd0742b9..f972fef87c98 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -856,7 +856,7 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev )
856 len = SBNI_MIN_LEN; 856 len = SBNI_MIN_LEN;
857 857
858 nl->tx_buf_p = skb; 858 nl->tx_buf_p = skb;
859 nl->tx_frameno = (len + nl->maxframe - 1) / nl->maxframe; 859 nl->tx_frameno = DIV_ROUND_UP(len, nl->maxframe);
860 nl->framelen = len < nl->maxframe ? len : nl->maxframe; 860 nl->framelen = len < nl->maxframe ? len : nl->maxframe;
861 861
862 outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 ); 862 outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 9931b5ab59cd..45bdf0b339bb 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -300,6 +300,19 @@ config LIBERTAS_DEBUG
300 ---help--- 300 ---help---
301 Debugging support. 301 Debugging support.
302 302
303config LIBERTAS_THINFIRM
304 tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
305 depends on WLAN_80211 && MAC80211
306 select FW_LOADER
307 ---help---
308 A library for Marvell Libertas 8xxx devices using thinfirm.
309
310config LIBERTAS_THINFIRM_USB
311 tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
312 depends on LIBERTAS_THINFIRM && USB
313 ---help---
314 A driver for Marvell Libertas 8388 USB devices using thinfirm.
315
303config AIRO 316config AIRO
304 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 317 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
305 depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN) 318 depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN)
@@ -322,6 +335,9 @@ config HERMES
322 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" 335 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
323 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211 336 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211
324 select WIRELESS_EXT 337 select WIRELESS_EXT
338 select FW_LOADER
339 select CRYPTO
340 select CRYPTO_MICHAEL_MIC
325 ---help--- 341 ---help---
326 A driver for 802.11b wireless cards based on the "Hermes" or 342 A driver for 802.11b wireless cards based on the "Hermes" or
327 Intersil HFA384x (Prism 2) MAC controller. This includes the vast 343 Intersil HFA384x (Prism 2) MAC controller. This includes the vast
@@ -411,7 +427,6 @@ config PCMCIA_HERMES
411config PCMCIA_SPECTRUM 427config PCMCIA_SPECTRUM
412 tristate "Symbol Spectrum24 Trilogy PCMCIA card support" 428 tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
413 depends on PCMCIA && HERMES 429 depends on PCMCIA && HERMES
414 select FW_LOADER
415 ---help--- 430 ---help---
416 431
417 This is a driver for 802.11b cards using RAM-loadable Symbol 432 This is a driver for 802.11b cards using RAM-loadable Symbol
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 59aa89ec6e81..59d2d805f60b 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_WAVELAN) += wavelan.o
16obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o 16obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
17obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o 17obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
18 18
19obj-$(CONFIG_HERMES) += orinoco.o hermes.o 19obj-$(CONFIG_HERMES) += orinoco.o hermes.o hermes_dld.o
20obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o 20obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o
21obj-$(CONFIG_APPLE_AIRPORT) += airport.o 21obj-$(CONFIG_APPLE_AIRPORT) += airport.o
22obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o 22obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
@@ -48,6 +48,8 @@ obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
48obj-$(CONFIG_USB_ZD1201) += zd1201.o 48obj-$(CONFIG_USB_ZD1201) += zd1201.o
49obj-$(CONFIG_LIBERTAS) += libertas/ 49obj-$(CONFIG_LIBERTAS) += libertas/
50 50
51obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/
52
51rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o 53rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o
52rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o 54rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
53 55
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 3333d4596b8d..b2c050b68890 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -765,11 +765,11 @@ static void adm8211_update_mode(struct ieee80211_hw *dev)
765 765
766 priv->soft_rx_crc = 0; 766 priv->soft_rx_crc = 0;
767 switch (priv->mode) { 767 switch (priv->mode) {
768 case IEEE80211_IF_TYPE_STA: 768 case NL80211_IFTYPE_STATION:
769 priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA); 769 priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA);
770 priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR; 770 priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR;
771 break; 771 break;
772 case IEEE80211_IF_TYPE_IBSS: 772 case NL80211_IFTYPE_ADHOC:
773 priv->nar &= ~ADM8211_NAR_PR; 773 priv->nar &= ~ADM8211_NAR_PR;
774 priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR; 774 priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR;
775 775
@@ -777,7 +777,7 @@ static void adm8211_update_mode(struct ieee80211_hw *dev)
777 if (priv->pdev->revision >= ADM8211_REV_BA) 777 if (priv->pdev->revision >= ADM8211_REV_BA)
778 priv->soft_rx_crc = 1; 778 priv->soft_rx_crc = 1;
779 break; 779 break;
780 case IEEE80211_IF_TYPE_MNTR: 780 case NL80211_IFTYPE_MONITOR:
781 priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST); 781 priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST);
782 priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR; 782 priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR;
783 break; 783 break;
@@ -1410,11 +1410,11 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
1410 struct ieee80211_if_init_conf *conf) 1410 struct ieee80211_if_init_conf *conf)
1411{ 1411{
1412 struct adm8211_priv *priv = dev->priv; 1412 struct adm8211_priv *priv = dev->priv;
1413 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 1413 if (priv->mode != NL80211_IFTYPE_MONITOR)
1414 return -EOPNOTSUPP; 1414 return -EOPNOTSUPP;
1415 1415
1416 switch (conf->type) { 1416 switch (conf->type) {
1417 case IEEE80211_IF_TYPE_STA: 1417 case NL80211_IFTYPE_STATION:
1418 priv->mode = conf->type; 1418 priv->mode = conf->type;
1419 break; 1419 break;
1420 default: 1420 default:
@@ -1437,7 +1437,7 @@ static void adm8211_remove_interface(struct ieee80211_hw *dev,
1437 struct ieee80211_if_init_conf *conf) 1437 struct ieee80211_if_init_conf *conf)
1438{ 1438{
1439 struct adm8211_priv *priv = dev->priv; 1439 struct adm8211_priv *priv = dev->priv;
1440 priv->mode = IEEE80211_IF_TYPE_MNTR; 1440 priv->mode = NL80211_IFTYPE_MONITOR;
1441} 1441}
1442 1442
1443static int adm8211_init_rings(struct ieee80211_hw *dev) 1443static int adm8211_init_rings(struct ieee80211_hw *dev)
@@ -1556,7 +1556,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
1556 ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE | 1556 ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE |
1557 ADM8211_IER_RCIE | ADM8211_IER_TCIE | 1557 ADM8211_IER_RCIE | ADM8211_IER_TCIE |
1558 ADM8211_IER_TDUIE | ADM8211_IER_GPTIE); 1558 ADM8211_IER_TDUIE | ADM8211_IER_GPTIE);
1559 priv->mode = IEEE80211_IF_TYPE_MNTR; 1559 priv->mode = NL80211_IFTYPE_MONITOR;
1560 adm8211_update_mode(dev); 1560 adm8211_update_mode(dev);
1561 ADM8211_CSR_WRITE(RDR, 0); 1561 ADM8211_CSR_WRITE(RDR, 0);
1562 1562
@@ -1571,7 +1571,7 @@ static void adm8211_stop(struct ieee80211_hw *dev)
1571{ 1571{
1572 struct adm8211_priv *priv = dev->priv; 1572 struct adm8211_priv *priv = dev->priv;
1573 1573
1574 priv->mode = IEEE80211_IF_TYPE_INVALID; 1574 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1575 priv->nar = 0; 1575 priv->nar = 0;
1576 ADM8211_CSR_WRITE(NAR, 0); 1576 ADM8211_CSR_WRITE(NAR, 0);
1577 ADM8211_CSR_WRITE(IER, 0); 1577 ADM8211_CSR_WRITE(IER, 0);
@@ -1884,6 +1884,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1884 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); 1884 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
1885 /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */ 1885 /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
1886 dev->flags = IEEE80211_HW_SIGNAL_UNSPEC; 1886 dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
1887 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1887 1888
1888 dev->channel_change_time = 1000; 1889 dev->channel_change_time = 1000;
1889 dev->max_signal = 100; /* FIXME: find better value */ 1890 dev->max_signal = 100; /* FIXME: find better value */
@@ -1895,7 +1896,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1895 priv->tx_power = 0x40; 1896 priv->tx_power = 0x40;
1896 priv->lpf_cutoff = 0xFF; 1897 priv->lpf_cutoff = 0xFF;
1897 priv->lnags_threshold = 0xFF; 1898 priv->lnags_threshold = 0xFF;
1898 priv->mode = IEEE80211_IF_TYPE_INVALID; 1899 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1899 1900
1900 /* Power-on issue. EEPROM won't read correctly without */ 1901 /* Power-on issue. EEPROM won't read correctly without */
1901 if (pdev->revision >= ADM8211_REV_BA) { 1902 if (pdev->revision >= ADM8211_REV_BA) {
@@ -1985,7 +1986,7 @@ static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)
1985 struct ieee80211_hw *dev = pci_get_drvdata(pdev); 1986 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
1986 struct adm8211_priv *priv = dev->priv; 1987 struct adm8211_priv *priv = dev->priv;
1987 1988
1988 if (priv->mode != IEEE80211_IF_TYPE_INVALID) { 1989 if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
1989 ieee80211_stop_queues(dev); 1990 ieee80211_stop_queues(dev);
1990 adm8211_stop(dev); 1991 adm8211_stop(dev);
1991 } 1992 }
@@ -2003,7 +2004,7 @@ static int adm8211_resume(struct pci_dev *pdev)
2003 pci_set_power_state(pdev, PCI_D0); 2004 pci_set_power_state(pdev, PCI_D0);
2004 pci_restore_state(pdev); 2005 pci_restore_state(pdev);
2005 2006
2006 if (priv->mode != IEEE80211_IF_TYPE_INVALID) { 2007 if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
2007 adm8211_start(dev); 2008 adm8211_start(dev);
2008 ieee80211_wake_queues(dev); 2009 ieee80211_wake_queues(dev);
2009 } 2010 }
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index b5cd850a4a59..370133e492d2 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1915,7 +1915,7 @@ static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
1915 struct airo_info *ai = dev->priv; 1915 struct airo_info *ai = dev->priv;
1916 1916
1917 if (!skb) { 1917 if (!skb) {
1918 airo_print_err(dev->name, "%s: skb == NULL!",__FUNCTION__); 1918 airo_print_err(dev->name, "%s: skb == NULL!",__func__);
1919 return 0; 1919 return 0;
1920 } 1920 }
1921 npacks = skb_queue_len (&ai->txq); 1921 npacks = skb_queue_len (&ai->txq);
@@ -1964,7 +1964,7 @@ static int mpi_send_packet (struct net_device *dev)
1964 if ((skb = skb_dequeue(&ai->txq)) == NULL) { 1964 if ((skb = skb_dequeue(&ai->txq)) == NULL) {
1965 airo_print_err(dev->name, 1965 airo_print_err(dev->name,
1966 "%s: Dequeue'd zero in send_packet()", 1966 "%s: Dequeue'd zero in send_packet()",
1967 __FUNCTION__); 1967 __func__);
1968 return 0; 1968 return 0;
1969 } 1969 }
1970 1970
@@ -2115,7 +2115,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
2115 u32 *fids = priv->fids; 2115 u32 *fids = priv->fids;
2116 2116
2117 if ( skb == NULL ) { 2117 if ( skb == NULL ) {
2118 airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__); 2118 airo_print_err(dev->name, "%s: skb == NULL!", __func__);
2119 return 0; 2119 return 0;
2120 } 2120 }
2121 2121
@@ -2186,7 +2186,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
2186 } 2186 }
2187 2187
2188 if ( skb == NULL ) { 2188 if ( skb == NULL ) {
2189 airo_print_err(dev->name, "%s: skb == NULL!", __FUNCTION__); 2189 airo_print_err(dev->name, "%s: skb == NULL!", __func__);
2190 return 0; 2190 return 0;
2191 } 2191 }
2192 2192
@@ -4127,7 +4127,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4127 if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid)) 4127 if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
4128 airo_print_err(ai->dev->name, 4128 airo_print_err(ai->dev->name,
4129 "%s: MAC should be disabled (rid=%04x)", 4129 "%s: MAC should be disabled (rid=%04x)",
4130 __FUNCTION__, rid); 4130 __func__, rid);
4131 memset(&cmd, 0, sizeof(cmd)); 4131 memset(&cmd, 0, sizeof(cmd));
4132 memset(&rsp, 0, sizeof(rsp)); 4132 memset(&rsp, 0, sizeof(rsp));
4133 4133
@@ -4142,7 +4142,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4142 &ai->config_desc.rid_desc, sizeof(Rid)); 4142 &ai->config_desc.rid_desc, sizeof(Rid));
4143 4143
4144 if (len < 4 || len > 2047) { 4144 if (len < 4 || len > 2047) {
4145 airo_print_err(ai->dev->name, "%s: len=%d", __FUNCTION__, len); 4145 airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
4146 rc = -1; 4146 rc = -1;
4147 } else { 4147 } else {
4148 memcpy((char *)ai->config_desc.virtual_host_addr, 4148 memcpy((char *)ai->config_desc.virtual_host_addr,
@@ -4151,9 +4151,9 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
4151 rc = issuecommand(ai, &cmd, &rsp); 4151 rc = issuecommand(ai, &cmd, &rsp);
4152 if ((rc & 0xff00) != 0) { 4152 if ((rc & 0xff00) != 0) {
4153 airo_print_err(ai->dev->name, "%s: Write rid Error %d", 4153 airo_print_err(ai->dev->name, "%s: Write rid Error %d",
4154 __FUNCTION__, rc); 4154 __func__, rc);
4155 airo_print_err(ai->dev->name, "%s: Cmd=%04x", 4155 airo_print_err(ai->dev->name, "%s: Cmd=%04x",
4156 __FUNCTION__, cmd.cmd); 4156 __func__, cmd.cmd);
4157 } 4157 }
4158 4158
4159 if ((rsp.status & 0x7f00)) 4159 if ((rsp.status & 0x7f00))
@@ -7107,7 +7107,7 @@ static int airo_get_aplist(struct net_device *dev,
7107 */ 7107 */
7108static int airo_set_scan(struct net_device *dev, 7108static int airo_set_scan(struct net_device *dev,
7109 struct iw_request_info *info, 7109 struct iw_request_info *info,
7110 struct iw_param *vwrq, 7110 struct iw_point *dwrq,
7111 char *extra) 7111 char *extra)
7112{ 7112{
7113 struct airo_info *ai = dev->priv; 7113 struct airo_info *ai = dev->priv;
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index f12355398fe7..fd72e427cb28 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -147,7 +147,7 @@ static int airo_probe(struct pcmcia_device *p_dev)
147 DEBUG(0, "airo_attach()\n"); 147 DEBUG(0, "airo_attach()\n");
148 148
149 /* Interrupt setup */ 149 /* Interrupt setup */
150 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 150 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
151 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; 151 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
152 p_dev->irq.Handler = NULL; 152 p_dev->irq.Handler = NULL;
153 153
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 6f7eb9f59223..ce03a2e865fa 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -180,7 +180,8 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
180 } 180 }
181 181
182 /* Allocate space for private device-specific data */ 182 /* Allocate space for private device-specific data */
183 dev = alloc_orinocodev(sizeof(*card), airport_hard_reset); 183 dev = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev,
184 airport_hard_reset, NULL);
184 if (! dev) { 185 if (! dev) {
185 printk(KERN_ERR PFX "Cannot allocate network device\n"); 186 printk(KERN_ERR PFX "Cannot allocate network device\n");
186 return -ENODEV; 187 return -ENODEV;
diff --git a/drivers/net/wireless/ath5k/Makefile b/drivers/net/wireless/ath5k/Makefile
index 564ecd0c5d4b..719cfaef7085 100644
--- a/drivers/net/wireless/ath5k/Makefile
+++ b/drivers/net/wireless/ath5k/Makefile
@@ -1,6 +1,14 @@
1ath5k-y += base.o 1ath5k-y += caps.o
2ath5k-y += hw.o
3ath5k-y += initvals.o 2ath5k-y += initvals.o
3ath5k-y += eeprom.o
4ath5k-y += gpio.o
5ath5k-y += desc.o
6ath5k-y += dma.o
7ath5k-y += qcu.o
8ath5k-y += pcu.o
4ath5k-y += phy.o 9ath5k-y += phy.o
10ath5k-y += reset.o
11ath5k-y += attach.o
12ath5k-y += base.o
5ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 13ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
6obj-$(CONFIG_ATH5K) += ath5k.o 14obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 9102eea3c8bf..7134c40d6a69 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -18,18 +18,23 @@
18#ifndef _ATH5K_H 18#ifndef _ATH5K_H
19#define _ATH5K_H 19#define _ATH5K_H
20 20
21/* Set this to 1 to disable regulatory domain restrictions for channel tests. 21/* TODO: Clean up channel debuging -doesn't work anyway- and start
22 * WARNING: This is for debuging only and has side effects (eg. scan takes too 22 * working on reg. control code using all available eeprom information
23 * long and results timeouts). It's also illegal to tune to some of the 23 * -rev. engineering needed- */
24 * supported frequencies in some countries, so use this at your own risk,
25 * you've been warned. */
26#define CHAN_DEBUG 0 24#define CHAN_DEBUG 0
27 25
28#include <linux/io.h> 26#include <linux/io.h>
29#include <linux/types.h> 27#include <linux/types.h>
30#include <net/mac80211.h> 28#include <net/mac80211.h>
31 29
32#include "hw.h" 30/* RX/TX descriptor hw structs
31 * TODO: Driver part should only see sw structs */
32#include "desc.h"
33
34/* EEPROM structs/offsets
35 * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
36 * and clean up common bits, then introduce set/get functions in eeprom.c */
37#include "eeprom.h"
33 38
34/* PCI IDs */ 39/* PCI IDs */
35#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */ 40#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
@@ -87,7 +92,92 @@
87 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__) 92 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__)
88 93
89/* 94/*
95 * AR5K REGISTER ACCESS
96 */
97
98/* Some macros to read/write fields */
99
100/* First shift, then mask */
101#define AR5K_REG_SM(_val, _flags) \
102 (((_val) << _flags##_S) & (_flags))
103
104/* First mask, then shift */
105#define AR5K_REG_MS(_val, _flags) \
106 (((_val) & (_flags)) >> _flags##_S)
107
108/* Some registers can hold multiple values of interest. For this
109 * reason when we want to write to these registers we must first
110 * retrieve the values which we do not want to clear (lets call this
111 * old_data) and then set the register with this and our new_value:
112 * ( old_data | new_value) */
113#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val) \
114 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \
115 (((_val) << _flags##_S) & (_flags)), _reg)
116
117#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask) \
118 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & \
119 (_mask)) | (_flags), _reg)
120
121#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags) \
122 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg)
123
124#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags) \
125 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
126
127/* Access to PHY registers */
128#define AR5K_PHY_READ(ah, _reg) \
129 ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2))
130
131#define AR5K_PHY_WRITE(ah, _reg, _val) \
132 ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2))
133
134/* Access QCU registers per queue */
135#define AR5K_REG_READ_Q(ah, _reg, _queue) \
136 (ath5k_hw_reg_read(ah, _reg) & (1 << _queue)) \
137
138#define AR5K_REG_WRITE_Q(ah, _reg, _queue) \
139 ath5k_hw_reg_write(ah, (1 << _queue), _reg)
140
141#define AR5K_Q_ENABLE_BITS(_reg, _queue) do { \
142 _reg |= 1 << _queue; \
143} while (0)
144
145#define AR5K_Q_DISABLE_BITS(_reg, _queue) do { \
146 _reg &= ~(1 << _queue); \
147} while (0)
148
149/* Used while writing initvals */
150#define AR5K_REG_WAIT(_i) do { \
151 if (_i % 64) \
152 udelay(1); \
153} while (0)
154
155/* Register dumps are done per operation mode */
156#define AR5K_INI_RFGAIN_5GHZ 0
157#define AR5K_INI_RFGAIN_2GHZ 1
158
159/* TODO: Clean this up */
160#define AR5K_INI_VAL_11A 0
161#define AR5K_INI_VAL_11A_TURBO 1
162#define AR5K_INI_VAL_11B 2
163#define AR5K_INI_VAL_11G 3
164#define AR5K_INI_VAL_11G_TURBO 4
165#define AR5K_INI_VAL_XR 0
166#define AR5K_INI_VAL_MAX 5
167
168#define AR5K_RF5111_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
169#define AR5K_RF5112_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
170
171/* Used for BSSID etc manipulation */
172#define AR5K_LOW_ID(_a)( \
173(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \
174)
175
176#define AR5K_HIGH_ID(_a) ((_a)[4] | (_a)[5] << 8)
177
178/*
90 * Some tuneable values (these should be changeable by the user) 179 * Some tuneable values (these should be changeable by the user)
180 * TODO: Make use of them and add more options OR use debug/configfs
91 */ 181 */
92#define AR5K_TUNE_DMA_BEACON_RESP 2 182#define AR5K_TUNE_DMA_BEACON_RESP 2
93#define AR5K_TUNE_SW_BEACON_RESP 10 183#define AR5K_TUNE_SW_BEACON_RESP 10
@@ -98,13 +188,13 @@
98#define AR5K_TUNE_REGISTER_TIMEOUT 20000 188#define AR5K_TUNE_REGISTER_TIMEOUT 20000
99/* Register for RSSI threshold has a mask of 0xff, so 255 seems to 189/* Register for RSSI threshold has a mask of 0xff, so 255 seems to
100 * be the max value. */ 190 * be the max value. */
101#define AR5K_TUNE_RSSI_THRES 129 191#define AR5K_TUNE_RSSI_THRES 129
102/* This must be set when setting the RSSI threshold otherwise it can 192/* This must be set when setting the RSSI threshold otherwise it can
103 * prevent a reset. If AR5K_RSSI_THR is read after writing to it 193 * prevent a reset. If AR5K_RSSI_THR is read after writing to it
104 * the BMISS_THRES will be seen as 0, seems harware doesn't keep 194 * the BMISS_THRES will be seen as 0, seems harware doesn't keep
105 * track of it. Max value depends on harware. For AR5210 this is just 7. 195 * track of it. Max value depends on harware. For AR5210 this is just 7.
106 * For AR5211+ this seems to be up to 255. */ 196 * For AR5211+ this seems to be up to 255. */
107#define AR5K_TUNE_BMISS_THRES 7 197#define AR5K_TUNE_BMISS_THRES 7
108#define AR5K_TUNE_REGISTER_DWELL_TIME 20000 198#define AR5K_TUNE_REGISTER_DWELL_TIME 20000
109#define AR5K_TUNE_BEACON_INTERVAL 100 199#define AR5K_TUNE_BEACON_INTERVAL 100
110#define AR5K_TUNE_AIFS 2 200#define AR5K_TUNE_AIFS 2
@@ -123,6 +213,55 @@
123#define AR5K_TUNE_ANT_DIVERSITY true 213#define AR5K_TUNE_ANT_DIVERSITY true
124#define AR5K_TUNE_HWTXTRIES 4 214#define AR5K_TUNE_HWTXTRIES 4
125 215
216#define AR5K_INIT_CARR_SENSE_EN 1
217
218/*Swap RX/TX Descriptor for big endian archs*/
219#if defined(__BIG_ENDIAN)
220#define AR5K_INIT_CFG ( \
221 AR5K_CFG_SWTD | AR5K_CFG_SWRD \
222)
223#else
224#define AR5K_INIT_CFG 0x00000000
225#endif
226
227/* Initial values */
228#define AR5K_INIT_TX_LATENCY 502
229#define AR5K_INIT_USEC 39
230#define AR5K_INIT_USEC_TURBO 79
231#define AR5K_INIT_USEC_32 31
232#define AR5K_INIT_SLOT_TIME 396
233#define AR5K_INIT_SLOT_TIME_TURBO 480
234#define AR5K_INIT_ACK_CTS_TIMEOUT 1024
235#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800
236#define AR5K_INIT_PROG_IFS 920
237#define AR5K_INIT_PROG_IFS_TURBO 960
238#define AR5K_INIT_EIFS 3440
239#define AR5K_INIT_EIFS_TURBO 6880
240#define AR5K_INIT_SIFS 560
241#define AR5K_INIT_SIFS_TURBO 480
242#define AR5K_INIT_SH_RETRY 10
243#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
244#define AR5K_INIT_SSH_RETRY 32
245#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
246#define AR5K_INIT_TX_RETRY 10
247
248#define AR5K_INIT_TRANSMIT_LATENCY ( \
249 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
250 (AR5K_INIT_USEC) \
251)
252#define AR5K_INIT_TRANSMIT_LATENCY_TURBO ( \
253 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
254 (AR5K_INIT_USEC_TURBO) \
255)
256#define AR5K_INIT_PROTO_TIME_CNTRL ( \
257 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) | \
258 (AR5K_INIT_PROG_IFS) \
259)
260#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO ( \
261 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \
262 (AR5K_INIT_PROG_IFS_TURBO) \
263)
264
126/* token to use for aifs, cwmin, cwmax in MadWiFi */ 265/* token to use for aifs, cwmin, cwmax in MadWiFi */
127#define AR5K_TXQ_USEDEFAULT ((u32) -1) 266#define AR5K_TXQ_USEDEFAULT ((u32) -1)
128 267
@@ -142,7 +281,9 @@ enum ath5k_radio {
142 AR5K_RF5112 = 2, 281 AR5K_RF5112 = 2,
143 AR5K_RF2413 = 3, 282 AR5K_RF2413 = 3,
144 AR5K_RF5413 = 4, 283 AR5K_RF5413 = 4,
145 AR5K_RF2425 = 5, 284 AR5K_RF2316 = 5,
285 AR5K_RF2317 = 6,
286 AR5K_RF2425 = 7,
146}; 287};
147 288
148/* 289/*
@@ -150,7 +291,7 @@ enum ath5k_radio {
150 */ 291 */
151 292
152enum ath5k_srev_type { 293enum ath5k_srev_type {
153 AR5K_VERSION_VER, 294 AR5K_VERSION_MAC,
154 AR5K_VERSION_RAD, 295 AR5K_VERSION_RAD,
155}; 296};
156 297
@@ -162,23 +303,24 @@ struct ath5k_srev_name {
162 303
163#define AR5K_SREV_UNKNOWN 0xffff 304#define AR5K_SREV_UNKNOWN 0xffff
164 305
165#define AR5K_SREV_VER_AR5210 0x00 306#define AR5K_SREV_AR5210 0x00 /* Crete */
166#define AR5K_SREV_VER_AR5311 0x10 307#define AR5K_SREV_AR5311 0x10 /* Maui 1 */
167#define AR5K_SREV_VER_AR5311A 0x20 308#define AR5K_SREV_AR5311A 0x20 /* Maui 2 */
168#define AR5K_SREV_VER_AR5311B 0x30 309#define AR5K_SREV_AR5311B 0x30 /* Spirit */
169#define AR5K_SREV_VER_AR5211 0x40 310#define AR5K_SREV_AR5211 0x40 /* Oahu */
170#define AR5K_SREV_VER_AR5212 0x50 311#define AR5K_SREV_AR5212 0x50 /* Venice */
171#define AR5K_SREV_VER_AR5213 0x55 312#define AR5K_SREV_AR5213 0x55 /* ??? */
172#define AR5K_SREV_VER_AR5213A 0x59 313#define AR5K_SREV_AR5213A 0x59 /* Hainan */
173#define AR5K_SREV_VER_AR2413 0x78 314#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
174#define AR5K_SREV_VER_AR2414 0x79 315#define AR5K_SREV_AR2414 0x70 /* Griffin */
175#define AR5K_SREV_VER_AR2424 0xa0 /* PCI-E */ 316#define AR5K_SREV_AR5424 0x90 /* Condor */
176#define AR5K_SREV_VER_AR5424 0xa3 /* PCI-E */ 317#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
177#define AR5K_SREV_VER_AR5413 0xa4 318#define AR5K_SREV_AR5414 0xa0 /* Eagle */
178#define AR5K_SREV_VER_AR5414 0xa5 319#define AR5K_SREV_AR2415 0xb0 /* Cobra */
179#define AR5K_SREV_VER_AR5416 0xc0 /* PCI-E */ 320#define AR5K_SREV_AR5416 0xc0 /* PCI-E */
180#define AR5K_SREV_VER_AR5418 0xca /* PCI-E */ 321#define AR5K_SREV_AR5418 0xca /* PCI-E */
181#define AR5K_SREV_VER_AR2425 0xe2 /* PCI-E */ 322#define AR5K_SREV_AR2425 0xe0 /* Swan */
323#define AR5K_SREV_AR2417 0xf0 /* Nala */
182 324
183#define AR5K_SREV_RAD_5110 0x00 325#define AR5K_SREV_RAD_5110 0x00
184#define AR5K_SREV_RAD_5111 0x10 326#define AR5K_SREV_RAD_5111 0x10
@@ -190,13 +332,22 @@ struct ath5k_srev_name {
190#define AR5K_SREV_RAD_2112 0x40 332#define AR5K_SREV_RAD_2112 0x40
191#define AR5K_SREV_RAD_2112A 0x45 333#define AR5K_SREV_RAD_2112A 0x45
192#define AR5K_SREV_RAD_2112B 0x46 334#define AR5K_SREV_RAD_2112B 0x46
193#define AR5K_SREV_RAD_SC0 0x50 /* Found on 2413/2414 */ 335#define AR5K_SREV_RAD_2413 0x50
194#define AR5K_SREV_RAD_SC1 0x60 /* Found on 5413/5414 */ 336#define AR5K_SREV_RAD_5413 0x60
195#define AR5K_SREV_RAD_SC2 0xa0 /* Found on 2424-5/5424 */ 337#define AR5K_SREV_RAD_2316 0x70
196#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */ 338#define AR5K_SREV_RAD_2317 0x80
339#define AR5K_SREV_RAD_5424 0xa0 /* Mostly same as 5413 */
340#define AR5K_SREV_RAD_2425 0xa2
341#define AR5K_SREV_RAD_5133 0xc0
342
343#define AR5K_SREV_PHY_5211 0x30
344#define AR5K_SREV_PHY_5212 0x41
345#define AR5K_SREV_PHY_2112B 0x43
346#define AR5K_SREV_PHY_2413 0x45
347#define AR5K_SREV_PHY_5413 0x61
348#define AR5K_SREV_PHY_2425 0x70
197 349
198/* IEEE defs */ 350/* IEEE defs */
199
200#define IEEE80211_MAX_LEN 2500 351#define IEEE80211_MAX_LEN 2500
201 352
202/* TODO add support to mac80211 for vendor-specific rates and modes */ 353/* TODO add support to mac80211 for vendor-specific rates and modes */
@@ -268,21 +419,13 @@ enum ath5k_driver_mode {
268 AR5K_MODE_MAX = 5 419 AR5K_MODE_MAX = 5
269}; 420};
270 421
271/* adding this flag to rate_code enables short preamble, see ar5212_reg.h */
272#define AR5K_SET_SHORT_PREAMBLE 0x04
273
274#define HAS_SHPREAMBLE(_ix) \
275 (rt->rates[_ix].modulation == IEEE80211_RATE_SHORT_PREAMBLE)
276#define SHPREAMBLE_FLAG(_ix) \
277 (HAS_SHPREAMBLE(_ix) ? AR5K_SET_SHORT_PREAMBLE : 0)
278
279 422
280/****************\ 423/****************\
281 TX DEFINITIONS 424 TX DEFINITIONS
282\****************/ 425\****************/
283 426
284/* 427/*
285 * TX Status 428 * TX Status descriptor
286 */ 429 */
287struct ath5k_tx_status { 430struct ath5k_tx_status {
288 u16 ts_seqnum; 431 u16 ts_seqnum;
@@ -354,7 +497,6 @@ enum ath5k_tx_queue_id {
354 AR5K_TX_QUEUE_ID_XR_DATA = 9, 497 AR5K_TX_QUEUE_ID_XR_DATA = 9,
355}; 498};
356 499
357
358/* 500/*
359 * Flags to set hw queue's parameters... 501 * Flags to set hw queue's parameters...
360 */ 502 */
@@ -387,7 +529,8 @@ struct ath5k_txq_info {
387 529
388/* 530/*
389 * Transmit packet types. 531 * Transmit packet types.
390 * These are not fully used inside OpenHAL yet 532 * used on tx control descriptor
533 * TODO: Use them inside base.c corectly
391 */ 534 */
392enum ath5k_pkt_type { 535enum ath5k_pkt_type {
393 AR5K_PKT_TYPE_NORMAL = 0, 536 AR5K_PKT_TYPE_NORMAL = 0,
@@ -430,7 +573,7 @@ enum ath5k_dmasize {
430\****************/ 573\****************/
431 574
432/* 575/*
433 * RX Status 576 * RX Status descriptor
434 */ 577 */
435struct ath5k_rx_status { 578struct ath5k_rx_status {
436 u16 rs_datalen; 579 u16 rs_datalen;
@@ -494,34 +637,59 @@ struct ath5k_beacon_state {
494#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10) 637#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
495 638
496 639
640/*******************************\
641 GAIN OPTIMIZATION DEFINITIONS
642\*******************************/
643
644enum ath5k_rfgain {
645 AR5K_RFGAIN_INACTIVE = 0,
646 AR5K_RFGAIN_READ_REQUESTED,
647 AR5K_RFGAIN_NEED_CHANGE,
648};
649
650#define AR5K_GAIN_CRN_FIX_BITS_5111 4
651#define AR5K_GAIN_CRN_FIX_BITS_5112 7
652#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
653#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
654#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
655#define AR5K_GAIN_CCK_PROBE_CORR 5
656#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
657#define AR5K_GAIN_STEP_COUNT 10
658#define AR5K_GAIN_PARAM_TX_CLIP 0
659#define AR5K_GAIN_PARAM_PD_90 1
660#define AR5K_GAIN_PARAM_PD_84 2
661#define AR5K_GAIN_PARAM_GAIN_SEL 3
662#define AR5K_GAIN_PARAM_MIX_ORN 0
663#define AR5K_GAIN_PARAM_PD_138 1
664#define AR5K_GAIN_PARAM_PD_137 2
665#define AR5K_GAIN_PARAM_PD_136 3
666#define AR5K_GAIN_PARAM_PD_132 4
667#define AR5K_GAIN_PARAM_PD_131 5
668#define AR5K_GAIN_PARAM_PD_130 6
669#define AR5K_GAIN_CHECK_ADJUST(_g) \
670 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
671
672struct ath5k_gain_opt_step {
673 s16 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
674 s32 gos_gain;
675};
676
677struct ath5k_gain {
678 u32 g_step_idx;
679 u32 g_current;
680 u32 g_target;
681 u32 g_low;
682 u32 g_high;
683 u32 g_f_corr;
684 u32 g_active;
685 const struct ath5k_gain_opt_step *g_step;
686};
687
688
497/********************\ 689/********************\
498 COMMON DEFINITIONS 690 COMMON DEFINITIONS
499\********************/ 691\********************/
500 692
501/*
502 * Atheros hardware descriptor
503 * This is read and written to by the hardware
504 */
505struct ath5k_desc {
506 u32 ds_link; /* physical address of the next descriptor */
507 u32 ds_data; /* physical address of data buffer (skb) */
508
509 union {
510 struct ath5k_hw_5210_tx_desc ds_tx5210;
511 struct ath5k_hw_5212_tx_desc ds_tx5212;
512 struct ath5k_hw_all_rx_desc ds_rx;
513 } ud;
514} __packed;
515
516#define AR5K_RXDESC_INTREQ 0x0020
517
518#define AR5K_TXDESC_CLRDMASK 0x0001
519#define AR5K_TXDESC_NOACK 0x0002 /*[5211+]*/
520#define AR5K_TXDESC_RTSENA 0x0004
521#define AR5K_TXDESC_CTSENA 0x0008
522#define AR5K_TXDESC_INTREQ 0x0010
523#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
524
525#define AR5K_SLOT_TIME_9 396 693#define AR5K_SLOT_TIME_9 396
526#define AR5K_SLOT_TIME_20 880 694#define AR5K_SLOT_TIME_20 880
527#define AR5K_SLOT_TIME_MAX 0xffff 695#define AR5K_SLOT_TIME_MAX 0xffff
@@ -553,167 +721,79 @@ struct ath5k_desc {
553#define CHANNEL_MODES CHANNEL_ALL 721#define CHANNEL_MODES CHANNEL_ALL
554 722
555/* 723/*
556 * Used internaly in OpenHAL (ar5211.c/ar5212.c 724 * Used internaly for reset_tx_queue).
557 * for reset_tx_queue). Also see struct struct ieee80211_channel. 725 * Also see struct struct ieee80211_channel.
558 */ 726 */
559#define IS_CHAN_XR(_c) ((_c.hw_value & CHANNEL_XR) != 0) 727#define IS_CHAN_XR(_c) ((_c.hw_value & CHANNEL_XR) != 0)
560#define IS_CHAN_B(_c) ((_c.hw_value & CHANNEL_B) != 0) 728#define IS_CHAN_B(_c) ((_c.hw_value & CHANNEL_B) != 0)
561 729
562/* 730/*
563 * The following structure will be used to map 2GHz channels to 731 * The following structure is used to map 2GHz channels to
564 * 5GHz Atheros channels. 732 * 5GHz Atheros channels.
733 * TODO: Clean up
565 */ 734 */
566struct ath5k_athchan_2ghz { 735struct ath5k_athchan_2ghz {
567 u32 a2_flags; 736 u32 a2_flags;
568 u16 a2_athchan; 737 u16 a2_athchan;
569}; 738};
570 739
571/*
572 * Rate definitions
573 * TODO: Clean them up or move them on mac80211 -most of these infos are
574 * used by the rate control algorytm on MadWiFi.
575 */
576 740
577/* Max number of rates on the rate table and what it seems 741/******************\
578 * Atheros hardware supports */ 742 RATE DEFINITIONS
579#define AR5K_MAX_RATES 32 743\******************/
580 744
581/** 745/**
582 * struct ath5k_rate - rate structure 746 * Seems the ar5xxx harware supports up to 32 rates, indexed by 1-32.
583 * @valid: is this a valid rate for rate control (remove)
584 * @modulation: respective mac80211 modulation
585 * @rate_kbps: rate in kbit/s
586 * @rate_code: hardware rate value, used in &struct ath5k_desc, on RX on
587 * &struct ath5k_rx_status.rs_rate and on TX on
588 * &struct ath5k_tx_status.ts_rate. Seems the ar5xxx harware supports
589 * up to 32 rates, indexed by 1-32. This means we really only need
590 * 6 bits for the rate_code.
591 * @dot11_rate: respective IEEE-802.11 rate value
592 * @control_rate: index of rate assumed to be used to send control frames.
593 * This can be used to set override the value on the rate duration
594 * registers. This is only useful if we can override in the harware at
595 * what rate we want to send control frames at. Note that IEEE-802.11
596 * Ch. 9.6 (after IEEE 802.11g changes) defines the rate at which we
597 * should send ACK/CTS, if we change this value we can be breaking
598 * the spec.
599 * 747 *
600 * This structure is used to get the RX rate or set the TX rate on the 748 * The rate code is used to get the RX rate or set the TX rate on the
601 * hardware descriptors. It is also used for internal modulation control 749 * hardware descriptors. It is also used for internal modulation control
602 * and settings. 750 * and settings.
603 * 751 *
604 * On RX after the &struct ath5k_desc is parsed by the appropriate 752 * This is the hardware rate map we are aware of:
605 * ah_proc_rx_desc() the respective hardware rate value is set in
606 * &struct ath5k_rx_status.rs_rate. On TX the desired rate is set in
607 * &struct ath5k_tx_status.ts_rate which is later used to setup the
608 * &struct ath5k_desc correctly. This is the hardware rate map we are
609 * aware of:
610 * 753 *
611 * rate_code 1 2 3 4 5 6 7 8 754 * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
612 * rate_kbps 3000 1000 ? ? ? 2000 500 48000 755 * rate_kbps 3000 1000 ? ? ? 2000 500 48000
613 * 756 *
614 * rate_code 9 10 11 12 13 14 15 16 757 * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10
615 * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ? 758 * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ?
616 * 759 *
617 * rate_code 17 18 19 20 21 22 23 24 760 * rate_code 17 18 19 20 21 22 23 24
618 * rate_kbps ? ? ? ? ? ? ? 11000 761 * rate_kbps ? ? ? ? ? ? ? 11000
619 * 762 *
620 * rate_code 25 26 27 28 29 30 31 32 763 * rate_code 25 26 27 28 29 30 31 32
621 * rate_kbps 5500 2000 1000 ? ? ? ? ? 764 * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ?
622 * 765 *
766 * "S" indicates CCK rates with short preamble.
767 *
768 * AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
769 * lowest 4 bits, so they are the same as below with a 0xF mask.
770 * (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
771 * We handle this in ath5k_setup_bands().
623 */ 772 */
624struct ath5k_rate { 773#define AR5K_MAX_RATES 32
625 u8 valid;
626 u32 modulation;
627 u16 rate_kbps;
628 u8 rate_code;
629 u8 dot11_rate;
630 u8 control_rate;
631};
632
633/* XXX: GRR all this stuff to get leds blinking ??? (check out setcurmode) */
634struct ath5k_rate_table {
635 u16 rate_count;
636 u8 rate_code_to_index[AR5K_MAX_RATES]; /* Back-mapping */
637 struct ath5k_rate rates[AR5K_MAX_RATES];
638};
639
640/*
641 * Rate tables...
642 * TODO: CLEAN THIS !!!
643 */
644#define AR5K_RATES_11A { 8, { \
645 255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0, \
646 7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255, \
647 255, 255, 255, 255, 255, 255, 255, 255 }, { \
648 { 1, 0, 6000, 11, 140, 0 }, \
649 { 1, 0, 9000, 15, 18, 0 }, \
650 { 1, 0, 12000, 10, 152, 2 }, \
651 { 1, 0, 18000, 14, 36, 2 }, \
652 { 1, 0, 24000, 9, 176, 4 }, \
653 { 1, 0, 36000, 13, 72, 4 }, \
654 { 1, 0, 48000, 8, 96, 4 }, \
655 { 1, 0, 54000, 12, 108, 4 } } \
656}
657
658#define AR5K_RATES_11B { 4, { \
659 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \
660 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \
661 3, 2, 1, 0, 255, 255, 255, 255 }, { \
662 { 1, 0, 1000, 27, 130, 0 }, \
663 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 132, 1 }, \
664 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 139, 1 }, \
665 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 150, 1 } } \
666}
667
668#define AR5K_RATES_11G { 12, { \
669 255, 255, 255, 255, 255, 255, 255, 255, 10, 8, 6, 4, \
670 11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255, \
671 3, 2, 1, 0, 255, 255, 255, 255 }, { \
672 { 1, 0, 1000, 27, 2, 0 }, \
673 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 4, 1 }, \
674 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 11, 1 }, \
675 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 22, 1 }, \
676 { 0, 0, 6000, 11, 12, 4 }, \
677 { 0, 0, 9000, 15, 18, 4 }, \
678 { 1, 0, 12000, 10, 24, 6 }, \
679 { 1, 0, 18000, 14, 36, 6 }, \
680 { 1, 0, 24000, 9, 48, 8 }, \
681 { 1, 0, 36000, 13, 72, 8 }, \
682 { 1, 0, 48000, 8, 96, 8 }, \
683 { 1, 0, 54000, 12, 108, 8 } } \
684}
685
686#define AR5K_RATES_TURBO { 8, { \
687 255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0, \
688 7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255, \
689 255, 255, 255, 255, 255, 255, 255, 255 }, { \
690 { 1, MODULATION_TURBO, 6000, 11, 140, 0 }, \
691 { 1, MODULATION_TURBO, 9000, 15, 18, 0 }, \
692 { 1, MODULATION_TURBO, 12000, 10, 152, 2 }, \
693 { 1, MODULATION_TURBO, 18000, 14, 36, 2 }, \
694 { 1, MODULATION_TURBO, 24000, 9, 176, 4 }, \
695 { 1, MODULATION_TURBO, 36000, 13, 72, 4 }, \
696 { 1, MODULATION_TURBO, 48000, 8, 96, 4 }, \
697 { 1, MODULATION_TURBO, 54000, 12, 108, 4 } } \
698}
699 774
700#define AR5K_RATES_XR { 12, { \ 775/* B */
701 255, 3, 1, 255, 255, 255, 2, 0, 10, 8, 6, 4, \ 776#define ATH5K_RATE_CODE_1M 0x1B
702 11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255, \ 777#define ATH5K_RATE_CODE_2M 0x1A
703 255, 255, 255, 255, 255, 255, 255, 255 }, { \ 778#define ATH5K_RATE_CODE_5_5M 0x19
704 { 1, MODULATION_XR, 500, 7, 129, 0 }, \ 779#define ATH5K_RATE_CODE_11M 0x18
705 { 1, MODULATION_XR, 1000, 2, 139, 1 }, \ 780/* A and G */
706 { 1, MODULATION_XR, 2000, 6, 150, 2 }, \ 781#define ATH5K_RATE_CODE_6M 0x0B
707 { 1, MODULATION_XR, 3000, 1, 150, 3 }, \ 782#define ATH5K_RATE_CODE_9M 0x0F
708 { 1, 0, 6000, 11, 140, 4 }, \ 783#define ATH5K_RATE_CODE_12M 0x0A
709 { 1, 0, 9000, 15, 18, 4 }, \ 784#define ATH5K_RATE_CODE_18M 0x0E
710 { 1, 0, 12000, 10, 152, 6 }, \ 785#define ATH5K_RATE_CODE_24M 0x09
711 { 1, 0, 18000, 14, 36, 6 }, \ 786#define ATH5K_RATE_CODE_36M 0x0D
712 { 1, 0, 24000, 9, 176, 8 }, \ 787#define ATH5K_RATE_CODE_48M 0x08
713 { 1, 0, 36000, 13, 72, 8 }, \ 788#define ATH5K_RATE_CODE_54M 0x0C
714 { 1, 0, 48000, 8, 96, 8 }, \ 789/* XR */
715 { 1, 0, 54000, 12, 108, 8 } } \ 790#define ATH5K_RATE_CODE_XR_500K 0x07
716} 791#define ATH5K_RATE_CODE_XR_1M 0x02
792#define ATH5K_RATE_CODE_XR_2M 0x06
793#define ATH5K_RATE_CODE_XR_3M 0x01
794
795/* adding this flag to rate_code enables short preamble */
796#define AR5K_SET_SHORT_PREAMBLE 0x04
717 797
718/* 798/*
719 * Crypto definitions 799 * Crypto definitions
@@ -735,7 +815,6 @@ struct ath5k_rate_table {
735 return (false); \ 815 return (false); \
736} while (0) 816} while (0)
737 817
738
739enum ath5k_ant_setting { 818enum ath5k_ant_setting {
740 AR5K_ANT_VARIABLE = 0, /* variable by programming */ 819 AR5K_ANT_VARIABLE = 0, /* variable by programming */
741 AR5K_ANT_FIXED_A = 1, /* fixed to 11a frequencies */ 820 AR5K_ANT_FIXED_A = 1, /* fixed to 11a frequencies */
@@ -846,7 +925,8 @@ enum ath5k_power_mode {
846 925
847/* 926/*
848 * These match net80211 definitions (not used in 927 * These match net80211 definitions (not used in
849 * d80211). 928 * mac80211).
929 * TODO: Clean this up
850 */ 930 */
851#define AR5K_LED_INIT 0 /*IEEE80211_S_INIT*/ 931#define AR5K_LED_INIT 0 /*IEEE80211_S_INIT*/
852#define AR5K_LED_SCAN 1 /*IEEE80211_S_SCAN*/ 932#define AR5K_LED_SCAN 1 /*IEEE80211_S_SCAN*/
@@ -862,7 +942,8 @@ enum ath5k_power_mode {
862/* 942/*
863 * Chipset capabilities -see ath5k_hw_get_capability- 943 * Chipset capabilities -see ath5k_hw_get_capability-
864 * get_capability function is not yet fully implemented 944 * get_capability function is not yet fully implemented
865 * in OpenHAL so most of these don't work yet... 945 * in ath5k so most of these don't work yet...
946 * TODO: Implement these & merge with _TUNE_ stuff above
866 */ 947 */
867enum ath5k_capability_type { 948enum ath5k_capability_type {
868 AR5K_CAP_REG_DMN = 0, /* Used to get current reg. domain id */ 949 AR5K_CAP_REG_DMN = 0, /* Used to get current reg. domain id */
@@ -931,6 +1012,7 @@ struct ath5k_capabilities {
931#define AR5K_MAX_GPIO 10 1012#define AR5K_MAX_GPIO 10
932#define AR5K_MAX_RF_BANKS 8 1013#define AR5K_MAX_RF_BANKS 8
933 1014
1015/* TODO: Clean up and merge with ath5k_softc */
934struct ath5k_hw { 1016struct ath5k_hw {
935 u32 ah_magic; 1017 u32 ah_magic;
936 1018
@@ -939,7 +1021,7 @@ struct ath5k_hw {
939 1021
940 enum ath5k_int ah_imr; 1022 enum ath5k_int ah_imr;
941 1023
942 enum ieee80211_if_types ah_op_mode; 1024 enum nl80211_iftype ah_op_mode;
943 enum ath5k_power_mode ah_power_mode; 1025 enum ath5k_power_mode ah_power_mode;
944 struct ieee80211_channel ah_current_channel; 1026 struct ieee80211_channel ah_current_channel;
945 bool ah_turbo; 1027 bool ah_turbo;
@@ -1023,11 +1105,13 @@ struct ath5k_hw {
1023 /* 1105 /*
1024 * Function pointers 1106 * Function pointers
1025 */ 1107 */
1108 int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
1109 u32 size, unsigned int flags);
1026 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1110 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1027 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, 1111 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
1028 unsigned int, unsigned int, unsigned int, unsigned int, 1112 unsigned int, unsigned int, unsigned int, unsigned int,
1029 unsigned int, unsigned int, unsigned int); 1113 unsigned int, unsigned int, unsigned int);
1030 int (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1114 int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1031 unsigned int, unsigned int, unsigned int, unsigned int, 1115 unsigned int, unsigned int, unsigned int, unsigned int,
1032 unsigned int, unsigned int); 1116 unsigned int, unsigned int);
1033 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1117 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
@@ -1040,33 +1124,38 @@ struct ath5k_hw {
1040 * Prototypes 1124 * Prototypes
1041 */ 1125 */
1042 1126
1043/* General Functions */
1044extern int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, bool is_set);
1045/* Attach/Detach Functions */ 1127/* Attach/Detach Functions */
1046extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version); 1128extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version);
1047extern const struct ath5k_rate_table *ath5k_hw_get_rate_table(struct ath5k_hw *ah, unsigned int mode);
1048extern void ath5k_hw_detach(struct ath5k_hw *ah); 1129extern void ath5k_hw_detach(struct ath5k_hw *ah);
1130
1049/* Reset Functions */ 1131/* Reset Functions */
1050extern int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode, struct ieee80211_channel *channel, bool change_channel); 1132extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
1133extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel);
1051/* Power management functions */ 1134/* Power management functions */
1052extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration); 1135extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
1136
1053/* DMA Related Functions */ 1137/* DMA Related Functions */
1054extern void ath5k_hw_start_rx(struct ath5k_hw *ah); 1138extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
1055extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah); 1139extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
1056extern u32 ath5k_hw_get_rx_buf(struct ath5k_hw *ah); 1140extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
1057extern void ath5k_hw_put_rx_buf(struct ath5k_hw *ah, u32 phys_addr); 1141extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
1058extern int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue); 1142extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1059extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1143extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1060extern u32 ath5k_hw_get_tx_buf(struct ath5k_hw *ah, unsigned int queue); 1144extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
1061extern int ath5k_hw_put_tx_buf(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr); 1145extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
1146 u32 phys_addr);
1062extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase); 1147extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
1063/* Interrupt handling */ 1148/* Interrupt handling */
1064extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah); 1149extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
1065extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask); 1150extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
1066extern enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask); 1151extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum
1152ath5k_int new_mask);
1067extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats); 1153extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
1154
1068/* EEPROM access functions */ 1155/* EEPROM access functions */
1069extern int ath5k_hw_set_regdomain(struct ath5k_hw *ah, u16 regdomain); 1156extern int ath5k_eeprom_init(struct ath5k_hw *ah);
1157extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1158
1070/* Protocol Control Unit Functions */ 1159/* Protocol Control Unit Functions */
1071extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1160extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
1072/* BSSID Functions */ 1161/* BSSID Functions */
@@ -1076,14 +1165,14 @@ extern void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc
1076extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1165extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1077/* Receive start/stop functions */ 1166/* Receive start/stop functions */
1078extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1167extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1079extern void ath5k_hw_stop_pcu_recv(struct ath5k_hw *ah); 1168extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
1080/* RX Filter functions */ 1169/* RX Filter functions */
1081extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1); 1170extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
1082extern int ath5k_hw_set_mcast_filterindex(struct ath5k_hw *ah, u32 index); 1171extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
1083extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index); 1172extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index);
1084extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah); 1173extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
1085extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter); 1174extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1086/* Beacon related functions */ 1175/* Beacon control functions */
1087extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah); 1176extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah);
1088extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); 1177extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1089extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1178extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
@@ -1105,61 +1194,129 @@ extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1105extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); 1194extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
1106extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac); 1195extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac);
1107extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac); 1196extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
1197
1108/* Queue Control Unit, DFS Control Unit Functions */ 1198/* Queue Control Unit, DFS Control Unit Functions */
1109extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info);
1110extern int ath5k_hw_setup_tx_queueprops(struct ath5k_hw *ah, int queue, const struct ath5k_txq_info *queue_info);
1111extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info); 1199extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info);
1200extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
1201 const struct ath5k_txq_info *queue_info);
1202extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
1203 enum ath5k_tx_queue queue_type,
1204 struct ath5k_txq_info *queue_info);
1205extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1112extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1206extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1113extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1207extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1114extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1115extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
1116extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah); 1208extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah);
1209extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
1210
1117/* Hardware Descriptor Functions */ 1211/* Hardware Descriptor Functions */
1118extern int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, u32 size, unsigned int flags); 1212extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
1213
1119/* GPIO Functions */ 1214/* GPIO Functions */
1120extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); 1215extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
1121extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
1122extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio); 1216extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
1217extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
1123extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio); 1218extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
1124extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val); 1219extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
1125extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level); 1220extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
1221
1126/* Misc functions */ 1222/* Misc functions */
1223int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
1127extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result); 1224extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
1128 1225extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
1226extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
1129 1227
1130/* Initial register settings functions */ 1228/* Initial register settings functions */
1131extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); 1229extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
1230
1132/* Initialize RF */ 1231/* Initialize RF */
1133extern int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int mode); 1232extern int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int mode);
1134extern int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq); 1233extern int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq);
1135extern enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah); 1234extern enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah);
1136extern int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah); 1235extern int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah);
1137
1138
1139/* PHY/RF channel functions */ 1236/* PHY/RF channel functions */
1140extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1237extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1141extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1238extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1142/* PHY calibration */ 1239/* PHY calibration */
1143extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1240extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1144extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1241extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
1145/* Misc PHY functions */ 1242/* Misc PHY functions */
1146extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan); 1243extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
1147extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant); 1244extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, unsigned int ant);
1148extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah); 1245extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
1149extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); 1246extern int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1150/* TX power setup */ 1247/* TX power setup */
1151extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower); 1248extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, unsigned int txpower);
1152extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power); 1249extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power);
1153 1250
1251/*
1252 * Functions used internaly
1253 */
1154 1254
1255/*
1256 * Translate usec to hw clock units
1257 */
1258static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
1259{
1260 return turbo ? (usec * 80) : (usec * 40);
1261}
1262
1263/*
1264 * Translate hw clock units to usec
1265 */
1266static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1267{
1268 return turbo ? (clock / 80) : (clock / 40);
1269}
1270
1271/*
1272 * Read from a register
1273 */
1155static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) 1274static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1156{ 1275{
1157 return ioread32(ah->ah_iobase + reg); 1276 return ioread32(ah->ah_iobase + reg);
1158} 1277}
1159 1278
1279/*
1280 * Write to a register
1281 */
1160static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) 1282static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1161{ 1283{
1162 iowrite32(val, ah->ah_iobase + reg); 1284 iowrite32(val, ah->ah_iobase + reg);
1163} 1285}
1164 1286
1287#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
1288/*
1289 * Check if a register write has been completed
1290 */
1291static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
1292 u32 val, bool is_set)
1293{
1294 int i;
1295 u32 data;
1296
1297 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
1298 data = ath5k_hw_reg_read(ah, reg);
1299 if (is_set && (data & flag))
1300 break;
1301 else if ((data & flag) == val)
1302 break;
1303 udelay(15);
1304 }
1305
1306 return (i <= 0) ? -EAGAIN : 0;
1307}
1308#endif
1309
1310static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1311{
1312 u32 retval = 0, bit, i;
1313
1314 for (i = 0; i < bits; i++) {
1315 bit = (val >> i) & 1;
1316 retval = (retval << 1) | bit;
1317 }
1318
1319 return retval;
1320}
1321
1165#endif 1322#endif
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
new file mode 100644
index 000000000000..51d569883cdd
--- /dev/null
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -0,0 +1,359 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*************************************\
20* Attach/Detach Functions and helpers *
21\*************************************/
22
23#include <linux/pci.h>
24#include "ath5k.h"
25#include "reg.h"
26#include "debug.h"
27#include "base.h"
28
29/**
30 * ath5k_hw_post - Power On Self Test helper function
31 *
32 * @ah: The &struct ath5k_hw
33 */
34static int ath5k_hw_post(struct ath5k_hw *ah)
35{
36
37 int i, c;
38 u16 cur_reg;
39 u16 regs[2] = {AR5K_STA_ID0, AR5K_PHY(8)};
40 u32 var_pattern;
41 u32 static_pattern[4] = {
42 0x55555555, 0xaaaaaaaa,
43 0x66666666, 0x99999999
44 };
45 u32 init_val;
46 u32 cur_val;
47
48 for (c = 0; c < 2; c++) {
49
50 cur_reg = regs[c];
51
52 /* Save previous value */
53 init_val = ath5k_hw_reg_read(ah, cur_reg);
54
55 for (i = 0; i < 256; i++) {
56 var_pattern = i << 16 | i;
57 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
58 cur_val = ath5k_hw_reg_read(ah, cur_reg);
59
60 if (cur_val != var_pattern) {
61 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
62 return -EAGAIN;
63 }
64
65 /* Found on ndiswrapper dumps */
66 var_pattern = 0x0039080f;
67 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
68 }
69
70 for (i = 0; i < 4; i++) {
71 var_pattern = static_pattern[i];
72 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
73 cur_val = ath5k_hw_reg_read(ah, cur_reg);
74
75 if (cur_val != var_pattern) {
76 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
77 return -EAGAIN;
78 }
79
80 /* Found on ndiswrapper dumps */
81 var_pattern = 0x003b080f;
82 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
83 }
84
85 /* Restore previous value */
86 ath5k_hw_reg_write(ah, init_val, cur_reg);
87
88 }
89
90 return 0;
91
92}
93
94/**
95 * ath5k_hw_attach - Check if hw is supported and init the needed structs
96 *
97 * @sc: The &struct ath5k_softc we got from the driver's attach function
98 * @mac_version: The mac version id (check out ath5k.h) based on pci id
99 *
100 * Check if the device is supported, perform a POST and initialize the needed
101 * structs. Returns -ENOMEM if we don't have memory for the needed structs,
102 * -ENODEV if the device is not supported or prints an error msg if something
103 * else went wrong.
104 */
105struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
106{
107 struct ath5k_hw *ah;
108 struct pci_dev *pdev = sc->pdev;
109 u8 mac[ETH_ALEN];
110 int ret;
111 u32 srev;
112
113 /*If we passed the test malloc a ath5k_hw struct*/
114 ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
115 if (ah == NULL) {
116 ret = -ENOMEM;
117 ATH5K_ERR(sc, "out of memory\n");
118 goto err;
119 }
120
121 ah->ah_sc = sc;
122 ah->ah_iobase = sc->iobase;
123
124 /*
125 * HW information
126 */
127 ah->ah_op_mode = NL80211_IFTYPE_STATION;
128 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
129 ah->ah_turbo = false;
130 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
131 ah->ah_imr = 0;
132 ah->ah_atim_window = 0;
133 ah->ah_aifs = AR5K_TUNE_AIFS;
134 ah->ah_cw_min = AR5K_TUNE_CWMIN;
135 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
136 ah->ah_software_retry = false;
137 ah->ah_ant_diversity = AR5K_TUNE_ANT_DIVERSITY;
138
139 /*
140 * Set the mac version based on the pci id
141 */
142 ah->ah_version = mac_version;
143
144 /*Fill the ath5k_hw struct with the needed functions*/
145 ret = ath5k_hw_init_desc_functions(ah);
146 if (ret)
147 goto err_free;
148
149 /* Bring device out of sleep and reset it's units */
150 ret = ath5k_hw_nic_wakeup(ah, CHANNEL_B, true);
151 if (ret)
152 goto err_free;
153
154 /* Get MAC, PHY and RADIO revisions */
155 srev = ath5k_hw_reg_read(ah, AR5K_SREV);
156 ah->ah_mac_srev = srev;
157 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
158 ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
159 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
160 0xffffffff;
161 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
162 CHANNEL_5GHZ);
163 ah->ah_phy = AR5K_PHY(0);
164
165 /* Try to identify radio chip based on it's srev */
166 switch (ah->ah_radio_5ghz_revision & 0xf0) {
167 case AR5K_SREV_RAD_5111:
168 ah->ah_radio = AR5K_RF5111;
169 ah->ah_single_chip = false;
170 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
171 CHANNEL_2GHZ);
172 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
173 break;
174 case AR5K_SREV_RAD_5112:
175 case AR5K_SREV_RAD_2112:
176 ah->ah_radio = AR5K_RF5112;
177 ah->ah_single_chip = false;
178 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
179 CHANNEL_2GHZ);
180 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
181 break;
182 case AR5K_SREV_RAD_2413:
183 ah->ah_radio = AR5K_RF2413;
184 ah->ah_single_chip = true;
185 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
186 break;
187 case AR5K_SREV_RAD_5413:
188 ah->ah_radio = AR5K_RF5413;
189 ah->ah_single_chip = true;
190 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
191 break;
192 case AR5K_SREV_RAD_2316:
193 ah->ah_radio = AR5K_RF2316;
194 ah->ah_single_chip = true;
195 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2316;
196 break;
197 case AR5K_SREV_RAD_2317:
198 ah->ah_radio = AR5K_RF2317;
199 ah->ah_single_chip = true;
200 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2317;
201 break;
202 case AR5K_SREV_RAD_5424:
203 if (ah->ah_mac_version == AR5K_SREV_AR2425 ||
204 ah->ah_mac_version == AR5K_SREV_AR2417){
205 ah->ah_radio = AR5K_RF2425;
206 ah->ah_single_chip = true;
207 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
208 } else {
209 ah->ah_radio = AR5K_RF5413;
210 ah->ah_single_chip = true;
211 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
212 }
213 break;
214 default:
215 /* Identify radio based on mac/phy srev */
216 if (ah->ah_version == AR5K_AR5210) {
217 ah->ah_radio = AR5K_RF5110;
218 ah->ah_single_chip = false;
219 } else if (ah->ah_version == AR5K_AR5211) {
220 ah->ah_radio = AR5K_RF5111;
221 ah->ah_single_chip = false;
222 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
223 CHANNEL_2GHZ);
224 } else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
225 ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
226 ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
227 ah->ah_radio = AR5K_RF2425;
228 ah->ah_single_chip = true;
229 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425;
230 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
231 } else if (srev == AR5K_SREV_AR5213A &&
232 ah->ah_phy_revision == AR5K_SREV_PHY_2112B) {
233 ah->ah_radio = AR5K_RF5112;
234 ah->ah_single_chip = false;
235 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2112B;
236 } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) {
237 ah->ah_radio = AR5K_RF2316;
238 ah->ah_single_chip = true;
239 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
240 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2316;
241 } else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) ||
242 ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
243 ah->ah_radio = AR5K_RF5413;
244 ah->ah_single_chip = true;
245 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413;
246 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
247 } else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) ||
248 ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
249 ah->ah_radio = AR5K_RF2413;
250 ah->ah_single_chip = true;
251 ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
252 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
253 } else {
254 ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
255 ret = -ENODEV;
256 goto err_free;
257 }
258 }
259
260
261 /* Return on unsuported chips (unsupported eeprom etc) */
262 if ((srev >= AR5K_SREV_AR5416) &&
263 (srev < AR5K_SREV_AR2425)) {
264 ATH5K_ERR(sc, "Device not yet supported.\n");
265 ret = -ENODEV;
266 goto err_free;
267 }
268
269 /*
270 * Write PCI-E power save settings
271 */
272 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
273 ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
274 ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
275 /* Shut off RX when elecidle is asserted */
276 ath5k_hw_reg_write(ah, 0x28000039, AR5K_PCIE_SERDES);
277 ath5k_hw_reg_write(ah, 0x53160824, AR5K_PCIE_SERDES);
278 /* TODO: EEPROM work */
279 ath5k_hw_reg_write(ah, 0xe5980579, AR5K_PCIE_SERDES);
280 /* Shut off PLL and CLKREQ active in L1 */
281 ath5k_hw_reg_write(ah, 0x001defff, AR5K_PCIE_SERDES);
282 /* Preserce other settings */
283 ath5k_hw_reg_write(ah, 0x1aaabe40, AR5K_PCIE_SERDES);
284 ath5k_hw_reg_write(ah, 0xbe105554, AR5K_PCIE_SERDES);
285 ath5k_hw_reg_write(ah, 0x000e3007, AR5K_PCIE_SERDES);
286 /* Reset SERDES to load new settings */
287 ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
288 mdelay(1);
289 }
290
291 /*
292 * POST
293 */
294 ret = ath5k_hw_post(ah);
295 if (ret)
296 goto err_free;
297
298 /* Enable pci core retry fix on Hainan (5213A) and later chips */
299 if (srev >= AR5K_SREV_AR5213A)
300 ath5k_hw_reg_write(ah, AR5K_PCICFG_RETRY_FIX, AR5K_PCICFG);
301
302 /*
303 * Get card capabilities, calibration values etc
304 * TODO: EEPROM work
305 */
306 ret = ath5k_eeprom_init(ah);
307 if (ret) {
308 ATH5K_ERR(sc, "unable to init EEPROM\n");
309 goto err_free;
310 }
311
312 /* Get misc capabilities */
313 ret = ath5k_hw_set_capabilities(ah);
314 if (ret) {
315 ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
316 sc->pdev->device);
317 goto err_free;
318 }
319
320 /* Set MAC address */
321 ret = ath5k_eeprom_read_mac(ah, mac);
322 if (ret) {
323 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
324 sc->pdev->device);
325 goto err_free;
326 }
327
328 ath5k_hw_set_lladdr(ah, mac);
329 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
330 memset(ah->ah_bssid, 0xff, ETH_ALEN);
331 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
332 ath5k_hw_set_opmode(ah);
333
334 ath5k_hw_set_rfgain_opt(ah);
335
336 return ah;
337err_free:
338 kfree(ah);
339err:
340 return ERR_PTR(ret);
341}
342
343/**
344 * ath5k_hw_detach - Free the ath5k_hw struct
345 *
346 * @ah: The &struct ath5k_hw
347 */
348void ath5k_hw_detach(struct ath5k_hw *ah)
349{
350 ATH5K_TRACE(ah->ah_sc);
351
352 __set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
353
354 if (ah->ah_rf_banks != NULL)
355 kfree(ah->ah_rf_banks);
356
357 /* assume interrupts are down */
358 kfree(ah);
359}
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 0676c6d84383..c151588aa484 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -72,7 +72,7 @@ MODULE_AUTHOR("Nick Kossifidis");
72MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); 72MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
73MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); 73MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
74MODULE_LICENSE("Dual BSD/GPL"); 74MODULE_LICENSE("Dual BSD/GPL");
75MODULE_VERSION("0.5.0 (EXPERIMENTAL)"); 75MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
76 76
77 77
78/* Known PCI ids */ 78/* Known PCI ids */
@@ -93,45 +93,94 @@ static struct pci_device_id ath5k_pci_id_table[] __devinitdata = {
93 { PCI_VDEVICE(ATHEROS, 0x0019), .driver_data = AR5K_AR5212 }, /* 5212 combatible */ 93 { PCI_VDEVICE(ATHEROS, 0x0019), .driver_data = AR5K_AR5212 }, /* 5212 combatible */
94 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ 94 { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */
95 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ 95 { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */
96 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ 96 { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* PCI-E cards */
97 { PCI_VDEVICE(ATHEROS, 0x001d), .driver_data = AR5K_AR5212 }, /* 2417 Nala */
97 { 0 } 98 { 0 }
98}; 99};
99MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); 100MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
100 101
101/* Known SREVs */ 102/* Known SREVs */
102static struct ath5k_srev_name srev_names[] = { 103static struct ath5k_srev_name srev_names[] = {
103 { "5210", AR5K_VERSION_VER, AR5K_SREV_VER_AR5210 }, 104 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
104 { "5311", AR5K_VERSION_VER, AR5K_SREV_VER_AR5311 }, 105 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
105 { "5311A", AR5K_VERSION_VER, AR5K_SREV_VER_AR5311A }, 106 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
106 { "5311B", AR5K_VERSION_VER, AR5K_SREV_VER_AR5311B }, 107 { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
107 { "5211", AR5K_VERSION_VER, AR5K_SREV_VER_AR5211 }, 108 { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
108 { "5212", AR5K_VERSION_VER, AR5K_SREV_VER_AR5212 }, 109 { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
109 { "5213", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213 }, 110 { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
110 { "5213A", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213A }, 111 { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
111 { "2413", AR5K_VERSION_VER, AR5K_SREV_VER_AR2413 }, 112 { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
112 { "2414", AR5K_VERSION_VER, AR5K_SREV_VER_AR2414 }, 113 { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
113 { "2424", AR5K_VERSION_VER, AR5K_SREV_VER_AR2424 }, 114 { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
114 { "5424", AR5K_VERSION_VER, AR5K_SREV_VER_AR5424 }, 115 { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
115 { "5413", AR5K_VERSION_VER, AR5K_SREV_VER_AR5413 }, 116 { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
116 { "5414", AR5K_VERSION_VER, AR5K_SREV_VER_AR5414 }, 117 { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
117 { "5416", AR5K_VERSION_VER, AR5K_SREV_VER_AR5416 }, 118 { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
118 { "5418", AR5K_VERSION_VER, AR5K_SREV_VER_AR5418 }, 119 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
119 { "2425", AR5K_VERSION_VER, AR5K_SREV_VER_AR2425 }, 120 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
120 { "xxxxx", AR5K_VERSION_VER, AR5K_SREV_UNKNOWN }, 121 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
122 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
121 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, 123 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
122 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, 124 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
125 { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
123 { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 }, 126 { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
124 { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 }, 127 { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
125 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, 128 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
129 { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
126 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, 130 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
127 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, 131 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
128 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC0 }, 132 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
129 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC1 }, 133 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
130 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC2 }, 134 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
135 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
136 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
137 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
131 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, 138 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
132 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, 139 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
133}; 140};
134 141
142static struct ieee80211_rate ath5k_rates[] = {
143 { .bitrate = 10,
144 .hw_value = ATH5K_RATE_CODE_1M, },
145 { .bitrate = 20,
146 .hw_value = ATH5K_RATE_CODE_2M,
147 .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
148 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
149 { .bitrate = 55,
150 .hw_value = ATH5K_RATE_CODE_5_5M,
151 .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
152 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
153 { .bitrate = 110,
154 .hw_value = ATH5K_RATE_CODE_11M,
155 .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
156 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
157 { .bitrate = 60,
158 .hw_value = ATH5K_RATE_CODE_6M,
159 .flags = 0 },
160 { .bitrate = 90,
161 .hw_value = ATH5K_RATE_CODE_9M,
162 .flags = 0 },
163 { .bitrate = 120,
164 .hw_value = ATH5K_RATE_CODE_12M,
165 .flags = 0 },
166 { .bitrate = 180,
167 .hw_value = ATH5K_RATE_CODE_18M,
168 .flags = 0 },
169 { .bitrate = 240,
170 .hw_value = ATH5K_RATE_CODE_24M,
171 .flags = 0 },
172 { .bitrate = 360,
173 .hw_value = ATH5K_RATE_CODE_36M,
174 .flags = 0 },
175 { .bitrate = 480,
176 .hw_value = ATH5K_RATE_CODE_48M,
177 .flags = 0 },
178 { .bitrate = 540,
179 .hw_value = ATH5K_RATE_CODE_54M,
180 .flags = 0 },
181 /* XR missing */
182};
183
135/* 184/*
136 * Prototypes - PCI stack related functions 185 * Prototypes - PCI stack related functions
137 */ 186 */
@@ -162,7 +211,8 @@ static struct pci_driver ath5k_pci_driver = {
162 * Prototypes - MAC 802.11 stack related functions 211 * Prototypes - MAC 802.11 stack related functions
163 */ 212 */
164static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 213static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
165static int ath5k_reset(struct ieee80211_hw *hw); 214static int ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel);
215static int ath5k_reset_wake(struct ath5k_softc *sc);
166static int ath5k_start(struct ieee80211_hw *hw); 216static int ath5k_start(struct ieee80211_hw *hw);
167static void ath5k_stop(struct ieee80211_hw *hw); 217static void ath5k_stop(struct ieee80211_hw *hw);
168static int ath5k_add_interface(struct ieee80211_hw *hw, 218static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -218,20 +268,16 @@ static void ath5k_detach(struct pci_dev *pdev,
218 struct ieee80211_hw *hw); 268 struct ieee80211_hw *hw);
219/* Channel/mode setup */ 269/* Channel/mode setup */
220static inline short ath5k_ieee2mhz(short chan); 270static inline short ath5k_ieee2mhz(short chan);
221static unsigned int ath5k_copy_rates(struct ieee80211_rate *rates,
222 const struct ath5k_rate_table *rt,
223 unsigned int max);
224static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, 271static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
225 struct ieee80211_channel *channels, 272 struct ieee80211_channel *channels,
226 unsigned int mode, 273 unsigned int mode,
227 unsigned int max); 274 unsigned int max);
228static int ath5k_getchannels(struct ieee80211_hw *hw); 275static int ath5k_setup_bands(struct ieee80211_hw *hw);
229static int ath5k_chan_set(struct ath5k_softc *sc, 276static int ath5k_chan_set(struct ath5k_softc *sc,
230 struct ieee80211_channel *chan); 277 struct ieee80211_channel *chan);
231static void ath5k_setcurmode(struct ath5k_softc *sc, 278static void ath5k_setcurmode(struct ath5k_softc *sc,
232 unsigned int mode); 279 unsigned int mode);
233static void ath5k_mode_setup(struct ath5k_softc *sc); 280static void ath5k_mode_setup(struct ath5k_softc *sc);
234static void ath5k_set_total_hw_rates(struct ath5k_softc *sc);
235 281
236/* Descriptor setup */ 282/* Descriptor setup */
237static int ath5k_desc_alloc(struct ath5k_softc *sc, 283static int ath5k_desc_alloc(struct ath5k_softc *sc,
@@ -351,7 +397,11 @@ ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
351 for (i = 0; i < ARRAY_SIZE(srev_names); i++) { 397 for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
352 if (srev_names[i].sr_type != type) 398 if (srev_names[i].sr_type != type)
353 continue; 399 continue;
354 if ((val & 0xff) < srev_names[i + 1].sr_val) { 400
401 if ((val & 0xf0) == srev_names[i].sr_val)
402 name = srev_names[i].sr_name;
403
404 if ((val & 0xff) == srev_names[i].sr_val) {
355 name = srev_names[i].sr_name; 405 name = srev_names[i].sr_name;
356 break; 406 break;
357 } 407 }
@@ -446,6 +496,12 @@ ath5k_pci_probe(struct pci_dev *pdev,
446 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 496 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
447 IEEE80211_HW_SIGNAL_DBM | 497 IEEE80211_HW_SIGNAL_DBM |
448 IEEE80211_HW_NOISE_DBM; 498 IEEE80211_HW_NOISE_DBM;
499
500 hw->wiphy->interface_modes =
501 BIT(NL80211_IFTYPE_STATION) |
502 BIT(NL80211_IFTYPE_ADHOC) |
503 BIT(NL80211_IFTYPE_MESH_POINT);
504
449 hw->extra_tx_headroom = 2; 505 hw->extra_tx_headroom = 2;
450 hw->channel_change_time = 5000; 506 hw->channel_change_time = 5000;
451 sc = hw->priv; 507 sc = hw->priv;
@@ -462,7 +518,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
462 518
463 sc->iobase = mem; /* So we can unmap it on detach */ 519 sc->iobase = mem; /* So we can unmap it on detach */
464 sc->cachelsz = csz * sizeof(u32); /* convert to bytes */ 520 sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
465 sc->opmode = IEEE80211_IF_TYPE_STA; 521 sc->opmode = NL80211_IFTYPE_STATION;
466 mutex_init(&sc->lock); 522 mutex_init(&sc->lock);
467 spin_lock_init(&sc->rxbuflock); 523 spin_lock_init(&sc->rxbuflock);
468 spin_lock_init(&sc->txbuflock); 524 spin_lock_init(&sc->txbuflock);
@@ -491,7 +547,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
491 goto err_ah; 547 goto err_ah;
492 548
493 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", 549 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
494 ath5k_chip_name(AR5K_VERSION_VER,sc->ah->ah_mac_srev), 550 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
495 sc->ah->ah_mac_srev, 551 sc->ah->ah_mac_srev,
496 sc->ah->ah_phy_revision); 552 sc->ah->ah_phy_revision);
497 553
@@ -646,7 +702,6 @@ err_no_irq:
646#endif /* CONFIG_PM */ 702#endif /* CONFIG_PM */
647 703
648 704
649
650/***********************\ 705/***********************\
651* Driver Initialization * 706* Driver Initialization *
652\***********************/ 707\***********************/
@@ -669,7 +724,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
669 * return false w/o doing anything. MAC's that do 724 * return false w/o doing anything. MAC's that do
670 * support it will return true w/o doing anything. 725 * support it will return true w/o doing anything.
671 */ 726 */
672 ret = ah->ah_setup_xtx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); 727 ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
673 if (ret < 0) 728 if (ret < 0)
674 goto err; 729 goto err;
675 if (ret > 0) 730 if (ret > 0)
@@ -688,15 +743,12 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
688 * on settings like the phy mode and regulatory 743 * on settings like the phy mode and regulatory
689 * domain restrictions. 744 * domain restrictions.
690 */ 745 */
691 ret = ath5k_getchannels(hw); 746 ret = ath5k_setup_bands(hw);
692 if (ret) { 747 if (ret) {
693 ATH5K_ERR(sc, "can't get channels\n"); 748 ATH5K_ERR(sc, "can't get channels\n");
694 goto err; 749 goto err;
695 } 750 }
696 751
697 /* Set *_rates so we can map hw rate index */
698 ath5k_set_total_hw_rates(sc);
699
700 /* NB: setup here so ath5k_rate_update is happy */ 752 /* NB: setup here so ath5k_rate_update is happy */
701 if (test_bit(AR5K_MODE_11A, ah->ah_modes)) 753 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
702 ath5k_setcurmode(sc, AR5K_MODE_11A); 754 ath5k_setcurmode(sc, AR5K_MODE_11A);
@@ -813,27 +865,6 @@ ath5k_ieee2mhz(short chan)
813} 865}
814 866
815static unsigned int 867static unsigned int
816ath5k_copy_rates(struct ieee80211_rate *rates,
817 const struct ath5k_rate_table *rt,
818 unsigned int max)
819{
820 unsigned int i, count;
821
822 if (rt == NULL)
823 return 0;
824
825 for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) {
826 rates[count].bitrate = rt->rates[i].rate_kbps / 100;
827 rates[count].hw_value = rt->rates[i].rate_code;
828 rates[count].flags = rt->rates[i].modulation;
829 count++;
830 max--;
831 }
832
833 return count;
834}
835
836static unsigned int
837ath5k_copy_channels(struct ath5k_hw *ah, 868ath5k_copy_channels(struct ath5k_hw *ah,
838 struct ieee80211_channel *channels, 869 struct ieee80211_channel *channels,
839 unsigned int mode, 870 unsigned int mode,
@@ -895,74 +926,97 @@ ath5k_copy_channels(struct ath5k_hw *ah,
895 return count; 926 return count;
896} 927}
897 928
929static void
930ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
931{
932 u8 i;
933
934 for (i = 0; i < AR5K_MAX_RATES; i++)
935 sc->rate_idx[b->band][i] = -1;
936
937 for (i = 0; i < b->n_bitrates; i++) {
938 sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
939 if (b->bitrates[i].hw_value_short)
940 sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
941 }
942}
943
898static int 944static int
899ath5k_getchannels(struct ieee80211_hw *hw) 945ath5k_setup_bands(struct ieee80211_hw *hw)
900{ 946{
901 struct ath5k_softc *sc = hw->priv; 947 struct ath5k_softc *sc = hw->priv;
902 struct ath5k_hw *ah = sc->ah; 948 struct ath5k_hw *ah = sc->ah;
903 struct ieee80211_supported_band *sbands = sc->sbands; 949 struct ieee80211_supported_band *sband;
904 const struct ath5k_rate_table *hw_rates; 950 int max_c, count_c = 0;
905 unsigned int max_r, max_c, count_r, count_c; 951 int i;
906 int mode2g = AR5K_MODE_11G;
907 952
908 BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS); 953 BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
909
910 max_r = ARRAY_SIZE(sc->rates);
911 max_c = ARRAY_SIZE(sc->channels); 954 max_c = ARRAY_SIZE(sc->channels);
912 count_r = count_c = 0;
913 955
914 /* 2GHz band */ 956 /* 2GHz band */
915 if (!test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) { 957 sband = &sc->sbands[IEEE80211_BAND_2GHZ];
916 mode2g = AR5K_MODE_11B; 958 sband->band = IEEE80211_BAND_2GHZ;
917 if (!test_bit(AR5K_MODE_11B, 959 sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
918 sc->ah->ah_capabilities.cap_mode))
919 mode2g = -1;
920 }
921 960
922 if (mode2g > 0) { 961 if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
923 struct ieee80211_supported_band *sband = 962 /* G mode */
924 &sbands[IEEE80211_BAND_2GHZ]; 963 memcpy(sband->bitrates, &ath5k_rates[0],
964 sizeof(struct ieee80211_rate) * 12);
965 sband->n_bitrates = 12;
925 966
926 sband->bitrates = sc->rates;
927 sband->channels = sc->channels; 967 sband->channels = sc->channels;
928
929 sband->band = IEEE80211_BAND_2GHZ;
930 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 968 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
931 mode2g, max_c); 969 AR5K_MODE_11G, max_c);
932
933 hw_rates = ath5k_hw_get_rate_table(ah, mode2g);
934 sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
935 hw_rates, max_r);
936 970
971 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
937 count_c = sband->n_channels; 972 count_c = sband->n_channels;
938 count_r = sband->n_bitrates; 973 max_c -= count_c;
974 } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
975 /* B mode */
976 memcpy(sband->bitrates, &ath5k_rates[0],
977 sizeof(struct ieee80211_rate) * 4);
978 sband->n_bitrates = 4;
979
980 /* 5211 only supports B rates and uses 4bit rate codes
981 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
982 * fix them up here:
983 */
984 if (ah->ah_version == AR5K_AR5211) {
985 for (i = 0; i < 4; i++) {
986 sband->bitrates[i].hw_value =
987 sband->bitrates[i].hw_value & 0xF;
988 sband->bitrates[i].hw_value_short =
989 sband->bitrates[i].hw_value_short & 0xF;
990 }
991 }
939 992
940 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 993 sband->channels = sc->channels;
994 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
995 AR5K_MODE_11B, max_c);
941 996
942 max_r -= count_r; 997 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
998 count_c = sband->n_channels;
943 max_c -= count_c; 999 max_c -= count_c;
944
945 } 1000 }
1001 ath5k_setup_rate_idx(sc, sband);
946 1002
947 /* 5GHz band */ 1003 /* 5GHz band, A mode */
948
949 if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) { 1004 if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
950 struct ieee80211_supported_band *sband = 1005 sband = &sc->sbands[IEEE80211_BAND_5GHZ];
951 &sbands[IEEE80211_BAND_5GHZ]; 1006 sband->band = IEEE80211_BAND_5GHZ;
1007 sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
952 1008
953 sband->bitrates = &sc->rates[count_r]; 1009 memcpy(sband->bitrates, &ath5k_rates[4],
954 sband->channels = &sc->channels[count_c]; 1010 sizeof(struct ieee80211_rate) * 8);
1011 sband->n_bitrates = 8;
955 1012
956 sband->band = IEEE80211_BAND_5GHZ; 1013 sband->channels = &sc->channels[count_c];
957 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 1014 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
958 AR5K_MODE_11A, max_c); 1015 AR5K_MODE_11A, max_c);
959 1016
960 hw_rates = ath5k_hw_get_rate_table(ah, AR5K_MODE_11A);
961 sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
962 hw_rates, max_r);
963
964 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 1017 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
965 } 1018 }
1019 ath5k_setup_rate_idx(sc, sband);
966 1020
967 ath5k_debug_dump_bands(sc); 1021 ath5k_debug_dump_bands(sc);
968 1022
@@ -978,9 +1032,6 @@ ath5k_getchannels(struct ieee80211_hw *hw)
978static int 1032static int
979ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) 1033ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
980{ 1034{
981 struct ath5k_hw *ah = sc->ah;
982 int ret;
983
984 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n", 1035 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
985 sc->curchan->center_freq, chan->center_freq); 1036 sc->curchan->center_freq, chan->center_freq);
986 1037
@@ -996,41 +1047,7 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
996 * hardware at the new frequency, and then re-enable 1047 * hardware at the new frequency, and then re-enable
997 * the relevant bits of the h/w. 1048 * the relevant bits of the h/w.
998 */ 1049 */
999 ath5k_hw_set_intr(ah, 0); /* disable interrupts */ 1050 return ath5k_reset(sc, true, true);
1000 ath5k_txq_cleanup(sc); /* clear pending tx frames */
1001 ath5k_rx_stop(sc); /* turn off frame recv */
1002 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
1003 if (ret) {
1004 ATH5K_ERR(sc, "%s: unable to reset channel "
1005 "(%u Mhz)\n", __func__, chan->center_freq);
1006 return ret;
1007 }
1008
1009 ath5k_hw_set_txpower_limit(sc->ah, 0);
1010
1011 /*
1012 * Re-enable rx framework.
1013 */
1014 ret = ath5k_rx_start(sc);
1015 if (ret) {
1016 ATH5K_ERR(sc, "%s: unable to restart recv logic\n",
1017 __func__);
1018 return ret;
1019 }
1020
1021 /*
1022 * Change channels and update the h/w rate map
1023 * if we're switching; e.g. 11a to 11b/g.
1024 *
1025 * XXX needed?
1026 */
1027/* ath5k_chan_change(sc, chan); */
1028
1029 ath5k_beacon_config(sc);
1030 /*
1031 * Re-enable interrupts.
1032 */
1033 ath5k_hw_set_intr(ah, sc->imask);
1034 } 1051 }
1035 1052
1036 return 0; 1053 return 0;
@@ -1068,75 +1085,13 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1068 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 1085 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
1069} 1086}
1070 1087
1071/*
1072 * Match the hw provided rate index (through descriptors)
1073 * to an index for sc->curband->bitrates, so it can be used
1074 * by the stack.
1075 *
1076 * This one is a little bit tricky but i think i'm right
1077 * about this...
1078 *
1079 * We have 4 rate tables in the following order:
1080 * XR (4 rates)
1081 * 802.11a (8 rates)
1082 * 802.11b (4 rates)
1083 * 802.11g (12 rates)
1084 * that make the hw rate table.
1085 *
1086 * Lets take a 5211 for example that supports a and b modes only.
1087 * First comes the 802.11a table and then 802.11b (total 12 rates).
1088 * When hw returns eg. 11 it points to the last 802.11b rate (11Mbit),
1089 * if it returns 2 it points to the second 802.11a rate etc.
1090 *
1091 * Same goes for 5212 who has xr/a/b/g support (total 28 rates).
1092 * First comes the XR table, then 802.11a, 802.11b and 802.11g.
1093 * When hw returns eg. 27 it points to the last 802.11g rate (54Mbits) etc
1094 */
1095static void
1096ath5k_set_total_hw_rates(struct ath5k_softc *sc) {
1097
1098 struct ath5k_hw *ah = sc->ah;
1099
1100 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
1101 sc->a_rates = 8;
1102
1103 if (test_bit(AR5K_MODE_11B, ah->ah_modes))
1104 sc->b_rates = 4;
1105
1106 if (test_bit(AR5K_MODE_11G, ah->ah_modes))
1107 sc->g_rates = 12;
1108
1109 /* XXX: Need to see what what happens when
1110 xr disable bits in eeprom are set */
1111 if (ah->ah_version >= AR5K_AR5212)
1112 sc->xr_rates = 4;
1113
1114}
1115
1116static inline int 1088static inline int
1117ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) { 1089ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
1118 1090{
1119 int mac80211_rix; 1091 WARN_ON(hw_rix < 0 || hw_rix > AR5K_MAX_RATES);
1120 1092 return sc->rate_idx[sc->curband->band][hw_rix];
1121 if(sc->curband->band == IEEE80211_BAND_2GHZ) {
1122 /* We setup a g ratetable for both b/g modes */
1123 mac80211_rix =
1124 hw_rix - sc->b_rates - sc->a_rates - sc->xr_rates;
1125 } else {
1126 mac80211_rix = hw_rix - sc->xr_rates;
1127 }
1128
1129 /* Something went wrong, fallback to basic rate for this band */
1130 if ((mac80211_rix >= sc->curband->n_bitrates) ||
1131 (mac80211_rix <= 0 ))
1132 mac80211_rix = 1;
1133
1134 return mac80211_rix;
1135} 1093}
1136 1094
1137
1138
1139
1140/***************\ 1095/***************\
1141* Buffers setup * 1096* Buffers setup *
1142\***************/ 1097\***************/
@@ -1199,7 +1154,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1199 ds = bf->desc; 1154 ds = bf->desc;
1200 ds->ds_link = bf->daddr; /* link to self */ 1155 ds->ds_link = bf->daddr; /* link to self */
1201 ds->ds_data = bf->skbaddr; 1156 ds->ds_data = bf->skbaddr;
1202 ath5k_hw_setup_rx_desc(ah, ds, 1157 ah->ah_setup_rx_desc(ah, ds,
1203 skb_tailroom(skb), /* buffer size */ 1158 skb_tailroom(skb), /* buffer size */
1204 0); 1159 0);
1205 1160
@@ -1250,12 +1205,12 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1250 list_add_tail(&bf->list, &txq->q); 1205 list_add_tail(&bf->list, &txq->q);
1251 sc->tx_stats[txq->qnum].len++; 1206 sc->tx_stats[txq->qnum].len++;
1252 if (txq->link == NULL) /* is this first packet? */ 1207 if (txq->link == NULL) /* is this first packet? */
1253 ath5k_hw_put_tx_buf(ah, txq->qnum, bf->daddr); 1208 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
1254 else /* no, so only link it */ 1209 else /* no, so only link it */
1255 *txq->link = bf->daddr; 1210 *txq->link = bf->daddr;
1256 1211
1257 txq->link = &ds->ds_link; 1212 txq->link = &ds->ds_link;
1258 ath5k_hw_tx_start(ah, txq->qnum); 1213 ath5k_hw_start_tx_dma(ah, txq->qnum);
1259 mmiowb(); 1214 mmiowb();
1260 spin_unlock_bh(&txq->lock); 1215 spin_unlock_bh(&txq->lock);
1261 1216
@@ -1433,7 +1388,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1433 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); 1388 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1434 if (ret) 1389 if (ret)
1435 return ret; 1390 return ret;
1436 if (sc->opmode == IEEE80211_IF_TYPE_AP) { 1391 if (sc->opmode == NL80211_IFTYPE_AP ||
1392 sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1437 /* 1393 /*
1438 * Always burst out beacon and CAB traffic 1394 * Always burst out beacon and CAB traffic
1439 * (aifs = cwmin = cwmax = 0) 1395 * (aifs = cwmin = cwmax = 0)
@@ -1441,7 +1397,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1441 qi.tqi_aifs = 0; 1397 qi.tqi_aifs = 0;
1442 qi.tqi_cw_min = 0; 1398 qi.tqi_cw_min = 0;
1443 qi.tqi_cw_max = 0; 1399 qi.tqi_cw_max = 0;
1444 } else if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { 1400 } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
1445 /* 1401 /*
1446 * Adhoc mode; backoff between 0 and (2 * cw_min). 1402 * Adhoc mode; backoff between 0 and (2 * cw_min).
1447 */ 1403 */
@@ -1454,7 +1410,7 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
1454 "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n", 1410 "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
1455 qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max); 1411 qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
1456 1412
1457 ret = ath5k_hw_setup_tx_queueprops(ah, sc->bhalq, &qi); 1413 ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
1458 if (ret) { 1414 if (ret) {
1459 ATH5K_ERR(sc, "%s: unable to update parameters for beacon " 1415 ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1460 "hardware queue!\n", __func__); 1416 "hardware queue!\n", __func__);
@@ -1503,14 +1459,14 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
1503 /* don't touch the hardware if marked invalid */ 1459 /* don't touch the hardware if marked invalid */
1504 ath5k_hw_stop_tx_dma(ah, sc->bhalq); 1460 ath5k_hw_stop_tx_dma(ah, sc->bhalq);
1505 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", 1461 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n",
1506 ath5k_hw_get_tx_buf(ah, sc->bhalq)); 1462 ath5k_hw_get_txdp(ah, sc->bhalq));
1507 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) 1463 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1508 if (sc->txqs[i].setup) { 1464 if (sc->txqs[i].setup) {
1509 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); 1465 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
1510 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, " 1466 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
1511 "link %p\n", 1467 "link %p\n",
1512 sc->txqs[i].qnum, 1468 sc->txqs[i].qnum,
1513 ath5k_hw_get_tx_buf(ah, 1469 ath5k_hw_get_txdp(ah,
1514 sc->txqs[i].qnum), 1470 sc->txqs[i].qnum),
1515 sc->txqs[i].link); 1471 sc->txqs[i].link);
1516 } 1472 }
@@ -1570,8 +1526,8 @@ ath5k_rx_start(struct ath5k_softc *sc)
1570 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); 1526 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
1571 spin_unlock_bh(&sc->rxbuflock); 1527 spin_unlock_bh(&sc->rxbuflock);
1572 1528
1573 ath5k_hw_put_rx_buf(ah, bf->daddr); 1529 ath5k_hw_set_rxdp(ah, bf->daddr);
1574 ath5k_hw_start_rx(ah); /* enable recv descriptors */ 1530 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
1575 ath5k_mode_setup(sc); /* set filters, etc. */ 1531 ath5k_mode_setup(sc); /* set filters, etc. */
1576 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 1532 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
1577 1533
@@ -1588,7 +1544,7 @@ ath5k_rx_stop(struct ath5k_softc *sc)
1588{ 1544{
1589 struct ath5k_hw *ah = sc->ah; 1545 struct ath5k_hw *ah = sc->ah;
1590 1546
1591 ath5k_hw_stop_pcu_recv(ah); /* disable PCU */ 1547 ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
1592 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ 1548 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
1593 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ 1549 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
1594 1550
@@ -1602,7 +1558,7 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1602 struct sk_buff *skb, struct ath5k_rx_status *rs) 1558 struct sk_buff *skb, struct ath5k_rx_status *rs)
1603{ 1559{
1604 struct ieee80211_hdr *hdr = (void *)skb->data; 1560 struct ieee80211_hdr *hdr = (void *)skb->data;
1605 unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb); 1561 unsigned int keyix, hlen;
1606 1562
1607 if (!(rs->rs_status & AR5K_RXERR_DECRYPT) && 1563 if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1608 rs->rs_keyix != AR5K_RXKEYIX_INVALID) 1564 rs->rs_keyix != AR5K_RXKEYIX_INVALID)
@@ -1611,6 +1567,7 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1611 /* Apparently when a default key is used to decrypt the packet 1567 /* Apparently when a default key is used to decrypt the packet
1612 the hw does not set the index used to decrypt. In such cases 1568 the hw does not set the index used to decrypt. In such cases
1613 get the index from the packet. */ 1569 get the index from the packet. */
1570 hlen = ieee80211_hdrlen(hdr->frame_control);
1614 if (ieee80211_has_protected(hdr->frame_control) && 1571 if (ieee80211_has_protected(hdr->frame_control) &&
1615 !(rs->rs_status & AR5K_RXERR_DECRYPT) && 1572 !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1616 skb->len >= hlen + 4) { 1573 skb->len >= hlen + 4) {
@@ -1768,7 +1725,7 @@ ath5k_tasklet_rx(unsigned long data)
1768 /* let crypto-error packets fall through in MNTR */ 1725 /* let crypto-error packets fall through in MNTR */
1769 if ((rs.rs_status & 1726 if ((rs.rs_status &
1770 ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 1727 ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
1771 sc->opmode != IEEE80211_IF_TYPE_MNTR) 1728 sc->opmode != NL80211_IFTYPE_MONITOR)
1772 goto next; 1729 goto next;
1773 } 1730 }
1774accept: 1731accept:
@@ -1824,10 +1781,14 @@ accept:
1824 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 1781 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
1825 rxs.flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); 1782 rxs.flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
1826 1783
1784 if (rxs.rate_idx >= 0 && rs.rs_rate ==
1785 sc->curband->bitrates[rxs.rate_idx].hw_value_short)
1786 rxs.flag |= RX_FLAG_SHORTPRE;
1787
1827 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 1788 ath5k_debug_dump_skb(sc, skb, "RX ", 0);
1828 1789
1829 /* check beacons in IBSS mode */ 1790 /* check beacons in IBSS mode */
1830 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) 1791 if (sc->opmode == NL80211_IFTYPE_ADHOC)
1831 ath5k_check_ibss_tsf(sc, skb, &rxs); 1792 ath5k_check_ibss_tsf(sc, skb, &rxs);
1832 1793
1833 __ieee80211_rx(sc->hw, skb, &rxs); 1794 __ieee80211_rx(sc->hw, skb, &rxs);
@@ -1942,7 +1903,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1942 ds = bf->desc; 1903 ds = bf->desc;
1943 1904
1944 flags = AR5K_TXDESC_NOACK; 1905 flags = AR5K_TXDESC_NOACK;
1945 if (sc->opmode == IEEE80211_IF_TYPE_IBSS && ath5k_hw_hasveol(ah)) { 1906 if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
1946 ds->ds_link = bf->daddr; /* self-linked */ 1907 ds->ds_link = bf->daddr; /* self-linked */
1947 flags |= AR5K_TXDESC_VEOL; 1908 flags |= AR5K_TXDESC_VEOL;
1948 /* 1909 /*
@@ -1991,8 +1952,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1991 1952
1992 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 1953 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
1993 1954
1994 if (unlikely(bf->skb == NULL || sc->opmode == IEEE80211_IF_TYPE_STA || 1955 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
1995 sc->opmode == IEEE80211_IF_TYPE_MNTR)) { 1956 sc->opmode == NL80211_IFTYPE_MONITOR)) {
1996 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); 1957 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
1997 return; 1958 return;
1998 } 1959 }
@@ -2032,8 +1993,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2032 /* NB: hw still stops DMA, so proceed */ 1993 /* NB: hw still stops DMA, so proceed */
2033 } 1994 }
2034 1995
2035 ath5k_hw_put_tx_buf(ah, sc->bhalq, bf->daddr); 1996 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
2036 ath5k_hw_tx_start(ah, sc->bhalq); 1997 ath5k_hw_start_tx_dma(ah, sc->bhalq);
2037 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", 1998 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
2038 sc->bhalq, (unsigned long long)bf->daddr, bf->desc); 1999 sc->bhalq, (unsigned long long)bf->daddr, bf->desc);
2039 2000
@@ -2162,13 +2123,13 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2162{ 2123{
2163 struct ath5k_hw *ah = sc->ah; 2124 struct ath5k_hw *ah = sc->ah;
2164 2125
2165 ath5k_hw_set_intr(ah, 0); 2126 ath5k_hw_set_imr(ah, 0);
2166 sc->bmisscount = 0; 2127 sc->bmisscount = 0;
2167 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); 2128 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2168 2129
2169 if (sc->opmode == IEEE80211_IF_TYPE_STA) { 2130 if (sc->opmode == NL80211_IFTYPE_STATION) {
2170 sc->imask |= AR5K_INT_BMISS; 2131 sc->imask |= AR5K_INT_BMISS;
2171 } else if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { 2132 } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2172 /* 2133 /*
2173 * In IBSS mode we use a self-linked tx descriptor and let the 2134 * In IBSS mode we use a self-linked tx descriptor and let the
2174 * hardware send the beacons automatically. We have to load it 2135 * hardware send the beacons automatically. We have to load it
@@ -2188,7 +2149,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2188 } 2149 }
2189 /* TODO else AP */ 2150 /* TODO else AP */
2190 2151
2191 ath5k_hw_set_intr(ah, sc->imask); 2152 ath5k_hw_set_imr(ah, sc->imask);
2192} 2153}
2193 2154
2194 2155
@@ -2220,36 +2181,13 @@ ath5k_init(struct ath5k_softc *sc)
2220 */ 2181 */
2221 sc->curchan = sc->hw->conf.channel; 2182 sc->curchan = sc->hw->conf.channel;
2222 sc->curband = &sc->sbands[sc->curchan->band]; 2183 sc->curband = &sc->sbands[sc->curchan->band];
2223 ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false);
2224 if (ret) {
2225 ATH5K_ERR(sc, "unable to reset hardware: %d\n", ret);
2226 goto done;
2227 }
2228 /*
2229 * This is needed only to setup initial state
2230 * but it's best done after a reset.
2231 */
2232 ath5k_hw_set_txpower_limit(sc->ah, 0);
2233
2234 /*
2235 * Setup the hardware after reset: the key cache
2236 * is filled as needed and the receive engine is
2237 * set going. Frame transmit is handled entirely
2238 * in the frame output path; there's nothing to do
2239 * here except setup the interrupt mask.
2240 */
2241 ret = ath5k_rx_start(sc);
2242 if (ret)
2243 goto done;
2244
2245 /*
2246 * Enable interrupts.
2247 */
2248 sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL | 2184 sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL |
2249 AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL | 2185 AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL |
2250 AR5K_INT_MIB; 2186 AR5K_INT_MIB;
2187 ret = ath5k_reset(sc, false, false);
2188 if (ret)
2189 goto done;
2251 2190
2252 ath5k_hw_set_intr(sc->ah, sc->imask);
2253 /* Set ack to be sent at low bit-rates */ 2191 /* Set ack to be sent at low bit-rates */
2254 ath5k_hw_set_ack_bitrate_high(sc->ah, false); 2192 ath5k_hw_set_ack_bitrate_high(sc->ah, false);
2255 2193
@@ -2290,7 +2228,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2290 2228
2291 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2229 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2292 ath5k_led_off(sc); 2230 ath5k_led_off(sc);
2293 ath5k_hw_set_intr(ah, 0); 2231 ath5k_hw_set_imr(ah, 0);
2294 synchronize_irq(sc->pdev->irq); 2232 synchronize_irq(sc->pdev->irq);
2295 } 2233 }
2296 ath5k_txq_cleanup(sc); 2234 ath5k_txq_cleanup(sc);
@@ -2396,7 +2334,7 @@ ath5k_intr(int irq, void *dev_id)
2396 * transmission time) in order to detect wether 2334 * transmission time) in order to detect wether
2397 * automatic TSF updates happened. 2335 * automatic TSF updates happened.
2398 */ 2336 */
2399 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { 2337 if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2400 /* XXX: only if VEOL suppported */ 2338 /* XXX: only if VEOL suppported */
2401 u64 tsf = ath5k_hw_get_tsf64(ah); 2339 u64 tsf = ath5k_hw_get_tsf64(ah);
2402 sc->nexttbtt += sc->bintval; 2340 sc->nexttbtt += sc->bintval;
@@ -2451,7 +2389,7 @@ ath5k_tasklet_reset(unsigned long data)
2451{ 2389{
2452 struct ath5k_softc *sc = (void *)data; 2390 struct ath5k_softc *sc = (void *)data;
2453 2391
2454 ath5k_reset(sc->hw); 2392 ath5k_reset_wake(sc);
2455} 2393}
2456 2394
2457/* 2395/*
@@ -2474,7 +2412,7 @@ ath5k_calibrate(unsigned long data)
2474 * to load new gain values. 2412 * to load new gain values.
2475 */ 2413 */
2476 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); 2414 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2477 ath5k_reset(sc->hw); 2415 ath5k_reset_wake(sc);
2478 } 2416 }
2479 if (ath5k_hw_phy_calibrate(ah, sc->curchan)) 2417 if (ath5k_hw_phy_calibrate(ah, sc->curchan))
2480 ATH5K_ERR(sc, "calibration of channel %u failed\n", 2418 ATH5K_ERR(sc, "calibration of channel %u failed\n",
@@ -2626,7 +2564,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2626 2564
2627 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2565 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
2628 2566
2629 if (sc->opmode == IEEE80211_IF_TYPE_MNTR) 2567 if (sc->opmode == NL80211_IFTYPE_MONITOR)
2630 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n"); 2568 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n");
2631 2569
2632 /* 2570 /*
@@ -2675,48 +2613,67 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2675} 2613}
2676 2614
2677static int 2615static int
2678ath5k_reset(struct ieee80211_hw *hw) 2616ath5k_reset(struct ath5k_softc *sc, bool stop, bool change_channel)
2679{ 2617{
2680 struct ath5k_softc *sc = hw->priv;
2681 struct ath5k_hw *ah = sc->ah; 2618 struct ath5k_hw *ah = sc->ah;
2682 int ret; 2619 int ret;
2683 2620
2684 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); 2621 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2685 2622
2686 ath5k_hw_set_intr(ah, 0); 2623 if (stop) {
2687 ath5k_txq_cleanup(sc); 2624 ath5k_hw_set_imr(ah, 0);
2688 ath5k_rx_stop(sc); 2625 ath5k_txq_cleanup(sc);
2689 2626 ath5k_rx_stop(sc);
2627 }
2690 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true); 2628 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
2691 if (unlikely(ret)) { 2629 if (ret) {
2692 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); 2630 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
2693 goto err; 2631 goto err;
2694 } 2632 }
2633
2634 /*
2635 * This is needed only to setup initial state
2636 * but it's best done after a reset.
2637 */
2695 ath5k_hw_set_txpower_limit(sc->ah, 0); 2638 ath5k_hw_set_txpower_limit(sc->ah, 0);
2696 2639
2697 ret = ath5k_rx_start(sc); 2640 ret = ath5k_rx_start(sc);
2698 if (unlikely(ret)) { 2641 if (ret) {
2699 ATH5K_ERR(sc, "can't start recv logic\n"); 2642 ATH5K_ERR(sc, "can't start recv logic\n");
2700 goto err; 2643 goto err;
2701 } 2644 }
2645
2702 /* 2646 /*
2703 * We may be doing a reset in response to an ioctl 2647 * Change channels and update the h/w rate map if we're switching;
2704 * that changes the channel so update any state that 2648 * e.g. 11a to 11b/g.
2705 * might change as a result. 2649 *
2650 * We may be doing a reset in response to an ioctl that changes the
2651 * channel so update any state that might change as a result.
2706 * 2652 *
2707 * XXX needed? 2653 * XXX needed?
2708 */ 2654 */
2709/* ath5k_chan_change(sc, c); */ 2655/* ath5k_chan_change(sc, c); */
2710 ath5k_beacon_config(sc);
2711 /* intrs are started by ath5k_beacon_config */
2712 2656
2713 ieee80211_wake_queues(hw); 2657 ath5k_beacon_config(sc);
2658 /* intrs are enabled by ath5k_beacon_config */
2714 2659
2715 return 0; 2660 return 0;
2716err: 2661err:
2717 return ret; 2662 return ret;
2718} 2663}
2719 2664
2665static int
2666ath5k_reset_wake(struct ath5k_softc *sc)
2667{
2668 int ret;
2669
2670 ret = ath5k_reset(sc, true, true);
2671 if (!ret)
2672 ieee80211_wake_queues(sc->hw);
2673
2674 return ret;
2675}
2676
2720static int ath5k_start(struct ieee80211_hw *hw) 2677static int ath5k_start(struct ieee80211_hw *hw)
2721{ 2678{
2722 return ath5k_init(hw->priv); 2679 return ath5k_init(hw->priv);
@@ -2742,9 +2699,9 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2742 sc->vif = conf->vif; 2699 sc->vif = conf->vif;
2743 2700
2744 switch (conf->type) { 2701 switch (conf->type) {
2745 case IEEE80211_IF_TYPE_STA: 2702 case NL80211_IFTYPE_STATION:
2746 case IEEE80211_IF_TYPE_IBSS: 2703 case NL80211_IFTYPE_ADHOC:
2747 case IEEE80211_IF_TYPE_MNTR: 2704 case NL80211_IFTYPE_MONITOR:
2748 sc->opmode = conf->type; 2705 sc->opmode = conf->type;
2749 break; 2706 break;
2750 default: 2707 default:
@@ -2815,7 +2772,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2815 } 2772 }
2816 2773
2817 if (conf->changed & IEEE80211_IFCC_BEACON && 2774 if (conf->changed & IEEE80211_IFCC_BEACON &&
2818 vif->type == IEEE80211_IF_TYPE_IBSS) { 2775 vif->type == NL80211_IFTYPE_ADHOC) {
2819 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2776 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2820 if (!beacon) { 2777 if (!beacon) {
2821 ret = -ENOMEM; 2778 ret = -ENOMEM;
@@ -2827,7 +2784,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2827 2784
2828 mutex_unlock(&sc->lock); 2785 mutex_unlock(&sc->lock);
2829 2786
2830 return ath5k_reset(hw); 2787 return ath5k_reset_wake(sc);
2831unlock: 2788unlock:
2832 mutex_unlock(&sc->lock); 2789 mutex_unlock(&sc->lock);
2833 return ret; 2790 return ret;
@@ -2934,16 +2891,17 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2934 2891
2935 /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ 2892 /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
2936 2893
2937 if (sc->opmode == IEEE80211_IF_TYPE_MNTR) 2894 if (sc->opmode == NL80211_IFTYPE_MONITOR)
2938 rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | 2895 rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON |
2939 AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; 2896 AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM;
2940 if (sc->opmode != IEEE80211_IF_TYPE_STA) 2897 if (sc->opmode != NL80211_IFTYPE_STATION)
2941 rfilt |= AR5K_RX_FILTER_PROBEREQ; 2898 rfilt |= AR5K_RX_FILTER_PROBEREQ;
2942 if (sc->opmode != IEEE80211_IF_TYPE_AP && 2899 if (sc->opmode != NL80211_IFTYPE_AP &&
2900 sc->opmode != NL80211_IFTYPE_MESH_POINT &&
2943 test_bit(ATH_STAT_PROMISC, sc->status)) 2901 test_bit(ATH_STAT_PROMISC, sc->status))
2944 rfilt |= AR5K_RX_FILTER_PROM; 2902 rfilt |= AR5K_RX_FILTER_PROM;
2945 if (sc->opmode == IEEE80211_IF_TYPE_STA || 2903 if (sc->opmode == NL80211_IFTYPE_STATION ||
2946 sc->opmode == IEEE80211_IF_TYPE_IBSS) { 2904 sc->opmode == NL80211_IFTYPE_ADHOC) {
2947 rfilt |= AR5K_RX_FILTER_BEACON; 2905 rfilt |= AR5K_RX_FILTER_BEACON;
2948 } 2906 }
2949 2907
@@ -3048,7 +3006,7 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
3048 * in IBSS mode we need to update the beacon timers too. 3006 * in IBSS mode we need to update the beacon timers too.
3049 * this will also reset the TSF if we call it with 0 3007 * this will also reset the TSF if we call it with 0
3050 */ 3008 */
3051 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) 3009 if (sc->opmode == NL80211_IFTYPE_ADHOC)
3052 ath5k_beacon_update_timers(sc, 0); 3010 ath5k_beacon_update_timers(sc, 0);
3053 else 3011 else
3054 ath5k_hw_reset_tsf(sc->ah); 3012 ath5k_hw_reset_tsf(sc->ah);
@@ -3063,7 +3021,7 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3063 3021
3064 ath5k_debug_dump_skb(sc, skb, "BC ", 1); 3022 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3065 3023
3066 if (sc->opmode != IEEE80211_IF_TYPE_IBSS) { 3024 if (sc->opmode != NL80211_IFTYPE_ADHOC) {
3067 ret = -EIO; 3025 ret = -EIO;
3068 goto end; 3026 goto end;
3069 } 3027 }
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 7ec2f377d5c7..9d0b728928e3 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -111,17 +111,13 @@ struct ath5k_softc {
111 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 111 struct ieee80211_hw *hw; /* IEEE 802.11 common */
112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 112 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
113 struct ieee80211_channel channels[ATH_CHAN_MAX]; 113 struct ieee80211_channel channels[ATH_CHAN_MAX];
114 struct ieee80211_rate rates[AR5K_MAX_RATES * IEEE80211_NUM_BANDS]; 114 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
115 enum ieee80211_if_types opmode; 115 u8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
116 enum nl80211_iftype opmode;
116 struct ath5k_hw *ah; /* Atheros HW */ 117 struct ath5k_hw *ah; /* Atheros HW */
117 118
118 struct ieee80211_supported_band *curband; 119 struct ieee80211_supported_band *curband;
119 120
120 u8 a_rates;
121 u8 b_rates;
122 u8 g_rates;
123 u8 xr_rates;
124
125#ifdef CONFIG_ATH5K_DEBUG 121#ifdef CONFIG_ATH5K_DEBUG
126 struct ath5k_dbg_info debug; /* debug info */ 122 struct ath5k_dbg_info debug; /* debug info */
127#endif /* CONFIG_ATH5K_DEBUG */ 123#endif /* CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath5k/caps.c b/drivers/net/wireless/ath5k/caps.c
new file mode 100644
index 000000000000..150f5ed204a0
--- /dev/null
+++ b/drivers/net/wireless/ath5k/caps.c
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 */
19
20/**************\
21* Capabilities *
22\**************/
23
24#include "ath5k.h"
25#include "reg.h"
26#include "debug.h"
27#include "base.h"
28
29/*
30 * Fill the capabilities struct
31 * TODO: Merge this with EEPROM code when we are done with it
32 */
33int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
34{
35 u16 ee_header;
36
37 ATH5K_TRACE(ah->ah_sc);
38 /* Capabilities stored in the EEPROM */
39 ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
40
41 if (ah->ah_version == AR5K_AR5210) {
42 /*
43 * Set radio capabilities
44 * (The AR5110 only supports the middle 5GHz band)
45 */
46 ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
47 ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
48 ah->ah_capabilities.cap_range.range_2ghz_min = 0;
49 ah->ah_capabilities.cap_range.range_2ghz_max = 0;
50
51 /* Set supported modes */
52 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
53 __set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
54 } else {
55 /*
56 * XXX The tranceiver supports frequencies from 4920 to 6100GHz
57 * XXX and from 2312 to 2732GHz. There are problems with the
58 * XXX current ieee80211 implementation because the IEEE
59 * XXX channel mapping does not support negative channel
60 * XXX numbers (2312MHz is channel -19). Of course, this
61 * XXX doesn't matter because these channels are out of range
62 * XXX but some regulation domains like MKK (Japan) will
63 * XXX support frequencies somewhere around 4.8GHz.
64 */
65
66 /*
67 * Set radio capabilities
68 */
69
70 if (AR5K_EEPROM_HDR_11A(ee_header)) {
71 /* 4920 */
72 ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
73 ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
74
75 /* Set supported modes */
76 __set_bit(AR5K_MODE_11A,
77 ah->ah_capabilities.cap_mode);
78 __set_bit(AR5K_MODE_11A_TURBO,
79 ah->ah_capabilities.cap_mode);
80 if (ah->ah_version == AR5K_AR5212)
81 __set_bit(AR5K_MODE_11G_TURBO,
82 ah->ah_capabilities.cap_mode);
83 }
84
85 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
86 * connected */
87 if (AR5K_EEPROM_HDR_11B(ee_header) ||
88 AR5K_EEPROM_HDR_11G(ee_header)) {
89 /* 2312 */
90 ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
91 ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
92
93 if (AR5K_EEPROM_HDR_11B(ee_header))
94 __set_bit(AR5K_MODE_11B,
95 ah->ah_capabilities.cap_mode);
96
97 if (AR5K_EEPROM_HDR_11G(ee_header))
98 __set_bit(AR5K_MODE_11G,
99 ah->ah_capabilities.cap_mode);
100 }
101 }
102
103 /* GPIO */
104 ah->ah_gpio_npins = AR5K_NUM_GPIO;
105
106 /* Set number of supported TX queues */
107 if (ah->ah_version == AR5K_AR5210)
108 ah->ah_capabilities.cap_queues.q_tx_num =
109 AR5K_NUM_TX_QUEUES_NOQCU;
110 else
111 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
112
113 return 0;
114}
115
116/* Main function used by the driver part to check caps */
117int ath5k_hw_get_capability(struct ath5k_hw *ah,
118 enum ath5k_capability_type cap_type,
119 u32 capability, u32 *result)
120{
121 ATH5K_TRACE(ah->ah_sc);
122
123 switch (cap_type) {
124 case AR5K_CAP_NUM_TXQUEUES:
125 if (result) {
126 if (ah->ah_version == AR5K_AR5210)
127 *result = AR5K_NUM_TX_QUEUES_NOQCU;
128 else
129 *result = AR5K_NUM_TX_QUEUES;
130 goto yes;
131 }
132 case AR5K_CAP_VEOL:
133 goto yes;
134 case AR5K_CAP_COMPRESSION:
135 if (ah->ah_version == AR5K_AR5212)
136 goto yes;
137 else
138 goto no;
139 case AR5K_CAP_BURST:
140 goto yes;
141 case AR5K_CAP_TPC:
142 goto yes;
143 case AR5K_CAP_BSSIDMASK:
144 if (ah->ah_version == AR5K_AR5212)
145 goto yes;
146 else
147 goto no;
148 case AR5K_CAP_XR:
149 if (ah->ah_version == AR5K_AR5212)
150 goto yes;
151 else
152 goto no;
153 default:
154 goto no;
155 }
156
157no:
158 return -EINVAL;
159yes:
160 return 0;
161}
162
163/*
164 * TODO: Following functions should be part of a new function
165 * set_capability
166 */
167
168int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
169 u16 assoc_id)
170{
171 ATH5K_TRACE(ah->ah_sc);
172
173 if (ah->ah_version == AR5K_AR5210) {
174 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
175 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
176 return 0;
177 }
178
179 return -EIO;
180}
181
182int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
183{
184 ATH5K_TRACE(ah->ah_sc);
185
186 if (ah->ah_version == AR5K_AR5210) {
187 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
188 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
189 return 0;
190 }
191
192 return -EIO;
193}
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 6fa6c8e04ff0..8f92d670f614 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -58,8 +58,8 @@
58 * THE POSSIBILITY OF SUCH DAMAGES. 58 * THE POSSIBILITY OF SUCH DAMAGES.
59 */ 59 */
60 60
61#include "debug.h"
62#include "base.h" 61#include "base.h"
62#include "debug.h"
63 63
64static unsigned int ath5k_debug; 64static unsigned int ath5k_debug;
65module_param_named(debug, ath5k_debug, uint, 0); 65module_param_named(debug, ath5k_debug, uint, 0);
@@ -525,7 +525,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
525 return; 525 return;
526 526
527 printk(KERN_DEBUG "rx queue %x, link %p\n", 527 printk(KERN_DEBUG "rx queue %x, link %p\n",
528 ath5k_hw_get_rx_buf(ah), sc->rxlink); 528 ath5k_hw_get_rxdp(ah), sc->rxlink);
529 529
530 spin_lock_bh(&sc->rxbuflock); 530 spin_lock_bh(&sc->rxbuflock);
531 list_for_each_entry(bf, &sc->rxbuf, list) { 531 list_for_each_entry(bf, &sc->rxbuf, list) {
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
new file mode 100644
index 000000000000..d45b90a6e06c
--- /dev/null
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -0,0 +1,667 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 *
18 */
19
20/******************************\
21 Hardware Descriptor Functions
22\******************************/
23
24#include "ath5k.h"
25#include "reg.h"
26#include "debug.h"
27#include "base.h"
28
29/*
30 * TX Descriptors
31 */
32
33/*
34 * Initialize the 2-word tx control descriptor on 5210/5211
35 */
36static int
37ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
38 unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
39 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
40 unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
41 unsigned int rtscts_rate, unsigned int rtscts_duration)
42{
43 u32 frame_type;
44 struct ath5k_hw_2w_tx_ctl *tx_ctl;
45 unsigned int frame_len;
46
47 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
48
49 /*
50 * Validate input
51 * - Zero retries don't make sense.
52 * - A zero rate will put the HW into a mode where it continously sends
53 * noise on the channel, so it is important to avoid this.
54 */
55 if (unlikely(tx_tries0 == 0)) {
56 ATH5K_ERR(ah->ah_sc, "zero retries\n");
57 WARN_ON(1);
58 return -EINVAL;
59 }
60 if (unlikely(tx_rate0 == 0)) {
61 ATH5K_ERR(ah->ah_sc, "zero rate\n");
62 WARN_ON(1);
63 return -EINVAL;
64 }
65
66 /* Clear descriptor */
67 memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
68
69 /* Setup control descriptor */
70
71 /* Verify and set frame length */
72
73 /* remove padding we might have added before */
74 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
75
76 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
77 return -EINVAL;
78
79 tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
80
81 /* Verify and set buffer length */
82
83 /* NB: beacon's BufLen must be a multiple of 4 bytes */
84 if (type == AR5K_PKT_TYPE_BEACON)
85 pkt_len = roundup(pkt_len, 4);
86
87 if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
88 return -EINVAL;
89
90 tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
91
92 /*
93 * Verify and set header length
94 * XXX: I only found that on 5210 code, does it work on 5211 ?
95 */
96 if (ah->ah_version == AR5K_AR5210) {
97 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
98 return -EINVAL;
99 tx_ctl->tx_control_0 |=
100 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
101 }
102
103 /*Diferences between 5210-5211*/
104 if (ah->ah_version == AR5K_AR5210) {
105 switch (type) {
106 case AR5K_PKT_TYPE_BEACON:
107 case AR5K_PKT_TYPE_PROBE_RESP:
108 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
109 case AR5K_PKT_TYPE_PIFS:
110 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
111 default:
112 frame_type = type /*<< 2 ?*/;
113 }
114
115 tx_ctl->tx_control_0 |=
116 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
117 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
118
119 } else {
120 tx_ctl->tx_control_0 |=
121 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
122 AR5K_REG_SM(antenna_mode,
123 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
124 tx_ctl->tx_control_1 |=
125 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
126 }
127#define _TX_FLAGS(_c, _flag) \
128 if (flags & AR5K_TXDESC_##_flag) { \
129 tx_ctl->tx_control_##_c |= \
130 AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
131 }
132
133 _TX_FLAGS(0, CLRDMASK);
134 _TX_FLAGS(0, VEOL);
135 _TX_FLAGS(0, INTREQ);
136 _TX_FLAGS(0, RTSENA);
137 _TX_FLAGS(1, NOACK);
138
139#undef _TX_FLAGS
140
141 /*
142 * WEP crap
143 */
144 if (key_index != AR5K_TXKEYIX_INVALID) {
145 tx_ctl->tx_control_0 |=
146 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
147 tx_ctl->tx_control_1 |=
148 AR5K_REG_SM(key_index,
149 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
150 }
151
152 /*
153 * RTS/CTS Duration [5210 ?]
154 */
155 if ((ah->ah_version == AR5K_AR5210) &&
156 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
157 tx_ctl->tx_control_1 |= rtscts_duration &
158 AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
159
160 return 0;
161}
162
163/*
164 * Initialize the 4-word tx control descriptor on 5212
165 */
166static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
167 struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
168 enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
169 unsigned int tx_tries0, unsigned int key_index,
170 unsigned int antenna_mode, unsigned int flags,
171 unsigned int rtscts_rate,
172 unsigned int rtscts_duration)
173{
174 struct ath5k_hw_4w_tx_ctl *tx_ctl;
175 unsigned int frame_len;
176
177 ATH5K_TRACE(ah->ah_sc);
178 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
179
180 /*
181 * Validate input
182 * - Zero retries don't make sense.
183 * - A zero rate will put the HW into a mode where it continously sends
184 * noise on the channel, so it is important to avoid this.
185 */
186 if (unlikely(tx_tries0 == 0)) {
187 ATH5K_ERR(ah->ah_sc, "zero retries\n");
188 WARN_ON(1);
189 return -EINVAL;
190 }
191 if (unlikely(tx_rate0 == 0)) {
192 ATH5K_ERR(ah->ah_sc, "zero rate\n");
193 WARN_ON(1);
194 return -EINVAL;
195 }
196
197 /* Clear descriptor */
198 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
199
200 /* Setup control descriptor */
201
202 /* Verify and set frame length */
203
204 /* remove padding we might have added before */
205 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
206
207 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
208 return -EINVAL;
209
210 tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
211
212 /* Verify and set buffer length */
213
214 /* NB: beacon's BufLen must be a multiple of 4 bytes */
215 if (type == AR5K_PKT_TYPE_BEACON)
216 pkt_len = roundup(pkt_len, 4);
217
218 if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
219 return -EINVAL;
220
221 tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
222
223 tx_ctl->tx_control_0 |=
224 AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
225 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
226 tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
227 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
228 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
229 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
230 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
231
232#define _TX_FLAGS(_c, _flag) \
233 if (flags & AR5K_TXDESC_##_flag) { \
234 tx_ctl->tx_control_##_c |= \
235 AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
236 }
237
238 _TX_FLAGS(0, CLRDMASK);
239 _TX_FLAGS(0, VEOL);
240 _TX_FLAGS(0, INTREQ);
241 _TX_FLAGS(0, RTSENA);
242 _TX_FLAGS(0, CTSENA);
243 _TX_FLAGS(1, NOACK);
244
245#undef _TX_FLAGS
246
247 /*
248 * WEP crap
249 */
250 if (key_index != AR5K_TXKEYIX_INVALID) {
251 tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
252 tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
253 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
254 }
255
256 /*
257 * RTS/CTS
258 */
259 if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
260 if ((flags & AR5K_TXDESC_RTSENA) &&
261 (flags & AR5K_TXDESC_CTSENA))
262 return -EINVAL;
263 tx_ctl->tx_control_2 |= rtscts_duration &
264 AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
265 tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
266 AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
267 }
268
269 return 0;
270}
271
272/*
273 * Initialize a 4-word multi rate retry tx control descriptor on 5212
274 */
275static int
276ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
277 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
278 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
279{
280 struct ath5k_hw_4w_tx_ctl *tx_ctl;
281
282 /*
283 * Rates can be 0 as long as the retry count is 0 too.
284 * A zero rate and nonzero retry count will put the HW into a mode where
285 * it continously sends noise on the channel, so it is important to
286 * avoid this.
287 */
288 if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
289 (tx_rate2 == 0 && tx_tries2 != 0) ||
290 (tx_rate3 == 0 && tx_tries3 != 0))) {
291 ATH5K_ERR(ah->ah_sc, "zero rate\n");
292 WARN_ON(1);
293 return -EINVAL;
294 }
295
296 if (ah->ah_version == AR5K_AR5212) {
297 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
298
299#define _XTX_TRIES(_n) \
300 if (tx_tries##_n) { \
301 tx_ctl->tx_control_2 |= \
302 AR5K_REG_SM(tx_tries##_n, \
303 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
304 tx_ctl->tx_control_3 |= \
305 AR5K_REG_SM(tx_rate##_n, \
306 AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
307 }
308
309 _XTX_TRIES(1);
310 _XTX_TRIES(2);
311 _XTX_TRIES(3);
312
313#undef _XTX_TRIES
314
315 return 1;
316 }
317
318 return 0;
319}
320
321/*
322 * Proccess the tx status descriptor on 5210/5211
323 */
324static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
325 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
326{
327 struct ath5k_hw_2w_tx_ctl *tx_ctl;
328 struct ath5k_hw_tx_status *tx_status;
329
330 ATH5K_TRACE(ah->ah_sc);
331
332 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
333 tx_status = &desc->ud.ds_tx5210.tx_stat;
334
335 /* No frame has been send or error */
336 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
337 return -EINPROGRESS;
338
339 /*
340 * Get descriptor status
341 */
342 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
343 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
344 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
345 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
346 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
347 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
348 /*TODO: ts->ts_virtcol + test*/
349 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
350 AR5K_DESC_TX_STATUS1_SEQ_NUM);
351 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
352 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
353 ts->ts_antenna = 1;
354 ts->ts_status = 0;
355 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_0,
356 AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
357
358 if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
359 if (tx_status->tx_status_0 &
360 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
361 ts->ts_status |= AR5K_TXERR_XRETRY;
362
363 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
364 ts->ts_status |= AR5K_TXERR_FIFO;
365
366 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
367 ts->ts_status |= AR5K_TXERR_FILT;
368 }
369
370 return 0;
371}
372
373/*
374 * Proccess a tx status descriptor on 5212
375 */
376static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
377 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
378{
379 struct ath5k_hw_4w_tx_ctl *tx_ctl;
380 struct ath5k_hw_tx_status *tx_status;
381
382 ATH5K_TRACE(ah->ah_sc);
383
384 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
385 tx_status = &desc->ud.ds_tx5212.tx_stat;
386
387 /* No frame has been send or error */
388 if (unlikely(!(tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE)))
389 return -EINPROGRESS;
390
391 /*
392 * Get descriptor status
393 */
394 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
395 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
396 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
397 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
398 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
399 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
400 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
401 AR5K_DESC_TX_STATUS1_SEQ_NUM);
402 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
403 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
404 ts->ts_antenna = (tx_status->tx_status_1 &
405 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
406 ts->ts_status = 0;
407
408 switch (AR5K_REG_MS(tx_status->tx_status_1,
409 AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) {
410 case 0:
411 ts->ts_rate = tx_ctl->tx_control_3 &
412 AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
413 break;
414 case 1:
415 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
416 AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
417 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
418 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
419 break;
420 case 2:
421 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
422 AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
423 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
424 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
425 break;
426 case 3:
427 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
428 AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
429 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
430 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3);
431 break;
432 }
433
434 /* TX error */
435 if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
436 if (tx_status->tx_status_0 &
437 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
438 ts->ts_status |= AR5K_TXERR_XRETRY;
439
440 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
441 ts->ts_status |= AR5K_TXERR_FIFO;
442
443 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
444 ts->ts_status |= AR5K_TXERR_FILT;
445 }
446
447 return 0;
448}
449
450/*
451 * RX Descriptors
452 */
453
454/*
455 * Initialize an rx control descriptor
456 */
457static int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
458 u32 size, unsigned int flags)
459{
460 struct ath5k_hw_rx_ctl *rx_ctl;
461
462 ATH5K_TRACE(ah->ah_sc);
463 rx_ctl = &desc->ud.ds_rx.rx_ctl;
464
465 /*
466 * Clear the descriptor
467 * If we don't clean the status descriptor,
468 * while scanning we get too many results,
469 * most of them virtual, after some secs
470 * of scanning system hangs. M.F.
471 */
472 memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
473
474 /* Setup descriptor */
475 rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
476 if (unlikely(rx_ctl->rx_control_1 != size))
477 return -EINVAL;
478
479 if (flags & AR5K_RXDESC_INTREQ)
480 rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
481
482 return 0;
483}
484
485/*
486 * Proccess the rx status descriptor on 5210/5211
487 */
488static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
489 struct ath5k_desc *desc, struct ath5k_rx_status *rs)
490{
491 struct ath5k_hw_rx_status *rx_status;
492
493 rx_status = &desc->ud.ds_rx.u.rx_stat;
494
495 /* No frame received / not ready */
496 if (unlikely(!(rx_status->rx_status_1 &
497 AR5K_5210_RX_DESC_STATUS1_DONE)))
498 return -EINPROGRESS;
499
500 /*
501 * Frame receive status
502 */
503 rs->rs_datalen = rx_status->rx_status_0 &
504 AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
505 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
506 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
507 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
508 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
509 rs->rs_antenna = rx_status->rx_status_0 &
510 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
511 rs->rs_more = rx_status->rx_status_0 &
512 AR5K_5210_RX_DESC_STATUS0_MORE;
513 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
514 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
515 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
516 rs->rs_status = 0;
517 rs->rs_phyerr = 0;
518
519 /*
520 * Key table status
521 */
522 if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
523 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
524 AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
525 else
526 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
527
528 /*
529 * Receive/descriptor errors
530 */
531 if (!(rx_status->rx_status_1 &
532 AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
533 if (rx_status->rx_status_1 &
534 AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
535 rs->rs_status |= AR5K_RXERR_CRC;
536
537 if (rx_status->rx_status_1 &
538 AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
539 rs->rs_status |= AR5K_RXERR_FIFO;
540
541 if (rx_status->rx_status_1 &
542 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
543 rs->rs_status |= AR5K_RXERR_PHY;
544 rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
545 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
546 }
547
548 if (rx_status->rx_status_1 &
549 AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
550 rs->rs_status |= AR5K_RXERR_DECRYPT;
551 }
552
553 return 0;
554}
555
556/*
557 * Proccess the rx status descriptor on 5212
558 */
559static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
560 struct ath5k_desc *desc, struct ath5k_rx_status *rs)
561{
562 struct ath5k_hw_rx_status *rx_status;
563 struct ath5k_hw_rx_error *rx_err;
564
565 ATH5K_TRACE(ah->ah_sc);
566 rx_status = &desc->ud.ds_rx.u.rx_stat;
567
568 /* Overlay on error */
569 rx_err = &desc->ud.ds_rx.u.rx_err;
570
571 /* No frame received / not ready */
572 if (unlikely(!(rx_status->rx_status_1 &
573 AR5K_5212_RX_DESC_STATUS1_DONE)))
574 return -EINPROGRESS;
575
576 /*
577 * Frame receive status
578 */
579 rs->rs_datalen = rx_status->rx_status_0 &
580 AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
581 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
582 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
583 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
584 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
585 rs->rs_antenna = rx_status->rx_status_0 &
586 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
587 rs->rs_more = rx_status->rx_status_0 &
588 AR5K_5212_RX_DESC_STATUS0_MORE;
589 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
590 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
591 rs->rs_status = 0;
592 rs->rs_phyerr = 0;
593
594 /*
595 * Key table status
596 */
597 if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
598 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
599 AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
600 else
601 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
602
603 /*
604 * Receive/descriptor errors
605 */
606 if (!(rx_status->rx_status_1 &
607 AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
608 if (rx_status->rx_status_1 &
609 AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
610 rs->rs_status |= AR5K_RXERR_CRC;
611
612 if (rx_status->rx_status_1 &
613 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
614 rs->rs_status |= AR5K_RXERR_PHY;
615 rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
616 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
617 }
618
619 if (rx_status->rx_status_1 &
620 AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
621 rs->rs_status |= AR5K_RXERR_DECRYPT;
622
623 if (rx_status->rx_status_1 &
624 AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
625 rs->rs_status |= AR5K_RXERR_MIC;
626 }
627
628 return 0;
629}
630
631/*
632 * Init function pointers inside ath5k_hw struct
633 */
634int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
635{
636
637 if (ah->ah_version != AR5K_AR5210 &&
638 ah->ah_version != AR5K_AR5211 &&
639 ah->ah_version != AR5K_AR5212)
640 return -ENOTSUPP;
641
642 /* XXX: What is this magic value and where is it used ? */
643 if (ah->ah_version == AR5K_AR5212)
644 ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
645 else if (ah->ah_version == AR5K_AR5211)
646 ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
647
648 if (ah->ah_version == AR5K_AR5212) {
649 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
650 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
651 ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
652 ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
653 } else {
654 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
655 ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
656 ah->ah_setup_mrr_tx_desc = ath5k_hw_setup_mrr_tx_desc;
657 ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
658 }
659
660 if (ah->ah_version == AR5K_AR5212)
661 ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
662 else if (ah->ah_version <= AR5K_AR5211)
663 ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
664
665 return 0;
666}
667
diff --git a/drivers/net/wireless/ath5k/hw.h b/drivers/net/wireless/ath5k/desc.h
index 64fca8dcb386..56158c804e3e 100644
--- a/drivers/net/wireless/ath5k/hw.h
+++ b/drivers/net/wireless/ath5k/desc.h
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com> 3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007 Matthew W. S. Bell <mentor@madwifi.org>
5 * Copyright (c) 2007 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
6 * 4 *
7 * Permission to use, copy, modify, and distribute this software for any 5 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -15,159 +13,9 @@
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
18 */ 17 */
19 18
20#include <linux/delay.h>
21
22/*
23 * Gain settings
24 */
25
26enum ath5k_rfgain {
27 AR5K_RFGAIN_INACTIVE = 0,
28 AR5K_RFGAIN_READ_REQUESTED,
29 AR5K_RFGAIN_NEED_CHANGE,
30};
31
32#define AR5K_GAIN_CRN_FIX_BITS_5111 4
33#define AR5K_GAIN_CRN_FIX_BITS_5112 7
34#define AR5K_GAIN_CRN_MAX_FIX_BITS AR5K_GAIN_CRN_FIX_BITS_5112
35#define AR5K_GAIN_DYN_ADJUST_HI_MARGIN 15
36#define AR5K_GAIN_DYN_ADJUST_LO_MARGIN 20
37#define AR5K_GAIN_CCK_PROBE_CORR 5
38#define AR5K_GAIN_CCK_OFDM_GAIN_DELTA 15
39#define AR5K_GAIN_STEP_COUNT 10
40#define AR5K_GAIN_PARAM_TX_CLIP 0
41#define AR5K_GAIN_PARAM_PD_90 1
42#define AR5K_GAIN_PARAM_PD_84 2
43#define AR5K_GAIN_PARAM_GAIN_SEL 3
44#define AR5K_GAIN_PARAM_MIX_ORN 0
45#define AR5K_GAIN_PARAM_PD_138 1
46#define AR5K_GAIN_PARAM_PD_137 2
47#define AR5K_GAIN_PARAM_PD_136 3
48#define AR5K_GAIN_PARAM_PD_132 4
49#define AR5K_GAIN_PARAM_PD_131 5
50#define AR5K_GAIN_PARAM_PD_130 6
51#define AR5K_GAIN_CHECK_ADJUST(_g) \
52 ((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
53
54struct ath5k_gain_opt_step {
55 s16 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
56 s32 gos_gain;
57};
58
59struct ath5k_gain {
60 u32 g_step_idx;
61 u32 g_current;
62 u32 g_target;
63 u32 g_low;
64 u32 g_high;
65 u32 g_f_corr;
66 u32 g_active;
67 const struct ath5k_gain_opt_step *g_step;
68};
69
70
71/*
72 * HW SPECIFIC STRUCTS
73 */
74
75/* Some EEPROM defines */
76#define AR5K_EEPROM_EEP_SCALE 100
77#define AR5K_EEPROM_EEP_DELTA 10
78#define AR5K_EEPROM_N_MODES 3
79#define AR5K_EEPROM_N_5GHZ_CHAN 10
80#define AR5K_EEPROM_N_2GHZ_CHAN 3
81#define AR5K_EEPROM_MAX_CHAN 10
82#define AR5K_EEPROM_N_PCDAC 11
83#define AR5K_EEPROM_N_TEST_FREQ 8
84#define AR5K_EEPROM_N_EDGES 8
85#define AR5K_EEPROM_N_INTERCEPTS 11
86#define AR5K_EEPROM_FREQ_M(_v) AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
87#define AR5K_EEPROM_PCDAC_M 0x3f
88#define AR5K_EEPROM_PCDAC_START 1
89#define AR5K_EEPROM_PCDAC_STOP 63
90#define AR5K_EEPROM_PCDAC_STEP 1
91#define AR5K_EEPROM_NON_EDGE_M 0x40
92#define AR5K_EEPROM_CHANNEL_POWER 8
93#define AR5K_EEPROM_N_OBDB 4
94#define AR5K_EEPROM_OBDB_DIS 0xffff
95#define AR5K_EEPROM_CHANNEL_DIS 0xff
96#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
97#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
98#define AR5K_EEPROM_MAX_CTLS 32
99#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4
100#define AR5K_EEPROM_N_XPD0_POINTS 4
101#define AR5K_EEPROM_N_XPD3_POINTS 3
102#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35
103#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55
104#define AR5K_EEPROM_POWER_M 0x3f
105#define AR5K_EEPROM_POWER_MIN 0
106#define AR5K_EEPROM_POWER_MAX 3150
107#define AR5K_EEPROM_POWER_STEP 50
108#define AR5K_EEPROM_POWER_TABLE_SIZE 64
109#define AR5K_EEPROM_N_POWER_LOC_11B 4
110#define AR5K_EEPROM_N_POWER_LOC_11G 6
111#define AR5K_EEPROM_I_GAIN 10
112#define AR5K_EEPROM_CCK_OFDM_DELTA 15
113#define AR5K_EEPROM_N_IQ_CAL 2
114
115/* Struct to hold EEPROM calibration data */
116struct ath5k_eeprom_info {
117 u16 ee_magic;
118 u16 ee_protect;
119 u16 ee_regdomain;
120 u16 ee_version;
121 u16 ee_header;
122 u16 ee_ant_gain;
123 u16 ee_misc0;
124 u16 ee_misc1;
125 u16 ee_cck_ofdm_gain_delta;
126 u16 ee_cck_ofdm_power_delta;
127 u16 ee_scaled_cck_delta;
128
129 /* Used for tx thermal adjustment (eeprom_init, rfregs) */
130 u16 ee_tx_clip;
131 u16 ee_pwd_84;
132 u16 ee_pwd_90;
133 u16 ee_gain_select;
134
135 /* RF Calibration settings (reset, rfregs) */
136 u16 ee_i_cal[AR5K_EEPROM_N_MODES];
137 u16 ee_q_cal[AR5K_EEPROM_N_MODES];
138 u16 ee_fixed_bias[AR5K_EEPROM_N_MODES];
139 u16 ee_turbo_max_power[AR5K_EEPROM_N_MODES];
140 u16 ee_xr_power[AR5K_EEPROM_N_MODES];
141 u16 ee_switch_settling[AR5K_EEPROM_N_MODES];
142 u16 ee_ant_tx_rx[AR5K_EEPROM_N_MODES];
143 u16 ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
144 u16 ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
145 u16 ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
146 u16 ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
147 u16 ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
148 u16 ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
149 u16 ee_thr_62[AR5K_EEPROM_N_MODES];
150 u16 ee_xlna_gain[AR5K_EEPROM_N_MODES];
151 u16 ee_xpd[AR5K_EEPROM_N_MODES];
152 u16 ee_x_gain[AR5K_EEPROM_N_MODES];
153 u16 ee_i_gain[AR5K_EEPROM_N_MODES];
154 u16 ee_margin_tx_rx[AR5K_EEPROM_N_MODES];
155
156 /* Unused */
157 u16 ee_false_detect[AR5K_EEPROM_N_MODES];
158 u16 ee_cal_pier[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_2GHZ_CHAN];
159 u16 ee_channel[AR5K_EEPROM_N_MODES][AR5K_EEPROM_MAX_CHAN]; /*empty*/
160
161 /* Conformance test limits (Unused) */
162 u16 ee_ctls;
163 u16 ee_ctl[AR5K_EEPROM_MAX_CTLS];
164
165 /* Noise Floor Calibration settings */
166 s16 ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
167 s8 ee_adc_desired_size[AR5K_EEPROM_N_MODES];
168 s8 ee_pga_desired_size[AR5K_EEPROM_N_MODES];
169};
170
171/* 19/*
172 * Internal RX/TX descriptor structures 20 * Internal RX/TX descriptor structures
173 * (rX: reserved fields possibily used by future versions of the ar5k chipset) 21 * (rX: reserved fields possibily used by future versions of the ar5k chipset)
@@ -178,14 +26,15 @@ struct ath5k_eeprom_info {
178 */ 26 */
179struct ath5k_hw_rx_ctl { 27struct ath5k_hw_rx_ctl {
180 u32 rx_control_0; /* RX control word 0 */ 28 u32 rx_control_0; /* RX control word 0 */
29 u32 rx_control_1; /* RX control word 1 */
30} __packed;
181 31
32/* RX control word 0 field/sflags */
182#define AR5K_DESC_RX_CTL0 0x00000000 33#define AR5K_DESC_RX_CTL0 0x00000000
183 34
184 u32 rx_control_1; /* RX control word 1 */ 35/* RX control word 1 fields/flags */
185
186#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff 36#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff
187#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 37#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000
188} __packed;
189 38
190/* 39/*
191 * common hardware RX status descriptor 40 * common hardware RX status descriptor
@@ -197,6 +46,7 @@ struct ath5k_hw_rx_status {
197} __packed; 46} __packed;
198 47
199/* 5210/5211 */ 48/* 5210/5211 */
49/* RX status word 0 fields/flags */
200#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff 50#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff
201#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000 51#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000
202#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000 52#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000
@@ -205,6 +55,8 @@ struct ath5k_hw_rx_status {
205#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19 55#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
206#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000 56#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000
207#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27 57#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27
58
59/* RX status word 1 fields/flags */
208#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001 60#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001
209#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 61#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
210#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004 62#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004
@@ -220,6 +72,7 @@ struct ath5k_hw_rx_status {
220#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000 72#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000
221 73
222/* 5212 */ 74/* 5212 */
75/* RX status word 0 fields/flags */
223#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff 76#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff
224#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000 77#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000
225#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000 78#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000
@@ -229,6 +82,8 @@ struct ath5k_hw_rx_status {
229#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20 82#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
230#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000 83#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000
231#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28 84#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
85
86/* RX status word 1 fields/flags */
232#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001 87#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001
233#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002 88#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
234#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004 89#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004
@@ -246,16 +101,18 @@ struct ath5k_hw_rx_status {
246 * common hardware RX error descriptor 101 * common hardware RX error descriptor
247 */ 102 */
248struct ath5k_hw_rx_error { 103struct ath5k_hw_rx_error {
249 u32 rx_error_0; /* RX error word 0 */ 104 u32 rx_error_0; /* RX status word 0 */
105 u32 rx_error_1; /* RX status word 1 */
106} __packed;
250 107
108/* RX error word 0 fields/flags */
251#define AR5K_RX_DESC_ERROR0 0x00000000 109#define AR5K_RX_DESC_ERROR0 0x00000000
252 110
253 u32 rx_error_1; /* RX error word 1 */ 111/* RX error word 1 fields/flags */
254
255#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00 112#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
256#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8 113#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
257} __packed;
258 114
115/* PHY Error codes */
259#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00 116#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00
260#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20 117#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20
261#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40 118#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40
@@ -270,7 +127,10 @@ struct ath5k_hw_rx_error {
270 */ 127 */
271struct ath5k_hw_2w_tx_ctl { 128struct ath5k_hw_2w_tx_ctl {
272 u32 tx_control_0; /* TX control word 0 */ 129 u32 tx_control_0; /* TX control word 0 */
130 u32 tx_control_1; /* TX control word 1 */
131} __packed;
273 132
133/* TX control word 0 fields/flags */
274#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff 134#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
275#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/ 135#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN 0x0003f000 /*[5210 ?]*/
276#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12 136#define AR5K_2W_TX_DESC_CTL0_HEADER_LEN_S 12
@@ -284,29 +144,34 @@ struct ath5k_hw_2w_tx_ctl {
284#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26 144#define AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_S 26
285#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000 145#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 0x02000000
286#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000 146#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211 0x1e000000
287#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT (ah->ah_version == AR5K_AR5210 ? \ 147
288 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \ 148#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT \
289 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211) 149 (ah->ah_version == AR5K_AR5210 ? \
150 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5210 : \
151 AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_5211)
152
290#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25 153#define AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT_S 25
291#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000 154#define AR5K_2W_TX_DESC_CTL0_INTREQ 0x20000000
292#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000 155#define AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID 0x40000000
293 156
294 u32 tx_control_1; /* TX control word 1 */ 157/* TX control word 1 fields/flags */
295
296#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff 158#define AR5K_2W_TX_DESC_CTL1_BUF_LEN 0x00000fff
297#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000 159#define AR5K_2W_TX_DESC_CTL1_MORE 0x00001000
298#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000 160#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 0x0007e000
299#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000 161#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211 0x000fe000
300#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX (ah->ah_version == AR5K_AR5210 ? \ 162
301 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \ 163#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX \
302 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211) 164 (ah->ah_version == AR5K_AR5210 ? \
165 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5210 : \
166 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_5211)
167
303#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13 168#define AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX_S 13
304#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/ 169#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE 0x00700000 /*[5211]*/
305#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20 170#define AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_S 20
306#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/ 171#define AR5K_2W_TX_DESC_CTL1_NOACK 0x00800000 /*[5211]*/
307#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/ 172#define AR5K_2W_TX_DESC_CTL1_RTS_DURATION 0xfff80000 /*[5210 ?]*/
308} __packed;
309 173
174/* Frame types */
310#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00 175#define AR5K_AR5210_TX_DESC_FRAME_TYPE_NORMAL 0x00
311#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04 176#define AR5K_AR5210_TX_DESC_FRAME_TYPE_ATIM 0x04
312#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08 177#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PSPOLL 0x08
@@ -378,7 +243,10 @@ struct ath5k_hw_4w_tx_ctl {
378 */ 243 */
379struct ath5k_hw_tx_status { 244struct ath5k_hw_tx_status {
380 u32 tx_status_0; /* TX status word 0 */ 245 u32 tx_status_0; /* TX status word 0 */
246 u32 tx_status_1; /* TX status word 1 */
247} __packed;
381 248
249/* TX status word 0 fields/flags */
382#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 250#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001
383#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002 251#define AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES 0x00000002
384#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004 252#define AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN 0x00000004
@@ -400,8 +268,7 @@ struct ath5k_hw_tx_status {
400#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000 268#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP 0xffff0000
401#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16 269#define AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP_S 16
402 270
403 u32 tx_status_1; /* TX status word 1 */ 271/* TX status word 1 fields/flags */
404
405#define AR5K_DESC_TX_STATUS1_DONE 0x00000001 272#define AR5K_DESC_TX_STATUS1_DONE 0x00000001
406#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe 273#define AR5K_DESC_TX_STATUS1_SEQ_NUM 0x00001ffe
407#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1 274#define AR5K_DESC_TX_STATUS1_SEQ_NUM_S 1
@@ -411,8 +278,6 @@ struct ath5k_hw_tx_status {
411#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21 278#define AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX_S 21
412#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000 279#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS 0x00800000
413#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000 280#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA 0x01000000
414} __packed;
415
416 281
417/* 282/*
418 * 5210/5211 hardware TX descriptor 283 * 5210/5211 hardware TX descriptor
@@ -441,176 +306,27 @@ struct ath5k_hw_all_rx_desc {
441 } u; 306 } u;
442} __packed; 307} __packed;
443 308
444
445/* 309/*
446 * AR5K REGISTER ACCESS 310 * Atheros hardware descriptor
311 * This is read and written to by the hardware
447 */ 312 */
313struct ath5k_desc {
314 u32 ds_link; /* physical address of the next descriptor */
315 u32 ds_data; /* physical address of data buffer (skb) */
448 316
449/*Swap RX/TX Descriptor for big endian archs*/ 317 union {
450#if defined(__BIG_ENDIAN) 318 struct ath5k_hw_5210_tx_desc ds_tx5210;
451#define AR5K_INIT_CFG ( \ 319 struct ath5k_hw_5212_tx_desc ds_tx5212;
452 AR5K_CFG_SWTD | AR5K_CFG_SWRD \ 320 struct ath5k_hw_all_rx_desc ds_rx;
453) 321 } ud;
454#else 322} __packed;
455#define AR5K_INIT_CFG 0x00000000
456#endif
457
458/*#define AR5K_REG_READ(_reg) ath5k_hw_reg_read(ah, _reg)
459
460#define AR5K_REG_WRITE(_reg, _val) ath5k_hw_reg_write(ah, _val, _reg)*/
461
462#define AR5K_REG_SM(_val, _flags) \
463 (((_val) << _flags##_S) & (_flags))
464
465#define AR5K_REG_MS(_val, _flags) \
466 (((_val) & (_flags)) >> _flags##_S)
467
468/* Some registers can hold multiple values of interest. For this
469 * reason when we want to write to these registers we must first
470 * retrieve the values which we do not want to clear (lets call this
471 * old_data) and then set the register with this and our new_value:
472 * ( old_data | new_value) */
473#define AR5K_REG_WRITE_BITS(ah, _reg, _flags, _val) \
474 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & ~(_flags)) | \
475 (((_val) << _flags##_S) & (_flags)), _reg)
476
477#define AR5K_REG_MASKED_BITS(ah, _reg, _flags, _mask) \
478 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, _reg) & \
479 (_mask)) | (_flags), _reg)
480
481#define AR5K_REG_ENABLE_BITS(ah, _reg, _flags) \
482 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) | (_flags), _reg)
483
484#define AR5K_REG_DISABLE_BITS(ah, _reg, _flags) \
485 ath5k_hw_reg_write(ah, ath5k_hw_reg_read(ah, _reg) & ~(_flags), _reg)
486
487#define AR5K_PHY_WRITE(ah, _reg, _val) \
488 ath5k_hw_reg_write(ah, _val, (ah)->ah_phy + ((_reg) << 2))
489
490#define AR5K_PHY_READ(ah, _reg) \
491 ath5k_hw_reg_read(ah, (ah)->ah_phy + ((_reg) << 2))
492
493#define AR5K_REG_WAIT(_i) do { \
494 if (_i % 64) \
495 udelay(1); \
496} while (0)
497
498#define AR5K_EEPROM_READ(_o, _v) do { \
499 if ((ret = ath5k_hw_eeprom_read(ah, (_o), &(_v))) != 0) \
500 return (ret); \
501} while (0)
502
503#define AR5K_EEPROM_READ_HDR(_o, _v) \
504 AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v); \
505
506/* Read status of selected queue */
507#define AR5K_REG_READ_Q(ah, _reg, _queue) \
508 (ath5k_hw_reg_read(ah, _reg) & (1 << _queue)) \
509
510#define AR5K_REG_WRITE_Q(ah, _reg, _queue) \
511 ath5k_hw_reg_write(ah, (1 << _queue), _reg)
512
513#define AR5K_Q_ENABLE_BITS(_reg, _queue) do { \
514 _reg |= 1 << _queue; \
515} while (0)
516
517#define AR5K_Q_DISABLE_BITS(_reg, _queue) do { \
518 _reg &= ~(1 << _queue); \
519} while (0)
520
521#define AR5K_LOW_ID(_a)( \
522(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \
523)
524
525#define AR5K_HIGH_ID(_a) ((_a)[4] | (_a)[5] << 8)
526
527/*
528 * Initial register values
529 */
530
531/*
532 * Common initial register values
533 */
534#define AR5K_INIT_MODE CHANNEL_B
535
536#define AR5K_INIT_TX_LATENCY 502
537#define AR5K_INIT_USEC 39
538#define AR5K_INIT_USEC_TURBO 79
539#define AR5K_INIT_USEC_32 31
540#define AR5K_INIT_CARR_SENSE_EN 1
541#define AR5K_INIT_PROG_IFS 920
542#define AR5K_INIT_PROG_IFS_TURBO 960
543#define AR5K_INIT_EIFS 3440
544#define AR5K_INIT_EIFS_TURBO 6880
545#define AR5K_INIT_SLOT_TIME 396
546#define AR5K_INIT_SLOT_TIME_TURBO 480
547#define AR5K_INIT_ACK_CTS_TIMEOUT 1024
548#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800
549#define AR5K_INIT_SIFS 560
550#define AR5K_INIT_SIFS_TURBO 480
551#define AR5K_INIT_SH_RETRY 10
552#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
553#define AR5K_INIT_SSH_RETRY 32
554#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
555#define AR5K_INIT_TX_RETRY 10
556#define AR5K_INIT_TOPS 8
557#define AR5K_INIT_RXNOFRM 8
558#define AR5K_INIT_RPGTO 0
559#define AR5K_INIT_TXNOFRM 0
560#define AR5K_INIT_BEACON_PERIOD 65535
561#define AR5K_INIT_TIM_OFFSET 0
562#define AR5K_INIT_BEACON_EN 0
563#define AR5K_INIT_RESET_TSF 0
564
565#define AR5K_INIT_TRANSMIT_LATENCY ( \
566 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
567 (AR5K_INIT_USEC) \
568)
569#define AR5K_INIT_TRANSMIT_LATENCY_TURBO ( \
570 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \
571 (AR5K_INIT_USEC_TURBO) \
572)
573#define AR5K_INIT_PROTO_TIME_CNTRL ( \
574 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) | \
575 (AR5K_INIT_PROG_IFS) \
576)
577#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO ( \
578 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \
579 (AR5K_INIT_PROG_IFS_TURBO) \
580)
581#define AR5K_INIT_BEACON_CONTROL ( \
582 (AR5K_INIT_RESET_TSF << 24) | (AR5K_INIT_BEACON_EN << 23) | \
583 (AR5K_INIT_TIM_OFFSET << 16) | (AR5K_INIT_BEACON_PERIOD) \
584)
585
586/*
587 * Non-common initial register values which have to be loaded into the
588 * card at boot time and after each reset.
589 */
590
591/* Register dumps are done per operation mode */
592#define AR5K_INI_RFGAIN_5GHZ 0
593#define AR5K_INI_RFGAIN_2GHZ 1
594
595#define AR5K_INI_VAL_11A 0
596#define AR5K_INI_VAL_11A_TURBO 1
597#define AR5K_INI_VAL_11B 2
598#define AR5K_INI_VAL_11G 3
599#define AR5K_INI_VAL_11G_TURBO 4
600#define AR5K_INI_VAL_XR 0
601#define AR5K_INI_VAL_MAX 5
602
603#define AR5K_RF5111_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
604#define AR5K_RF5112_INI_RF_MAX_BANKS AR5K_MAX_RF_BANKS
605 323
606static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits) 324#define AR5K_RXDESC_INTREQ 0x0020
607{
608 u32 retval = 0, bit, i;
609 325
610 for (i = 0; i < bits; i++) { 326#define AR5K_TXDESC_CLRDMASK 0x0001
611 bit = (val >> i) & 1; 327#define AR5K_TXDESC_NOACK 0x0002 /*[5211+]*/
612 retval = (retval << 1) | bit; 328#define AR5K_TXDESC_RTSENA 0x0004
613 } 329#define AR5K_TXDESC_CTSENA 0x0008
330#define AR5K_TXDESC_INTREQ 0x0010
331#define AR5K_TXDESC_VEOL 0x0020 /*[5211+]*/
614 332
615 return retval;
616}
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c
new file mode 100644
index 000000000000..7adceb2c7fab
--- /dev/null
+++ b/drivers/net/wireless/ath5k/dma.c
@@ -0,0 +1,605 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*************************************\
20* DMA and interrupt masking functions *
21\*************************************/
22
23/*
24 * dma.c - DMA and interrupt masking functions
25 *
26 * Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
27 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
28 * Also we setup interrupt mask register (IMR) and read the various iterrupt
29 * status registers (ISR).
30 *
31 * TODO: Handle SISR on 5211+ and introduce a function to return the queue
32 * number that resulted the interrupt.
33 */
34
35#include "ath5k.h"
36#include "reg.h"
37#include "debug.h"
38#include "base.h"
39
40/*********\
41* Receive *
42\*********/
43
44/**
45 * ath5k_hw_start_rx_dma - Start DMA receive
46 *
47 * @ah: The &struct ath5k_hw
48 */
49void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
50{
51 ATH5K_TRACE(ah->ah_sc);
52 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
53 ath5k_hw_reg_read(ah, AR5K_CR);
54}
55
56/**
57 * ath5k_hw_stop_rx_dma - Stop DMA receive
58 *
59 * @ah: The &struct ath5k_hw
60 */
61int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
62{
63 unsigned int i;
64
65 ATH5K_TRACE(ah->ah_sc);
66 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
67
68 /*
69 * It may take some time to disable the DMA receive unit
70 */
71 for (i = 1000; i > 0 &&
72 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
73 i--)
74 udelay(10);
75
76 return i ? 0 : -EBUSY;
77}
78
79/**
80 * ath5k_hw_get_rxdp - Get RX Descriptor's address
81 *
82 * @ah: The &struct ath5k_hw
83 *
84 * XXX: Is RXDP read and clear ?
85 */
86u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
87{
88 return ath5k_hw_reg_read(ah, AR5K_RXDP);
89}
90
91/**
92 * ath5k_hw_set_rxdp - Set RX Descriptor's address
93 *
94 * @ah: The &struct ath5k_hw
95 * @phys_addr: RX descriptor address
96 *
97 * XXX: Should we check if rx is enabled before setting rxdp ?
98 */
99void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
100{
101 ATH5K_TRACE(ah->ah_sc);
102
103 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
104}
105
106
107/**********\
108* Transmit *
109\**********/
110
111/**
112 * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
113 *
114 * @ah: The &struct ath5k_hw
115 * @queue: The hw queue number
116 *
117 * Start DMA transmit for a specific queue and since 5210 doesn't have
118 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
119 * queue for normal data and one queue for beacons). For queue setup
120 * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
121 * of range or if queue is already disabled.
122 *
123 * NOTE: Must be called after setting up tx control descriptor for that
124 * queue (see below).
125 */
126int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
127{
128 u32 tx_queue;
129
130 ATH5K_TRACE(ah->ah_sc);
131 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
132
133 /* Return if queue is declared inactive */
134 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
135 return -EIO;
136
137 if (ah->ah_version == AR5K_AR5210) {
138 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
139
140 /*
141 * Set the queue by type on 5210
142 */
143 switch (ah->ah_txq[queue].tqi_type) {
144 case AR5K_TX_QUEUE_DATA:
145 tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
146 break;
147 case AR5K_TX_QUEUE_BEACON:
148 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
149 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
150 AR5K_BSR);
151 break;
152 case AR5K_TX_QUEUE_CAB:
153 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
154 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
155 AR5K_BCR_BDMAE, AR5K_BSR);
156 break;
157 default:
158 return -EINVAL;
159 }
160 /* Start queue */
161 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
162 ath5k_hw_reg_read(ah, AR5K_CR);
163 } else {
164 /* Return if queue is disabled */
165 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
166 return -EIO;
167
168 /* Start queue */
169 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
170 }
171
172 return 0;
173}
174
175/**
176 * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
177 *
178 * @ah: The &struct ath5k_hw
179 * @queue: The hw queue number
180 *
181 * Stop DMA transmit on a specific hw queue and drain queue so we don't
182 * have any pending frames. Returns -EBUSY if we still have pending frames,
183 * -EINVAL if queue number is out of range.
184 *
185 */
186int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
187{
188 unsigned int i = 40;
189 u32 tx_queue, pending;
190
191 ATH5K_TRACE(ah->ah_sc);
192 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
193
194 /* Return if queue is declared inactive */
195 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
196 return -EIO;
197
198 if (ah->ah_version == AR5K_AR5210) {
199 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
200
201 /*
202 * Set by queue type
203 */
204 switch (ah->ah_txq[queue].tqi_type) {
205 case AR5K_TX_QUEUE_DATA:
206 tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
207 break;
208 case AR5K_TX_QUEUE_BEACON:
209 case AR5K_TX_QUEUE_CAB:
210 /* XXX Fix me... */
211 tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
212 ath5k_hw_reg_write(ah, 0, AR5K_BSR);
213 break;
214 default:
215 return -EINVAL;
216 }
217
218 /* Stop queue */
219 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
220 ath5k_hw_reg_read(ah, AR5K_CR);
221 } else {
222 /*
223 * Schedule TX disable and wait until queue is empty
224 */
225 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
226
227 /*Check for pending frames*/
228 do {
229 pending = ath5k_hw_reg_read(ah,
230 AR5K_QUEUE_STATUS(queue)) &
231 AR5K_QCU_STS_FRMPENDCNT;
232 udelay(100);
233 } while (--i && pending);
234
235 /* For 2413+ order PCU to drop packets using
236 * QUIET mechanism */
237 if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
238 pending){
239 /* Set periodicity and duration */
240 ath5k_hw_reg_write(ah,
241 AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
242 AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
243 AR5K_QUIET_CTL2);
244
245 /* Enable quiet period for current TSF */
246 ath5k_hw_reg_write(ah,
247 AR5K_QUIET_CTL1_QT_EN |
248 AR5K_REG_SM(ath5k_hw_reg_read(ah,
249 AR5K_TSF_L32_5211) >> 10,
250 AR5K_QUIET_CTL1_NEXT_QT_TSF),
251 AR5K_QUIET_CTL1);
252
253 /* Force channel idle high */
254 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
255 AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
256
257 /* Wait a while and disable mechanism */
258 udelay(200);
259 AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
260 AR5K_QUIET_CTL1_QT_EN);
261
262 /* Re-check for pending frames */
263 i = 40;
264 do {
265 pending = ath5k_hw_reg_read(ah,
266 AR5K_QUEUE_STATUS(queue)) &
267 AR5K_QCU_STS_FRMPENDCNT;
268 udelay(100);
269 } while (--i && pending);
270
271 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
272 AR5K_DIAG_SW_CHANEL_IDLE_HIGH);
273 }
274
275 /* Clear register */
276 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
277 if (pending)
278 return -EBUSY;
279 }
280
281 /* TODO: Check for success on 5210 else return error */
282 return 0;
283}
284
285/**
286 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
287 *
288 * @ah: The &struct ath5k_hw
289 * @queue: The hw queue number
290 *
291 * Get TX descriptor's address for a specific queue. For 5210 we ignore
292 * the queue number and use tx queue type since we only have 2 queues.
293 * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
294 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
295 *
296 * XXX: Is TXDP read and clear ?
297 */
298u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
299{
300 u16 tx_reg;
301
302 ATH5K_TRACE(ah->ah_sc);
303 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
304
305 /*
306 * Get the transmit queue descriptor pointer from the selected queue
307 */
308 /*5210 doesn't have QCU*/
309 if (ah->ah_version == AR5K_AR5210) {
310 switch (ah->ah_txq[queue].tqi_type) {
311 case AR5K_TX_QUEUE_DATA:
312 tx_reg = AR5K_NOQCU_TXDP0;
313 break;
314 case AR5K_TX_QUEUE_BEACON:
315 case AR5K_TX_QUEUE_CAB:
316 tx_reg = AR5K_NOQCU_TXDP1;
317 break;
318 default:
319 return 0xffffffff;
320 }
321 } else {
322 tx_reg = AR5K_QUEUE_TXDP(queue);
323 }
324
325 return ath5k_hw_reg_read(ah, tx_reg);
326}
327
328/**
329 * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
330 *
331 * @ah: The &struct ath5k_hw
332 * @queue: The hw queue number
333 *
334 * Set TX descriptor's address for a specific queue. For 5210 we ignore
335 * the queue number and we use tx queue type since we only have 2 queues
336 * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue.
337 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
338 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
339 * active.
340 */
341int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
342{
343 u16 tx_reg;
344
345 ATH5K_TRACE(ah->ah_sc);
346 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
347
348 /*
349 * Set the transmit queue descriptor pointer register by type
350 * on 5210
351 */
352 if (ah->ah_version == AR5K_AR5210) {
353 switch (ah->ah_txq[queue].tqi_type) {
354 case AR5K_TX_QUEUE_DATA:
355 tx_reg = AR5K_NOQCU_TXDP0;
356 break;
357 case AR5K_TX_QUEUE_BEACON:
358 case AR5K_TX_QUEUE_CAB:
359 tx_reg = AR5K_NOQCU_TXDP1;
360 break;
361 default:
362 return -EINVAL;
363 }
364 } else {
365 /*
366 * Set the transmit queue descriptor pointer for
367 * the selected queue on QCU for 5211+
368 * (this won't work if the queue is still active)
369 */
370 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
371 return -EIO;
372
373 tx_reg = AR5K_QUEUE_TXDP(queue);
374 }
375
376 /* Set descriptor pointer */
377 ath5k_hw_reg_write(ah, phys_addr, tx_reg);
378
379 return 0;
380}
381
382/**
383 * ath5k_hw_update_tx_triglevel - Update tx trigger level
384 *
385 * @ah: The &struct ath5k_hw
386 * @increase: Flag to force increase of trigger level
387 *
388 * This function increases/decreases the tx trigger level for the tx fifo
389 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
390 * the buffer and transmits it's data. Lowering this results sending small
391 * frames more quickly but can lead to tx underruns, raising it a lot can
392 * result other problems (i think bmiss is related). Right now we start with
393 * the lowest possible (64Bytes) and if we get tx underrun we increase it using
394 * the increase flag. Returns -EIO if we have have reached maximum/minimum.
395 *
396 * XXX: Link this with tx DMA size ?
397 * XXX: Use it to save interrupts ?
398 * TODO: Needs testing, i think it's related to bmiss...
399 */
400int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
401{
402 u32 trigger_level, imr;
403 int ret = -EIO;
404
405 ATH5K_TRACE(ah->ah_sc);
406
407 /*
408 * Disable interrupts by setting the mask
409 */
410 imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
411
412 trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
413 AR5K_TXCFG_TXFULL);
414
415 if (!increase) {
416 if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
417 goto done;
418 } else
419 trigger_level +=
420 ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
421
422 /*
423 * Update trigger level on success
424 */
425 if (ah->ah_version == AR5K_AR5210)
426 ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
427 else
428 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
429 AR5K_TXCFG_TXFULL, trigger_level);
430
431 ret = 0;
432
433done:
434 /*
435 * Restore interrupt mask
436 */
437 ath5k_hw_set_imr(ah, imr);
438
439 return ret;
440}
441
442/*******************\
443* Interrupt masking *
444\*******************/
445
446/**
447 * ath5k_hw_is_intr_pending - Check if we have pending interrupts
448 *
449 * @ah: The &struct ath5k_hw
450 *
451 * Check if we have pending interrupts to process. Returns 1 if we
452 * have pending interrupts and 0 if we haven't.
453 */
454bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
455{
456 ATH5K_TRACE(ah->ah_sc);
457 return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
458}
459
460/**
461 * ath5k_hw_get_isr - Get interrupt status
462 *
463 * @ah: The @struct ath5k_hw
464 * @interrupt_mask: Driver's interrupt mask used to filter out
465 * interrupts in sw.
466 *
467 * This function is used inside our interrupt handler to determine the reason
468 * for the interrupt by reading Primary Interrupt Status Register. Returns an
469 * abstract interrupt status mask which is mostly ISR with some uncommon bits
470 * being mapped on some standard non hw-specific positions
471 * (check out &ath5k_int).
472 *
473 * NOTE: We use read-and-clear register, so after this function is called ISR
474 * is zeroed.
475 *
476 * XXX: Why filter interrupts in sw with interrupt_mask ? No benefit at all
477 * plus it can be misleading (one might thing that we save interrupts this way)
478 */
479int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
480{
481 u32 data;
482
483 ATH5K_TRACE(ah->ah_sc);
484
485 /*
486 * Read interrupt status from the Interrupt Status register
487 * on 5210
488 */
489 if (ah->ah_version == AR5K_AR5210) {
490 data = ath5k_hw_reg_read(ah, AR5K_ISR);
491 if (unlikely(data == AR5K_INT_NOCARD)) {
492 *interrupt_mask = data;
493 return -ENODEV;
494 }
495 } else {
496 /*
497 * Read interrupt status from the Read-And-Clear
498 * shadow register.
499 * Note: PISR/SISR Not available on 5210
500 */
501 data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
502 }
503
504 /*
505 * Get abstract interrupt mask (driver-compatible)
506 */
507 *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
508
509 if (unlikely(data == AR5K_INT_NOCARD))
510 return -ENODEV;
511
512 if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR))
513 *interrupt_mask |= AR5K_INT_RX;
514
515 if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR
516 | AR5K_ISR_TXDESC | AR5K_ISR_TXEOL))
517 *interrupt_mask |= AR5K_INT_TX;
518
519 if (ah->ah_version != AR5K_AR5210) {
520 /*HIU = Host Interface Unit (PCI etc)*/
521 if (unlikely(data & (AR5K_ISR_HIUERR)))
522 *interrupt_mask |= AR5K_INT_FATAL;
523
524 /*Beacon Not Ready*/
525 if (unlikely(data & (AR5K_ISR_BNR)))
526 *interrupt_mask |= AR5K_INT_BNR;
527 }
528
529 /*
530 * XXX: BMISS interrupts may occur after association.
531 * I found this on 5210 code but it needs testing. If this is
532 * true we should disable them before assoc and re-enable them
533 * after a successfull assoc + some jiffies.
534 */
535#if 0
536 interrupt_mask &= ~AR5K_INT_BMISS;
537#endif
538
539 /*
540 * In case we didn't handle anything,
541 * print the register value.
542 */
543 if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
544 ATH5K_PRINTF("0x%08x\n", data);
545
546 return 0;
547}
548
549/**
550 * ath5k_hw_set_imr - Set interrupt mask
551 *
552 * @ah: The &struct ath5k_hw
553 * @new_mask: The new interrupt mask to be set
554 *
555 * Set the interrupt mask in hw to save interrupts. We do that by mapping
556 * ath5k_int bits to hw-specific bits to remove abstraction and writing
557 * Interrupt Mask Register.
558 */
559enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
560{
561 enum ath5k_int old_mask, int_mask;
562
563 /*
564 * Disable card interrupts to prevent any race conditions
565 * (they will be re-enabled afterwards).
566 */
567 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
568 ath5k_hw_reg_read(ah, AR5K_IER);
569
570 old_mask = ah->ah_imr;
571
572 /*
573 * Add additional, chipset-dependent interrupt mask flags
574 * and write them to the IMR (interrupt mask register).
575 */
576 int_mask = new_mask & AR5K_INT_COMMON;
577
578 if (new_mask & AR5K_INT_RX)
579 int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN |
580 AR5K_IMR_RXDESC;
581
582 if (new_mask & AR5K_INT_TX)
583 int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC |
584 AR5K_IMR_TXURN;
585
586 if (ah->ah_version != AR5K_AR5210) {
587 if (new_mask & AR5K_INT_FATAL) {
588 int_mask |= AR5K_IMR_HIUERR;
589 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT |
590 AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR);
591 }
592 }
593
594 ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
595
596 /* Store new interrupt mask */
597 ah->ah_imr = new_mask;
598
599 /* ..re-enable interrupts */
600 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
601 ath5k_hw_reg_read(ah, AR5K_IER);
602
603 return old_mask;
604}
605
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
new file mode 100644
index 000000000000..a883839b6a9f
--- /dev/null
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -0,0 +1,466 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*************************************\
20* EEPROM access functions and helpers *
21\*************************************/
22
23#include "ath5k.h"
24#include "reg.h"
25#include "debug.h"
26#include "base.h"
27
28/*
29 * Read from eeprom
30 */
31static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
32{
33 u32 status, timeout;
34
35 ATH5K_TRACE(ah->ah_sc);
36 /*
37 * Initialize EEPROM access
38 */
39 if (ah->ah_version == AR5K_AR5210) {
40 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
41 (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
42 } else {
43 ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
44 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
45 AR5K_EEPROM_CMD_READ);
46 }
47
48 for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
49 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
50 if (status & AR5K_EEPROM_STAT_RDDONE) {
51 if (status & AR5K_EEPROM_STAT_RDERR)
52 return -EIO;
53 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
54 0xffff);
55 return 0;
56 }
57 udelay(15);
58 }
59
60 return -ETIMEDOUT;
61}
62
63/*
64 * Translate binary channel representation in EEPROM to frequency
65 */
66static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin,
67 unsigned int mode)
68{
69 u16 val;
70
71 if (bin == AR5K_EEPROM_CHANNEL_DIS)
72 return bin;
73
74 if (mode == AR5K_EEPROM_MODE_11A) {
75 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
76 val = (5 * bin) + 4800;
77 else
78 val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
79 (bin * 10) + 5100;
80 } else {
81 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
82 val = bin + 2300;
83 else
84 val = bin + 2400;
85 }
86
87 return val;
88}
89
90/*
91 * Read antenna infos from eeprom
92 */
93static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
94 unsigned int mode)
95{
96 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
97 u32 o = *offset;
98 u16 val;
99 int ret, i = 0;
100
101 AR5K_EEPROM_READ(o++, val);
102 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
103 ee->ee_ant_tx_rx[mode] = (val >> 2) & 0x3f;
104 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
105
106 AR5K_EEPROM_READ(o++, val);
107 ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
108 ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
109 ee->ee_ant_control[mode][i++] = val & 0x3f;
110
111 AR5K_EEPROM_READ(o++, val);
112 ee->ee_ant_control[mode][i++] = (val >> 10) & 0x3f;
113 ee->ee_ant_control[mode][i++] = (val >> 4) & 0x3f;
114 ee->ee_ant_control[mode][i] = (val << 2) & 0x3f;
115
116 AR5K_EEPROM_READ(o++, val);
117 ee->ee_ant_control[mode][i++] |= (val >> 14) & 0x3;
118 ee->ee_ant_control[mode][i++] = (val >> 8) & 0x3f;
119 ee->ee_ant_control[mode][i++] = (val >> 2) & 0x3f;
120 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
121
122 AR5K_EEPROM_READ(o++, val);
123 ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
124 ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
125 ee->ee_ant_control[mode][i++] = val & 0x3f;
126
127 /* Get antenna modes */
128 ah->ah_antenna[mode][0] =
129 (ee->ee_ant_control[mode][0] << 4) | 0x1;
130 ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
131 ee->ee_ant_control[mode][1] |
132 (ee->ee_ant_control[mode][2] << 6) |
133 (ee->ee_ant_control[mode][3] << 12) |
134 (ee->ee_ant_control[mode][4] << 18) |
135 (ee->ee_ant_control[mode][5] << 24);
136 ah->ah_antenna[mode][AR5K_ANT_FIXED_B] =
137 ee->ee_ant_control[mode][6] |
138 (ee->ee_ant_control[mode][7] << 6) |
139 (ee->ee_ant_control[mode][8] << 12) |
140 (ee->ee_ant_control[mode][9] << 18) |
141 (ee->ee_ant_control[mode][10] << 24);
142
143 /* return new offset */
144 *offset = o;
145
146 return 0;
147}
148
149/*
150 * Read supported modes from eeprom
151 */
152static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
153 unsigned int mode)
154{
155 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
156 u32 o = *offset;
157 u16 val;
158 int ret;
159
160 AR5K_EEPROM_READ(o++, val);
161 ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff;
162 ee->ee_thr_62[mode] = val & 0xff;
163
164 if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
165 ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
166
167 AR5K_EEPROM_READ(o++, val);
168 ee->ee_tx_end2xpa_disable[mode] = (val >> 8) & 0xff;
169 ee->ee_tx_frm2xpa_enable[mode] = val & 0xff;
170
171 AR5K_EEPROM_READ(o++, val);
172 ee->ee_pga_desired_size[mode] = (val >> 8) & 0xff;
173
174 if ((val & 0xff) & 0x80)
175 ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
176 else
177 ee->ee_noise_floor_thr[mode] = val & 0xff;
178
179 if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
180 ee->ee_noise_floor_thr[mode] =
181 mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
182
183 AR5K_EEPROM_READ(o++, val);
184 ee->ee_xlna_gain[mode] = (val >> 5) & 0xff;
185 ee->ee_x_gain[mode] = (val >> 1) & 0xf;
186 ee->ee_xpd[mode] = val & 0x1;
187
188 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
189 ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
190
191 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
192 AR5K_EEPROM_READ(o++, val);
193 ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
194
195 if (mode == AR5K_EEPROM_MODE_11A)
196 ee->ee_xr_power[mode] = val & 0x3f;
197 else {
198 ee->ee_ob[mode][0] = val & 0x7;
199 ee->ee_db[mode][0] = (val >> 3) & 0x7;
200 }
201 }
202
203 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
204 ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
205 ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
206 } else {
207 ee->ee_i_gain[mode] = (val >> 13) & 0x7;
208
209 AR5K_EEPROM_READ(o++, val);
210 ee->ee_i_gain[mode] |= (val << 3) & 0x38;
211
212 if (mode == AR5K_EEPROM_MODE_11G)
213 ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
214 }
215
216 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
217 mode == AR5K_EEPROM_MODE_11A) {
218 ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
219 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
220 }
221
222 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6 &&
223 mode == AR5K_EEPROM_MODE_11G)
224 ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
225
226 /* return new offset */
227 *offset = o;
228
229 return 0;
230}
231
232/*
233 * Initialize eeprom & capabilities structs
234 */
235int ath5k_eeprom_init(struct ath5k_hw *ah)
236{
237 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
238 unsigned int mode, i;
239 int ret;
240 u32 offset;
241 u16 val;
242
243 /* Initial TX thermal adjustment values */
244 ee->ee_tx_clip = 4;
245 ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
246 ee->ee_gain_select = 1;
247
248 /*
249 * Read values from EEPROM and store them in the capability structure
250 */
251 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
252 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
253 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
254 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
255 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
256
257 /* Return if we have an old EEPROM */
258 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
259 return 0;
260
261#ifdef notyet
262 /*
263 * Validate the checksum of the EEPROM date. There are some
264 * devices with invalid EEPROMs.
265 */
266 for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
267 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
268 cksum ^= val;
269 }
270 if (cksum != AR5K_EEPROM_INFO_CKSUM) {
271 ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
272 return -EIO;
273 }
274#endif
275
276 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
277 ee_ant_gain);
278
279 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
280 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
281 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
282 }
283
284 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
285 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
286 ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
287 ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
288
289 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
290 ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
291 ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
292 }
293
294 /*
295 * Get conformance test limit values
296 */
297 offset = AR5K_EEPROM_CTL(ah->ah_ee_version);
298 ee->ee_ctls = AR5K_EEPROM_N_CTLS(ah->ah_ee_version);
299
300 for (i = 0; i < ee->ee_ctls; i++) {
301 AR5K_EEPROM_READ(offset++, val);
302 ee->ee_ctl[i] = (val >> 8) & 0xff;
303 ee->ee_ctl[i + 1] = val & 0xff;
304 }
305
306 /*
307 * Get values for 802.11a (5GHz)
308 */
309 mode = AR5K_EEPROM_MODE_11A;
310
311 ee->ee_turbo_max_power[mode] =
312 AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
313
314 offset = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
315
316 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
317 if (ret)
318 return ret;
319
320 AR5K_EEPROM_READ(offset++, val);
321 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
322 ee->ee_ob[mode][3] = (val >> 5) & 0x7;
323 ee->ee_db[mode][3] = (val >> 2) & 0x7;
324 ee->ee_ob[mode][2] = (val << 1) & 0x7;
325
326 AR5K_EEPROM_READ(offset++, val);
327 ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
328 ee->ee_db[mode][2] = (val >> 12) & 0x7;
329 ee->ee_ob[mode][1] = (val >> 9) & 0x7;
330 ee->ee_db[mode][1] = (val >> 6) & 0x7;
331 ee->ee_ob[mode][0] = (val >> 3) & 0x7;
332 ee->ee_db[mode][0] = val & 0x7;
333
334 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
335 if (ret)
336 return ret;
337
338 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) {
339 AR5K_EEPROM_READ(offset++, val);
340 ee->ee_margin_tx_rx[mode] = val & 0x3f;
341 }
342
343 /*
344 * Get values for 802.11b (2.4GHz)
345 */
346 mode = AR5K_EEPROM_MODE_11B;
347 offset = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
348
349 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
350 if (ret)
351 return ret;
352
353 AR5K_EEPROM_READ(offset++, val);
354 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
355 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
356 ee->ee_db[mode][1] = val & 0x7;
357
358 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
359 if (ret)
360 return ret;
361
362 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
363 AR5K_EEPROM_READ(offset++, val);
364 ee->ee_cal_pier[mode][0] =
365 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
366 ee->ee_cal_pier[mode][1] =
367 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
368
369 AR5K_EEPROM_READ(offset++, val);
370 ee->ee_cal_pier[mode][2] =
371 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
372 }
373
374 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
375 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
376
377 /*
378 * Get values for 802.11g (2.4GHz)
379 */
380 mode = AR5K_EEPROM_MODE_11G;
381 offset = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
382
383 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
384 if (ret)
385 return ret;
386
387 AR5K_EEPROM_READ(offset++, val);
388 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
389 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
390 ee->ee_db[mode][1] = val & 0x7;
391
392 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
393 if (ret)
394 return ret;
395
396 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
397 AR5K_EEPROM_READ(offset++, val);
398 ee->ee_cal_pier[mode][0] =
399 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
400 ee->ee_cal_pier[mode][1] =
401 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
402
403 AR5K_EEPROM_READ(offset++, val);
404 ee->ee_turbo_max_power[mode] = val & 0x7f;
405 ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
406
407 AR5K_EEPROM_READ(offset++, val);
408 ee->ee_cal_pier[mode][2] =
409 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
410
411 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
412 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
413
414 AR5K_EEPROM_READ(offset++, val);
415 ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
416 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
417
418 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
419 AR5K_EEPROM_READ(offset++, val);
420 ee->ee_cck_ofdm_gain_delta = val & 0xff;
421 }
422 }
423
424 /*
425 * Read 5GHz EEPROM channels
426 */
427
428 return 0;
429}
430
431/*
432 * Read the MAC address from eeprom
433 */
434int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
435{
436 u8 mac_d[ETH_ALEN];
437 u32 total, offset;
438 u16 data;
439 int octet, ret;
440
441 memset(mac, 0, ETH_ALEN);
442 memset(mac_d, 0, ETH_ALEN);
443
444 ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
445 if (ret)
446 return ret;
447
448 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
449 ret = ath5k_hw_eeprom_read(ah, offset, &data);
450 if (ret)
451 return ret;
452
453 total += data;
454 mac_d[octet + 1] = data & 0xff;
455 mac_d[octet] = data >> 8;
456 octet += 2;
457 }
458
459 memcpy(mac, mac_d, ETH_ALEN);
460
461 if (!total || total == 3 * 0xffff)
462 return -EINVAL;
463
464 return 0;
465}
466
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
new file mode 100644
index 000000000000..a468ecfbb18a
--- /dev/null
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/*
20 * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
21 */
22#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
23#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
24#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
25#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
26#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
27
28#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */
29#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */
30#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */
31#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */
32#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008
33#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */
34#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020
35#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */
36#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080
37#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */
38#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200
39#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */
40#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800
41#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */
42#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000
43#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */
44#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000
45#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
46#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
47#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
48#define AR5K_EEPROM_INFO_CKSUM 0xffff
49#define AR5K_EEPROM_INFO(_n) (AR5K_EEPROM_INFO_BASE + (_n))
50
51#define AR5K_EEPROM_VERSION AR5K_EEPROM_INFO(1) /* EEPROM Version */
52#define AR5K_EEPROM_VERSION_3_0 0x3000 /* No idea what's going on before this version */
53#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2Ghz (ar5211_rfregs) */
54#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */
55#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
56#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain ee_cck_ofdm_power_delta (eeprom_read_modes) */
57#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc*, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
58#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */
59#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */
60#define AR5K_EEPROM_VERSION_4_3 0x4003
61#define AR5K_EEPROM_VERSION_4_4 0x4004
62#define AR5K_EEPROM_VERSION_4_5 0x4005
63#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
64#define AR5K_EEPROM_VERSION_4_7 0x4007
65
66#define AR5K_EEPROM_MODE_11A 0
67#define AR5K_EEPROM_MODE_11B 1
68#define AR5K_EEPROM_MODE_11G 2
69
70#define AR5K_EEPROM_HDR AR5K_EEPROM_INFO(2) /* Header that contains the device caps */
71#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
72#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
73#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
74#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
75#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
76#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
77#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz (?) */
78#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
79
80#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
81#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
82#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002
83#define AR5K_EEPROM_RFKILL_POLARITY_S 1
84
85/* Newer EEPROMs are using a different offset */
86#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
87 (((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
88
89#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
90#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((int8_t)(((_v) >> 8) & 0xff))
91#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((int8_t)((_v) & 0xff))
92
93/* calibration settings */
94#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
95#define AR5K_EEPROM_MODES_11B(_v) AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
96#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
97#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */
98
99/* [3.1 - 3.3] */
100#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec
101#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed
102
103/* Misc values available since EEPROM 4.0 */
104#define AR5K_EEPROM_MISC0 0x00c4
105#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff)
106#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3)
107#define AR5K_EEPROM_MISC1 0x00c5
108#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
109#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
110
111
112/* Some EEPROM defines */
113#define AR5K_EEPROM_EEP_SCALE 100
114#define AR5K_EEPROM_EEP_DELTA 10
115#define AR5K_EEPROM_N_MODES 3
116#define AR5K_EEPROM_N_5GHZ_CHAN 10
117#define AR5K_EEPROM_N_2GHZ_CHAN 3
118#define AR5K_EEPROM_MAX_CHAN 10
119#define AR5K_EEPROM_N_PCDAC 11
120#define AR5K_EEPROM_N_TEST_FREQ 8
121#define AR5K_EEPROM_N_EDGES 8
122#define AR5K_EEPROM_N_INTERCEPTS 11
123#define AR5K_EEPROM_FREQ_M(_v) AR5K_EEPROM_OFF(_v, 0x7f, 0xff)
124#define AR5K_EEPROM_PCDAC_M 0x3f
125#define AR5K_EEPROM_PCDAC_START 1
126#define AR5K_EEPROM_PCDAC_STOP 63
127#define AR5K_EEPROM_PCDAC_STEP 1
128#define AR5K_EEPROM_NON_EDGE_M 0x40
129#define AR5K_EEPROM_CHANNEL_POWER 8
130#define AR5K_EEPROM_N_OBDB 4
131#define AR5K_EEPROM_OBDB_DIS 0xffff
132#define AR5K_EEPROM_CHANNEL_DIS 0xff
133#define AR5K_EEPROM_SCALE_OC_DELTA(_x) (((_x) * 2) / 10)
134#define AR5K_EEPROM_N_CTLS(_v) AR5K_EEPROM_OFF(_v, 16, 32)
135#define AR5K_EEPROM_MAX_CTLS 32
136#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4
137#define AR5K_EEPROM_N_XPD0_POINTS 4
138#define AR5K_EEPROM_N_XPD3_POINTS 3
139#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35
140#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55
141#define AR5K_EEPROM_POWER_M 0x3f
142#define AR5K_EEPROM_POWER_MIN 0
143#define AR5K_EEPROM_POWER_MAX 3150
144#define AR5K_EEPROM_POWER_STEP 50
145#define AR5K_EEPROM_POWER_TABLE_SIZE 64
146#define AR5K_EEPROM_N_POWER_LOC_11B 4
147#define AR5K_EEPROM_N_POWER_LOC_11G 6
148#define AR5K_EEPROM_I_GAIN 10
149#define AR5K_EEPROM_CCK_OFDM_DELTA 15
150#define AR5K_EEPROM_N_IQ_CAL 2
151
152#define AR5K_EEPROM_READ(_o, _v) do { \
153 ret = ath5k_hw_eeprom_read(ah, (_o), &(_v)); \
154 if (ret) \
155 return ret; \
156} while (0)
157
/* Read an EEPROM header field straight into cap_eeprom._v.
 * No embedded semicolon or trailing line-continuation: the old form
 * expanded to a double statement (`;;`) at every call site and silently
 * swallowed the following blank line into the macro. */
#define AR5K_EEPROM_READ_HDR(_o, _v)					\
	AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v)

/* Struct to hold EEPROM calibration data, filled in by ath5k_eeprom_init()
 * and the ath5k_eeprom_read_* helpers */
struct ath5k_eeprom_info {
	/* Header fields (read via AR5K_EEPROM_READ_HDR in eeprom_init) */
	u16	ee_magic;
	u16	ee_protect;
	u16	ee_regdomain;
	u16	ee_version;
	u16	ee_header;
	u16	ee_ant_gain;
	u16	ee_misc0;
	u16	ee_misc1;
	u16	ee_cck_ofdm_gain_delta;
	u16	ee_cck_ofdm_power_delta;
	u16	ee_scaled_cck_delta;

	/* Used for tx thermal adjustment (eeprom_init, rfregs) */
	u16	ee_tx_clip;
	u16	ee_pwd_84;
	u16	ee_pwd_90;
	u16	ee_gain_select;

	/* RF Calibration settings (reset, rfregs),
	 * indexed by AR5K_EEPROM_MODE_11A/B/G */
	u16	ee_i_cal[AR5K_EEPROM_N_MODES];
	u16	ee_q_cal[AR5K_EEPROM_N_MODES];
	u16	ee_fixed_bias[AR5K_EEPROM_N_MODES];
	u16	ee_turbo_max_power[AR5K_EEPROM_N_MODES];
	u16	ee_xr_power[AR5K_EEPROM_N_MODES];
	u16	ee_switch_settling[AR5K_EEPROM_N_MODES];
	u16	ee_ant_tx_rx[AR5K_EEPROM_N_MODES];
	u16	ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
	u16	ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
	u16	ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
	u16	ee_tx_end2xlna_enable[AR5K_EEPROM_N_MODES];
	u16	ee_tx_end2xpa_disable[AR5K_EEPROM_N_MODES];
	u16	ee_tx_frm2xpa_enable[AR5K_EEPROM_N_MODES];
	u16	ee_thr_62[AR5K_EEPROM_N_MODES];
	u16	ee_xlna_gain[AR5K_EEPROM_N_MODES];
	u16	ee_xpd[AR5K_EEPROM_N_MODES];
	u16	ee_x_gain[AR5K_EEPROM_N_MODES];
	u16	ee_i_gain[AR5K_EEPROM_N_MODES];
	u16	ee_margin_tx_rx[AR5K_EEPROM_N_MODES];

	/* Unused */
	u16	ee_false_detect[AR5K_EEPROM_N_MODES];
	u16	ee_cal_pier[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_2GHZ_CHAN];
	u16	ee_channel[AR5K_EEPROM_N_MODES][AR5K_EEPROM_MAX_CHAN]; /*empty*/

	/* Conformance test limits (Unused) */
	u16	ee_ctls;
	u16	ee_ctl[AR5K_EEPROM_MAX_CTLS];

	/* Noise Floor Calibration settings */
	s16	ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
	s8	ee_adc_desired_size[AR5K_EEPROM_N_MODES];
	s8	ee_pga_desired_size[AR5K_EEPROM_N_MODES];
};
diff --git a/drivers/net/wireless/ath5k/gpio.c b/drivers/net/wireless/ath5k/gpio.c
new file mode 100644
index 000000000000..b77205adc180
--- /dev/null
+++ b/drivers/net/wireless/ath5k/gpio.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/****************\
20 GPIO Functions
21\****************/
22
23#include "ath5k.h"
24#include "reg.h"
25#include "debug.h"
26#include "base.h"
27
28/*
29 * Set led state
30 */
/*
 * Program the hardware LED via the PCICFG register: first clear the
 * LED/LEDMODE bits, then enable a pattern selected by @state.
 * The AR5210 MAC exposes a different set of PCICFG LED bits than
 * later MACs, so two candidate values are computed and the matching
 * one is written at the end.
 */
 31void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
 32{
 33 u32 led;
 34 /*5210 has different led mode handling*/
 35 u32 led_5210;
 36
 37 ATH5K_TRACE(ah->ah_sc);
 38
 39 /*Reset led status*/
/* 5210 has no LEDMODE field, only the plain LED bits */
 40 if (ah->ah_version != AR5K_AR5210)
 41 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
 42 AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED);
 43 else
 44 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
 45
 46 /*
 47 * Some blinking values, define at your wish
 48 */
/* Map the abstract LED state to register bits; unknown states fall
 * through to the default (promiscuous-mode pattern, LED off). */
 49 switch (state) {
 50 case AR5K_LED_SCAN:
 51 case AR5K_LED_AUTH:
 52 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
 53 led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
 54 break;
 55
 56 case AR5K_LED_INIT:
 57 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
 58 led_5210 = AR5K_PCICFG_LED_PEND;
 59 break;
 60
 61 case AR5K_LED_ASSOC:
 62 case AR5K_LED_RUN:
 63 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
 64 led_5210 = AR5K_PCICFG_LED_ASSOC;
 65 break;
 66
 67 default:
 68 led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
 69 led_5210 = AR5K_PCICFG_LED_PEND;
 70 break;
 71 }
 72
 73 /*Write new status to the register*/
 74 if (ah->ah_version != AR5K_AR5210)
 75 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led)
 76 else
 77 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
 78}
79
80/*
81 * Set GPIO inputs
82 */
/*
 * Configure a GPIO pin as an input: clear its output-enable bits in
 * GPIOCR and set its input bits.
 * Returns 0 on success or -EINVAL for an out-of-range pin.
 *
 * NOTE(review): the bound check is `gpio > AR5K_NUM_GPIO`; if
 * AR5K_NUM_GPIO is the pin *count* this is an off-by-one (should be
 * `>=`) — confirm against the register definitions in reg.h.
 */
 83int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
 84{
 85 ATH5K_TRACE(ah->ah_sc);
 86 if (gpio > AR5K_NUM_GPIO)
 87 return -EINVAL;
 88
/* Read-modify-write: drop the OUT bits for this pin, add the IN bits */
 89 ath5k_hw_reg_write(ah,
 90 (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
 91 | AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
 92
 93 return 0;
 94}
95
96/*
97 * Set GPIO outputs
98 */
/*
 * Configure a GPIO pin as an output by setting its output-enable bits
 * in GPIOCR. Returns 0 on success or -EINVAL for an out-of-range pin.
 *
 * NOTE(review): the expression clears AR5K_GPIOCR_OUT(gpio) and then
 * ORs the same mask back in — the `& ~OUT` is redundant but harmless.
 * Same `>` vs `>=` bound question as ath5k_hw_set_gpio_input().
 */
 99int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
100{
101 ATH5K_TRACE(ah->ah_sc);
102 if (gpio > AR5K_NUM_GPIO)
103 return -EINVAL;
104
105 ath5k_hw_reg_write(ah,
106 (ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
107 | AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
108
109 return 0;
110}
111
112/*
113 * Get GPIO state
114 */
/*
 * Read the current level of a GPIO pin from the GPIO data-in register.
 * Returns 0 or 1 for a valid pin, or 0xffffffff as an in-band error
 * sentinel for an out-of-range pin (callers must treat that value
 * specially — it is not a pin level).
 */
115u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
116{
117 ATH5K_TRACE(ah->ah_sc);
118 if (gpio > AR5K_NUM_GPIO)
119 return 0xffffffff;
120
121 /* GPIO input magic */
/* Mask the valid GPIODI bits, shift the requested pin down to bit 0 */
122 return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
123 0x1;
124}
125
126/*
127 * Set GPIO state
128 */
/*
 * Drive a GPIO output pin to @val (only bit 0 of @val is used) via a
 * read-modify-write of the GPIO data-out register, leaving all other
 * pins untouched. Returns 0 on success or -EINVAL for an
 * out-of-range pin. Assumes the pin was already configured as an
 * output with ath5k_hw_set_gpio_output().
 */
129int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
130{
131 u32 data;
132 ATH5K_TRACE(ah->ah_sc);
133
134 if (gpio > AR5K_NUM_GPIO)
135 return -EINVAL;
136
137 /* GPIO output magic */
138 data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
139
/* Clear the pin's bit, then set it from the low bit of val */
140 data &= ~(1 << gpio);
141 data |= (val & 1) << gpio;
142
143 ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
144
145 return 0;
146}
147
148/*
149 * Initialize the GPIO interrupt (RFKill switch)
150 */
/*
 * Arm a GPIO-driven interrupt (used for the RFKill switch): select the
 * interrupt source pin in GPIOCR, enable GPIO interrupts in the
 * primary interrupt mask, and record AR5K_IMR_GPIO in the cached
 * software mask (ah_imr). Silently returns for an out-of-range pin.
 *
 * NOTE(review): AR5K_GPIOCR_INT_SELH is ORed in only when
 * @interrupt_level is 0 — it appears to select the inactive/high
 * trigger polarity, but confirm against the GPIOCR register docs.
 */
151void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
152 u32 interrupt_level)
153{
154 u32 data;
155
156 ATH5K_TRACE(ah->ah_sc);
157 if (gpio > AR5K_NUM_GPIO)
158 return;
159
160 /*
161 * Set the GPIO interrupt
162 */
/* Clear any previous source-select/polarity/enable and the pin's
 * output enable, then select this pin and enable the interrupt. */
163 data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
164 ~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
165 AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
166 (AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
167
168 ath5k_hw_reg_write(ah, interrupt_level ? data :
169 (data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
170
/* Keep the software copy of the interrupt mask in sync */
171 ah->ah_imr |= AR5K_IMR_GPIO;
172
173 /* Enable GPIO interrupts */
174 AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
175}
176
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
deleted file mode 100644
index ad1a5b422c8c..000000000000
--- a/drivers/net/wireless/ath5k/hw.c
+++ /dev/null
@@ -1,4529 +0,0 @@
1/*
2 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007 Matthew W. S. Bell <mentor@madwifi.org>
5 * Copyright (c) 2007 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
6 * Copyright (c) 2007 Pavel Roskin <proski@gnu.org>
7 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 *
21 */
22
23/*
24 * HW related functions for Atheros Wireless LAN devices.
25 */
26
27#include <linux/pci.h>
28#include <linux/delay.h>
29
30#include "reg.h"
31#include "base.h"
32#include "debug.h"
33
34/* Rate tables */
35static const struct ath5k_rate_table ath5k_rt_11a = AR5K_RATES_11A;
36static const struct ath5k_rate_table ath5k_rt_11b = AR5K_RATES_11B;
37static const struct ath5k_rate_table ath5k_rt_11g = AR5K_RATES_11G;
38static const struct ath5k_rate_table ath5k_rt_turbo = AR5K_RATES_TURBO;
39static const struct ath5k_rate_table ath5k_rt_xr = AR5K_RATES_XR;
40
41/* Prototypes */
42static int ath5k_hw_nic_reset(struct ath5k_hw *, u32);
43static int ath5k_hw_nic_wakeup(struct ath5k_hw *, int, bool);
44static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
45 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
46 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
47 unsigned int, unsigned int);
48static int ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
49 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
50 unsigned int);
51static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
52 struct ath5k_tx_status *);
53static int ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
54 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
55 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
56 unsigned int, unsigned int);
57static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
58 struct ath5k_tx_status *);
59static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *, struct ath5k_desc *,
60 struct ath5k_rx_status *);
61static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *, struct ath5k_desc *,
62 struct ath5k_rx_status *);
63static int ath5k_hw_get_capabilities(struct ath5k_hw *);
64
65static int ath5k_eeprom_init(struct ath5k_hw *);
66static int ath5k_eeprom_read_mac(struct ath5k_hw *, u8 *);
67
68static int ath5k_hw_enable_pspoll(struct ath5k_hw *, u8 *, u16);
69static int ath5k_hw_disable_pspoll(struct ath5k_hw *);
70
71/*
72 * Enable to overwrite the country code (use "00" for debug)
73 */
74#if 0
75#define COUNTRYCODE "00"
76#endif
77
78/*******************\
79 General Functions
80\*******************/
81
82/*
83 * Functions used internaly
84 */
85
/*
 * Convert microseconds to hardware clock ticks: the baseband clock is
 * 40 MHz in normal mode and 80 MHz in turbo mode.
 */
 86static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
 87{
 88 return turbo ? (usec * 80) : (usec * 40);
 89}
90
/*
 * Inverse of ath5k_hw_htoclock(): convert hardware clock ticks back
 * to microseconds (integer division truncates sub-microsecond parts).
 */
 91static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
 92{
 93 return turbo ? (clock / 80) : (clock / 40);
 94}
95
96/*
97 * Check if a register write has been completed
98 */
/*
 * Busy-poll a register until a condition holds or the attempt budget
 * (AR5K_TUNE_REGISTER_TIMEOUT iterations, 15 us apart) is exhausted.
 * With @is_set true the loop succeeds as soon as any bit of @flag is
 * set; otherwise it succeeds when the masked value equals @val.
 * Returns 0 on success, -EAGAIN on timeout.
 */
 99int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
100 bool is_set)
101{
102 int i;
103 u32 data;
104
105 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
106 data = ath5k_hw_reg_read(ah, reg);
107 if (is_set && (data & flag))
108 break;
109 else if ((data & flag) == val)
110 break;
111 udelay(15);
112 }
113
/* i reaches 0 only if the loop ran out without hitting a break */
114 return (i <= 0) ? -EAGAIN : 0;
115}
116
117
118/***************************************\
119 Attach/Detach Functions
120\***************************************/
121
122/*
123 * Power On Self Test helper function
124 */
/*
 * Power On Self Test helper: verifies basic register read/write
 * integrity by writing walking (i<<16|i) patterns and four fixed
 * patterns to two scratch-capable registers (STA_ID0 and PHY(8)),
 * reading each back and comparing. The original register contents are
 * restored afterwards. Returns 0 on success, -EAGAIN on the first
 * mismatch.
 *
 * NOTE(review): the interleaved 0x0039080f/0x003b080f writes were
 * copied from ndiswrapper register dumps (per the inline comments);
 * their exact purpose is not established by this code.
 */
125static int ath5k_hw_post(struct ath5k_hw *ah)
126{
127
128 int i, c;
129 u16 cur_reg;
130 u16 regs[2] = {AR5K_STA_ID0, AR5K_PHY(8)};
131 u32 var_pattern;
132 u32 static_pattern[4] = {
133 0x55555555, 0xaaaaaaaa,
134 0x66666666, 0x99999999
135 };
136 u32 init_val;
137 u32 cur_val;
138
139 for (c = 0; c < 2; c++) {
140
141 cur_reg = regs[c];
142
143 /* Save previous value */
144 init_val = ath5k_hw_reg_read(ah, cur_reg);
145
/* Walking pattern: write i in both half-words, expect it back */
146 for (i = 0; i < 256; i++) {
147 var_pattern = i << 16 | i;
148 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
149 cur_val = ath5k_hw_reg_read(ah, cur_reg);
150
151 if (cur_val != var_pattern) {
152 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
153 return -EAGAIN;
154 }
155
156 /* Found on ndiswrapper dumps */
157 var_pattern = 0x0039080f;
158 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
159 }
160
/* Fixed alternating-bit patterns */
161 for (i = 0; i < 4; i++) {
162 var_pattern = static_pattern[i];
163 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
164 cur_val = ath5k_hw_reg_read(ah, cur_reg);
165
166 if (cur_val != var_pattern) {
167 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
168 return -EAGAIN;
169 }
170
171 /* Found on ndiswrapper dumps */
172 var_pattern = 0x003b080f;
173 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
174 }
175
176 /* Restore previous value */
177 ath5k_hw_reg_write(ah, init_val, cur_reg);
178
179 }
180
181 return 0;
182
183}
184
185/*
186 * Check if the device is supported and initialize the needed structs
187 */
/*
 * Allocate and initialize the ath5k_hw descriptor for one device:
 * set software defaults, install the MAC-version-specific descriptor
 * callbacks, wake/reset the chip, read MAC/PHY/radio revisions,
 * reject unsupported silicon, identify the radio chip, run POST,
 * initialize the EEPROM and capabilities, and program the MAC
 * address. Returns the new ath5k_hw on success or an ERR_PTR-encoded
 * negative errno; all error paths free the allocation (goto-cleanup).
 * The caller owns the returned struct and releases it with
 * ath5k_hw_detach().
 */
188struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
189{
190 struct ath5k_hw *ah;
191 struct pci_dev *pdev = sc->pdev;
192 u8 mac[ETH_ALEN];
193 int ret;
194 u32 srev;
195
196 /*If we passed the test malloc a ath5k_hw struct*/
197 ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
198 if (ah == NULL) {
199 ret = -ENOMEM;
200 ATH5K_ERR(sc, "out of memory\n");
201 goto err;
202 }
203
204 ah->ah_sc = sc;
205 ah->ah_iobase = sc->iobase;
206
207 /*
208 * HW information
209 */
/* Software defaults; tunables come from the AR5K_TUNE_* constants */
210
211 ah->ah_op_mode = IEEE80211_IF_TYPE_STA;
212 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
213 ah->ah_turbo = false;
214 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
215 ah->ah_imr = 0;
216 ah->ah_atim_window = 0;
217 ah->ah_aifs = AR5K_TUNE_AIFS;
218 ah->ah_cw_min = AR5K_TUNE_CWMIN;
219 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
220 ah->ah_software_retry = false;
221 ah->ah_ant_diversity = AR5K_TUNE_ANT_DIVERSITY;
222
223 /*
224 * Set the mac revision based on the pci id
225 */
226 ah->ah_version = mac_version;
227
228 /*Fill the ath5k_hw struct with the needed functions*/
229 if (ah->ah_version == AR5K_AR5212)
230 ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
231 else if (ah->ah_version == AR5K_AR5211)
232 ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
233
/* 5212 uses 4-word TX descriptors, older MACs use the 2-word form */
234 if (ah->ah_version == AR5K_AR5212) {
235 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
236 ah->ah_setup_xtx_desc = ath5k_hw_setup_xr_tx_desc;
237 ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
238 } else {
239 ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
240 ah->ah_setup_xtx_desc = ath5k_hw_setup_xr_tx_desc;
241 ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
242 }
243
244 if (ah->ah_version == AR5K_AR5212)
245 ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
246 else if (ah->ah_version <= AR5K_AR5211)
247 ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
248
249 /* Bring device out of sleep and reset it's units */
250 ret = ath5k_hw_nic_wakeup(ah, AR5K_INIT_MODE, true);
251 if (ret)
252 goto err_free;
253
254 /* Get MAC, PHY and RADIO revisions */
255 srev = ath5k_hw_reg_read(ah, AR5K_SREV);
256 ah->ah_mac_srev = srev;
257 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
258 ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
259 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
260 0xffffffff;
261 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
262 CHANNEL_5GHZ);
263
264 if (ah->ah_version == AR5K_AR5210)
265 ah->ah_radio_2ghz_revision = 0;
266 else
267 ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
268 CHANNEL_2GHZ);
269
270 /* Return on unsuported chips (unsupported eeprom etc) */
271 if ((srev >= AR5K_SREV_VER_AR5416) &&
272 (srev < AR5K_SREV_VER_AR2425)) {
273 ATH5K_ERR(sc, "Device not yet supported.\n");
274 ret = -ENODEV;
275 goto err_free;
276 } else if (srev == AR5K_SREV_VER_AR2425) {
277 ATH5K_WARN(sc, "Support for RF2425 is under development.\n");
278 }
279
280 /* Identify single chip solutions */
281 if (((srev <= AR5K_SREV_VER_AR5414) &&
282 (srev >= AR5K_SREV_VER_AR2413)) ||
283 (srev == AR5K_SREV_VER_AR2425)) {
284 ah->ah_single_chip = true;
285 } else {
286 ah->ah_single_chip = false;
287 }
288
289 /* Single chip radio */
290 if (ah->ah_radio_2ghz_revision == ah->ah_radio_5ghz_revision)
291 ah->ah_radio_2ghz_revision = 0;
292
293 /* Identify the radio chip*/
/* Radio identification cascades on the 5 GHz radio revision ranges */
294 if (ah->ah_version == AR5K_AR5210) {
295 ah->ah_radio = AR5K_RF5110;
296 /*
297 * Register returns 0x0/0x04 for radio revision
298 * so ath5k_hw_radio_revision doesn't parse the value
299 * correctly. For now we are based on mac's srev to
300 * identify RF2425 radio.
301 */
302 } else if (srev == AR5K_SREV_VER_AR2425) {
303 ah->ah_radio = AR5K_RF2425;
304 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2425;
305 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
306 ah->ah_radio = AR5K_RF5111;
307 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
308 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
309 ah->ah_radio = AR5K_RF5112;
310 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
311 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
312 ah->ah_radio = AR5K_RF2413;
313 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
314 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
315 ah->ah_radio = AR5K_RF5413;
316 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
317 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5133) {
318 /* AR5424 */
319 if (srev >= AR5K_SREV_VER_AR5424) {
320 ah->ah_radio = AR5K_RF5413;
321 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5413;
322 /* AR2424 */
323 } else {
324 ah->ah_radio = AR5K_RF2413; /* For testing */
325 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF2413;
326 }
327 }
328 ah->ah_phy = AR5K_PHY(0);
329
330 /*
331 * Write PCI-E power save settings
332 */
/* Magic serdes values for PCI-E 5212 parts (origin not documented) */
333 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
334 ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
335 ath5k_hw_reg_write(ah, 0x24924924, 0x4080);
336 ath5k_hw_reg_write(ah, 0x28000039, 0x4080);
337 ath5k_hw_reg_write(ah, 0x53160824, 0x4080);
338 ath5k_hw_reg_write(ah, 0xe5980579, 0x4080);
339 ath5k_hw_reg_write(ah, 0x001defff, 0x4080);
340 ath5k_hw_reg_write(ah, 0x1aaabe40, 0x4080);
341 ath5k_hw_reg_write(ah, 0xbe105554, 0x4080);
342 ath5k_hw_reg_write(ah, 0x000e3007, 0x4080);
343 ath5k_hw_reg_write(ah, 0x00000000, 0x4084);
344 }
345
346 /*
347 * POST
348 */
349 ret = ath5k_hw_post(ah);
350 if (ret)
351 goto err_free;
352
353 /* Write AR5K_PCICFG_UNK on 2112B and later chips */
354 if (ah->ah_radio_5ghz_revision > AR5K_SREV_RAD_2112B ||
355 srev > AR5K_SREV_VER_AR2413) {
356 ath5k_hw_reg_write(ah, AR5K_PCICFG_UNK, AR5K_PCICFG);
357 }
358
359 /*
360 * Get card capabilities, values, ...
361 */
362 ret = ath5k_eeprom_init(ah);
363 if (ret) {
364 ATH5K_ERR(sc, "unable to init EEPROM\n");
365 goto err_free;
366 }
367
368 /* Get misc capabilities */
369 ret = ath5k_hw_get_capabilities(ah);
370 if (ret) {
371 ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n",
372 sc->pdev->device);
373 goto err_free;
374 }
375
376 /* Get MAC address */
377 ret = ath5k_eeprom_read_mac(ah, mac);
378 if (ret) {
379 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
380 sc->pdev->device);
381 goto err_free;
382 }
383
384 ath5k_hw_set_lladdr(ah, mac);
385 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
386 memset(ah->ah_bssid, 0xff, ETH_ALEN);
387 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
388 ath5k_hw_set_opmode(ah);
389
390 ath5k_hw_set_rfgain_opt(ah);
391
392 return ah;
393err_free:
394 kfree(ah);
395err:
396 return ERR_PTR(ret);
397}
398
399/*
400 * Bring up MAC + PHY Chips
401 */
/*
 * Bring the MAC and PHY out of sleep and (re)initialize them for the
 * channel flags in @flags: wake the chip, compute the PHY mode/PLL
 * values for the band and modulation, perform a full chip reset (PCU,
 * baseband, and — except on PCI-E — the PCI unit), wake again, warm
 * reset, and finally program the PHY PLL/mode/turbo registers.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): @initial is accepted but never read in this body —
 * apparently vestigial; confirm before relying on it.
 */
402static int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
403{
404 struct pci_dev *pdev = ah->ah_sc->pdev;
405 u32 turbo, mode, clock, bus_flags;
406 int ret;
407
408 turbo = 0;
409 mode = 0;
410 clock = 0;
411
412 ATH5K_TRACE(ah->ah_sc);
413
414 /* Wakeup the device */
415 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
416 if (ret) {
417 ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
418 return ret;
419 }
420
421 if (ah->ah_version != AR5K_AR5210) {
422 /*
423 * Get channel mode flags
424 */
425
426 if (ah->ah_radio >= AR5K_RF5112) {
427 mode = AR5K_PHY_MODE_RAD_RF5112;
428 clock = AR5K_PHY_PLL_RF5112;
429 } else {
430 mode = AR5K_PHY_MODE_RAD_RF5111; /*Zero*/
431 clock = AR5K_PHY_PLL_RF5111; /*Zero*/
432 }
433
434 if (flags & CHANNEL_2GHZ) {
435 mode |= AR5K_PHY_MODE_FREQ_2GHZ;
436 clock |= AR5K_PHY_PLL_44MHZ;
437
438 if (flags & CHANNEL_CCK) {
439 mode |= AR5K_PHY_MODE_MOD_CCK;
440 } else if (flags & CHANNEL_OFDM) {
441 /* XXX Dynamic OFDM/CCK is not supported by the
442 * AR5211 so we set MOD_OFDM for plain g (no
443 * CCK headers) operation. We need to test
444 * this, 5211 might support ofdm-only g after
445 * all, there are also initial register values
446 * in the code for g mode (see initvals.c). */
447 if (ah->ah_version == AR5K_AR5211)
448 mode |= AR5K_PHY_MODE_MOD_OFDM;
449 else
450 mode |= AR5K_PHY_MODE_MOD_DYN;
451 } else {
452 ATH5K_ERR(ah->ah_sc,
453 "invalid radio modulation mode\n");
454 return -EINVAL;
455 }
456 } else if (flags & CHANNEL_5GHZ) {
457 mode |= AR5K_PHY_MODE_FREQ_5GHZ;
458 clock |= AR5K_PHY_PLL_40MHZ;
459
460 if (flags & CHANNEL_OFDM)
461 mode |= AR5K_PHY_MODE_MOD_OFDM;
462 else {
463 ATH5K_ERR(ah->ah_sc,
464 "invalid radio modulation mode\n");
465 return -EINVAL;
466 }
467 } else {
468 ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
469 return -EINVAL;
470 }
471
472 if (flags & CHANNEL_TURBO)
473 turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT;
474 } else { /* Reset the device */
475
476 /* ...enable Atheros turbo mode if requested */
477 if (flags & CHANNEL_TURBO)
478 ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
479 AR5K_PHY_TURBO);
480 }
481
482 /* reseting PCI on PCI-E cards results card to hang
483 * and always return 0xffff... so we ingore that flag
484 * for PCI-E cards */
485 bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;
486
487 /* Reset chipset */
488 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
489 AR5K_RESET_CTL_BASEBAND | bus_flags);
490 if (ret) {
491 ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
492 return -EIO;
493 }
494
/* 5210 apparently needs extra settle time after the reset */
495 if (ah->ah_version == AR5K_AR5210)
496 udelay(2300);
497
498 /* ...wakeup again!*/
499 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
500 if (ret) {
501 ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
502 return ret;
503 }
504
505 /* ...final warm reset */
506 if (ath5k_hw_nic_reset(ah, 0)) {
507 ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
508 return -EIO;
509 }
510
511 if (ah->ah_version != AR5K_AR5210) {
512 /* ...set the PHY operating mode */
513 ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
514 udelay(300);
515
516 ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
517 ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
518 }
519
520 return 0;
521}
522
523/*
524 * Get the rate table for a specific operation mode
525 */
/*
 * Return the static rate table matching @mode, or NULL if the mode is
 * not in the device's capability bitmap or is unknown. The returned
 * pointer references a file-scope const table — callers must not free
 * or modify it.
 *
 * NOTE(review): AR5K_MODE_11G_TURBO returns &ath5k_rt_xr (the XR
 * table) rather than &ath5k_rt_turbo — this looks like a mix-up;
 * confirm against the rate-table definitions before changing.
 */
526const struct ath5k_rate_table *ath5k_hw_get_rate_table(struct ath5k_hw *ah,
527 unsigned int mode)
528{
529 ATH5K_TRACE(ah->ah_sc);
530
/* Reject modes the hardware does not advertise */
531 if (!test_bit(mode, ah->ah_capabilities.cap_mode))
532 return NULL;
533
534 /* Get rate tables */
535 switch (mode) {
536 case AR5K_MODE_11A:
537 return &ath5k_rt_11a;
538 case AR5K_MODE_11A_TURBO:
539 return &ath5k_rt_turbo;
540 case AR5K_MODE_11B:
541 return &ath5k_rt_11b;
542 case AR5K_MODE_11G:
543 return &ath5k_rt_11g;
544 case AR5K_MODE_11G_TURBO:
545 return &ath5k_rt_xr;
546 }
547
548 return NULL;
549}
550
551/*
552 * Free the ath5k_hw struct
553 */
/*
 * Tear down an ath5k_hw created by ath5k_hw_attach(): mark the softc
 * invalid, free the RF-bank buffer (if allocated), then free the
 * struct itself. Caller must ensure interrupts are already disabled.
 *
 * NOTE(review): the NULL guard before kfree() is redundant —
 * kfree(NULL) is a no-op.
 */
554void ath5k_hw_detach(struct ath5k_hw *ah)
555{
556 ATH5K_TRACE(ah->ah_sc);
557
558 __set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
559
560 if (ah->ah_rf_banks != NULL)
561 kfree(ah->ah_rf_banks);
562
563 /* assume interrupts are down */
564 kfree(ah);
565}
566
567/****************************\
568 Reset function and helpers
569\****************************/
570
571/**
572 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
573 *
574 * @ah: the &struct ath5k_hw
575 * @channel: the currently set channel upon reset
576 *
577 * Write the OFDM timings for the AR5212 upon reset. This is a helper for
578 * ath5k_hw_reset(). This seems to tune the PLL a specified frequency
579 * depending on the bandwidth of the channel.
580 *
581 */
/*
 * Helper for ath5k_hw_reset() on AR5212: derive the OFDM delta-slope
 * coefficient (mantissa + exponent) for the channel's center
 * frequency at the current sampling clock (40 MHz, 80 MHz in turbo)
 * and program it into the PHY_TIMING_3 register fields.
 * Returns 0 on success, -EINVAL if the coefficient's exponent search
 * degenerates (coef_scaled had no set bits).
 * BUG()s if called on a non-5212 MAC or a non-OFDM channel —
 * callers must check first.
 */
582static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
583 struct ieee80211_channel *channel)
584{
585 /* Get exponent and mantissa and set it */
586 u32 coef_scaled, coef_exp, coef_man,
587 ds_coef_exp, ds_coef_man, clock;
588
589 if (!(ah->ah_version == AR5K_AR5212) ||
590 !(channel->hw_value & CHANNEL_OFDM))
591 BUG();
592
593 /* Seems there are two PLLs, one for baseband sampling and one
594 * for tuning. Tuning basebands are 40 MHz or 80MHz when in
595 * turbo. */
596 clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40;
597 coef_scaled = ((5 * (clock << 24)) / 2) /
598 channel->center_freq;
599
/* Find the index of the highest set bit (floor(log2)) */
600 for (coef_exp = 31; coef_exp > 0; coef_exp--)
601 if ((coef_scaled >> coef_exp) & 0x1)
602 break;
603
604 if (!coef_exp)
605 return -EINVAL;
606
/* Convert to the register's mantissa/exponent encoding, rounding
 * the mantissa to nearest by adding half an LSB before the shift */
607 coef_exp = 14 - (coef_exp - 24);
608 coef_man = coef_scaled +
609 (1 << (24 - coef_exp - 1));
610 ds_coef_man = coef_man >> (24 - coef_exp);
611 ds_coef_exp = coef_exp - 16;
612
613 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
614 AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
615 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
616 AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
617
618 return 0;
619}
620
621/**
622 * ath5k_hw_write_rate_duration - set rate duration during hw resets
623 *
624 * @ah: the &struct ath5k_hw
625 * @mode: one of enum ath5k_driver_mode
626 *
627 * Write the rate duration table for the current mode upon hw reset. This
628 * is a helper for ath5k_hw_reset(). It seems all this is doing is setting
629 * an ACK timeout for the hardware for the current mode for each rate. The
630 * rates which are capable of short preamble (802.11b rates 2Mbps, 5.5Mbps,
631 * and 11Mbps) have another register for the short preamble ACK timeout
632 * calculation.
633 *
634 */
/*
 * Helper for ath5k_hw_reset(): program the per-rate ACK duration
 * table for @mode. For each rate, the ACK airtime is computed with
 * ieee80211_generic_frame_duration() at that rate's *control* rate
 * (a 10-byte ACK, +4 FCS added by mac80211) and written to the
 * AR5K_RATE_DUR slot; short-preamble-capable rates get a second
 * write at the short-preamble offset with the same (long-preamble)
 * value.
 *
 * NOTE(review): the return of ath5k_hw_get_rate_table() is
 * dereferenced without a NULL check — safe only if every mode passed
 * here is in cap_mode; confirm at the call site in ath5k_hw_reset().
 */
635static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
636 unsigned int mode)
637{
638 struct ath5k_softc *sc = ah->ah_sc;
639 const struct ath5k_rate_table *rt;
640 struct ieee80211_rate srate = {};
641 unsigned int i;
642
643 /* Get rate table for the current operating mode */
644 rt = ath5k_hw_get_rate_table(ah, mode);
645
646 /* Write rate duration table */
647 for (i = 0; i < rt->rate_count; i++) {
648 const struct ath5k_rate *rate, *control_rate;
649
650 u32 reg;
651 u16 tx_time;
652
653 rate = &rt->rates[i];
654 control_rate = &rt->rates[rate->control_rate];
655
656 /* Set ACK timeout */
657 reg = AR5K_RATE_DUR(rate->rate_code);
658
/* mac80211 wants the bitrate in units of 100 kbps */
659 srate.bitrate = control_rate->rate_kbps/100;
660
661 /* An ACK frame consists of 10 bytes. If you add the FCS,
662 * which ieee80211_generic_frame_duration() adds,
663 * its 14 bytes. Note we use the control rate and not the
664 * actual rate for this rate. See mac80211 tx.c
665 * ieee80211_duration() for a brief description of
666 * what rate we should choose to TX ACKs. */
667 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
668 sc->vif, 10, &srate));
669
670 ath5k_hw_reg_write(ah, tx_time, reg);
671
672 if (!HAS_SHPREAMBLE(i))
673 continue;
674
675 /*
676 * We're not distinguishing short preamble here,
677 * This is true, all we'll get is a longer value here
678 * which is not necessarilly bad. We could use
679 * export ieee80211_frame_duration() but that needs to be
680 * fixed first to be properly used by mac802111 drivers:
681 *
682 * - remove erp stuff and let the routine figure ofdm
683 * erp rates
684 * - remove passing argument ieee80211_local as
685 * drivers don't have access to it
686 * - move drivers using ieee80211_generic_frame_duration()
687 * to this
688 */
689 ath5k_hw_reg_write(ah, tx_time,
690 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
691 }
692}
693
694/*
695 * Main reset function
696 */
697int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
698 struct ieee80211_channel *channel, bool change_channel)
699{
700 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
701 struct pci_dev *pdev = ah->ah_sc->pdev;
702 u32 data, s_seq, s_ant, s_led[3], dma_size;
703 unsigned int i, mode, freq, ee_mode, ant[2];
704 int ret;
705
706 ATH5K_TRACE(ah->ah_sc);
707
708 s_seq = 0;
709 s_ant = 0;
710 ee_mode = 0;
711 freq = 0;
712 mode = 0;
713
714 /*
715 * Save some registers before a reset
716 */
717 /*DCU/Antenna selection not available on 5210*/
718 if (ah->ah_version != AR5K_AR5210) {
719 if (change_channel) {
720 /* Seq number for queue 0 -do this for all queues ? */
721 s_seq = ath5k_hw_reg_read(ah,
722 AR5K_QUEUE_DFS_SEQNUM(0));
723 /*Default antenna*/
724 s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
725 }
726 }
727
728 /*GPIOs*/
729 s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_LEDSTATE;
730 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
731 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
732
733 if (change_channel && ah->ah_rf_banks != NULL)
734 ath5k_hw_get_rf_gain(ah);
735
736
737 /*Wakeup the device*/
738 ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
739 if (ret)
740 return ret;
741
742 /*
743 * Initialize operating mode
744 */
745 ah->ah_op_mode = op_mode;
746
747 /*
748 * 5111/5112 Settings
749 * 5210 only comes with RF5110
750 */
751 if (ah->ah_version != AR5K_AR5210) {
752 if (ah->ah_radio != AR5K_RF5111 &&
753 ah->ah_radio != AR5K_RF5112 &&
754 ah->ah_radio != AR5K_RF5413 &&
755 ah->ah_radio != AR5K_RF2413 &&
756 ah->ah_radio != AR5K_RF2425) {
757 ATH5K_ERR(ah->ah_sc,
758 "invalid phy radio: %u\n", ah->ah_radio);
759 return -EINVAL;
760 }
761
762 switch (channel->hw_value & CHANNEL_MODES) {
763 case CHANNEL_A:
764 mode = AR5K_MODE_11A;
765 freq = AR5K_INI_RFGAIN_5GHZ;
766 ee_mode = AR5K_EEPROM_MODE_11A;
767 break;
768 case CHANNEL_G:
769 mode = AR5K_MODE_11G;
770 freq = AR5K_INI_RFGAIN_2GHZ;
771 ee_mode = AR5K_EEPROM_MODE_11G;
772 break;
773 case CHANNEL_B:
774 mode = AR5K_MODE_11B;
775 freq = AR5K_INI_RFGAIN_2GHZ;
776 ee_mode = AR5K_EEPROM_MODE_11B;
777 break;
778 case CHANNEL_T:
779 mode = AR5K_MODE_11A_TURBO;
780 freq = AR5K_INI_RFGAIN_5GHZ;
781 ee_mode = AR5K_EEPROM_MODE_11A;
782 break;
783 /*Is this ok on 5211 too ?*/
784 case CHANNEL_TG:
785 mode = AR5K_MODE_11G_TURBO;
786 freq = AR5K_INI_RFGAIN_2GHZ;
787 ee_mode = AR5K_EEPROM_MODE_11G;
788 break;
789 case CHANNEL_XR:
790 if (ah->ah_version == AR5K_AR5211) {
791 ATH5K_ERR(ah->ah_sc,
792 "XR mode not available on 5211");
793 return -EINVAL;
794 }
795 mode = AR5K_MODE_XR;
796 freq = AR5K_INI_RFGAIN_5GHZ;
797 ee_mode = AR5K_EEPROM_MODE_11A;
798 break;
799 default:
800 ATH5K_ERR(ah->ah_sc,
801 "invalid channel: %d\n", channel->center_freq);
802 return -EINVAL;
803 }
804
805 /* PHY access enable */
806 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
807
808 }
809
810 ret = ath5k_hw_write_initvals(ah, mode, change_channel);
811 if (ret)
812 return ret;
813
814 /*
815 * 5211/5212 Specific
816 */
817 if (ah->ah_version != AR5K_AR5210) {
818 /*
819 * Write initial RF gain settings
820 * This should work for both 5111/5112
821 */
822 ret = ath5k_hw_rfgain(ah, freq);
823 if (ret)
824 return ret;
825
826 mdelay(1);
827
828 /*
829 * Write some more initial register settings
830 */
831 if (ah->ah_version == AR5K_AR5212) {
832 ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
833
834 if (channel->hw_value == CHANNEL_G)
835 if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
836 ath5k_hw_reg_write(ah, 0x00f80d80,
837 0x994c);
838 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
839 ath5k_hw_reg_write(ah, 0x00380140,
840 0x994c);
841 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
842 ath5k_hw_reg_write(ah, 0x00fc0ec0,
843 0x994c);
844 else /* 2425 */
845 ath5k_hw_reg_write(ah, 0x00fc0fc0,
846 0x994c);
847 else
848 ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
849
850 /* Some bits are disabled here, we know nothing about
851 * register 0xa228 yet, most of the times this ends up
852 * with a value 0x9b5 -haven't seen any dump with
853 * a different value- */
854 /* Got this from decompiling binary HAL */
855 data = ath5k_hw_reg_read(ah, 0xa228);
856 data &= 0xfffffdff;
857 ath5k_hw_reg_write(ah, data, 0xa228);
858
859 data = ath5k_hw_reg_read(ah, 0xa228);
860 data &= 0xfffe03ff;
861 ath5k_hw_reg_write(ah, data, 0xa228);
862 data = 0;
863
864 /* Just write 0x9b5 ? */
865 /* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
866 ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
867 ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
868 ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
869 }
870
871 /* Fix for first revision of the RF5112 RF chipset */
872 if (ah->ah_radio >= AR5K_RF5112 &&
873 ah->ah_radio_5ghz_revision <
874 AR5K_SREV_RAD_5112A) {
875 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
876 AR5K_PHY_CCKTXCTL);
877 if (channel->hw_value & CHANNEL_5GHZ)
878 data = 0xffb81020;
879 else
880 data = 0xffb80d20;
881 ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
882 data = 0;
883 }
884
885 /*
886 * Set TX power (FIXME)
887 */
888 ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER);
889 if (ret)
890 return ret;
891
892 /* Write rate duration table only on AR5212 and if
893 * virtual interface has already been brought up
894 * XXX: rethink this after new mode changes to
895 * mac80211 are integrated */
896 if (ah->ah_version == AR5K_AR5212 &&
897 ah->ah_sc->vif != NULL)
898 ath5k_hw_write_rate_duration(ah, mode);
899
900 /*
901 * Write RF registers
902 */
903 ret = ath5k_hw_rfregs(ah, channel, mode);
904 if (ret)
905 return ret;
906
907 /*
908 * Configure additional registers
909 */
910
911 /* Write OFDM timings on 5212*/
912 if (ah->ah_version == AR5K_AR5212 &&
913 channel->hw_value & CHANNEL_OFDM) {
914 ret = ath5k_hw_write_ofdm_timings(ah, channel);
915 if (ret)
916 return ret;
917 }
918
919 /*Enable/disable 802.11b mode on 5111
920 (enable 2111 frequency converter + CCK)*/
921 if (ah->ah_radio == AR5K_RF5111) {
922 if (mode == AR5K_MODE_11B)
923 AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
924 AR5K_TXCFG_B_MODE);
925 else
926 AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
927 AR5K_TXCFG_B_MODE);
928 }
929
930 /*
931 * Set channel and calibrate the PHY
932 */
933 ret = ath5k_hw_channel(ah, channel);
934 if (ret)
935 return ret;
936
937 /* Set antenna mode */
938 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
939 ah->ah_antenna[ee_mode][0], 0xfffffc06);
940
941 /*
942 * In case a fixed antenna was set as default
943 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
944 * registers.
945 */
946 if (s_ant != 0){
947 if (s_ant == AR5K_ANT_FIXED_A) /* 1 - Main */
948 ant[0] = ant[1] = AR5K_ANT_FIXED_A;
949 else /* 2 - Aux */
950 ant[0] = ant[1] = AR5K_ANT_FIXED_B;
951 } else {
952 ant[0] = AR5K_ANT_FIXED_A;
953 ant[1] = AR5K_ANT_FIXED_B;
954 }
955
956 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
957 AR5K_PHY_ANT_SWITCH_TABLE_0);
958 ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
959 AR5K_PHY_ANT_SWITCH_TABLE_1);
960
961 /* Commit values from EEPROM */
962 if (ah->ah_radio == AR5K_RF5111)
963 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
964 AR5K_PHY_FRAME_CTL_TX_CLIP, ee->ee_tx_clip);
965
966 ath5k_hw_reg_write(ah,
967 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
968 AR5K_PHY_NFTHRES);
969
970 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
971 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
972 0xffffc07f);
973 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
974 (ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
975 0xfffc0fff);
976 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
977 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
978 ((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
979 0xffff0000);
980
981 ath5k_hw_reg_write(ah,
982 (ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
983 (ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
984 (ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
985 (ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
986
987 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
988 ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
989 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
990 (ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
991 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);
992
993 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
994 AR5K_PHY_IQ_CORR_ENABLE |
995 (ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
996 ee->ee_q_cal[ee_mode]);
997
998 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
999 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
1000 AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
1001 ee->ee_margin_tx_rx[ee_mode]);
1002
1003 } else {
1004 mdelay(1);
1005 /* Disable phy and wait */
1006 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
1007 mdelay(1);
1008 }
1009
1010 /*
1011 * Restore saved values
1012 */
1013 /*DCU/Antenna selection not available on 5210*/
1014 if (ah->ah_version != AR5K_AR5210) {
1015 ath5k_hw_reg_write(ah, s_seq, AR5K_QUEUE_DFS_SEQNUM(0));
1016 ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
1017 }
1018 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
1019 ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
1020 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
1021
1022 /*
1023 * Misc
1024 */
1025 /* XXX: add ah->aid once mac80211 gives this to us */
1026 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
1027
1028 ath5k_hw_set_opmode(ah);
1029 /*PISR/SISR Not available on 5210*/
1030 if (ah->ah_version != AR5K_AR5210) {
1031 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
1032 /* If we later allow tuning for this, store into sc structure */
1033 data = AR5K_TUNE_RSSI_THRES |
1034 AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S;
1035 ath5k_hw_reg_write(ah, data, AR5K_RSSI_THR);
1036 }
1037
1038 /*
1039 * Set Rx/Tx DMA Configuration
1040 *
1041 * Set maximum DMA size (512) except for PCI-E cards since
1042 * it causes rx overruns and tx errors (tested on 5424 but since
1043 * rx overruns also occur on 5416/5418 with madwifi we set 128
1044 * for all PCI-E cards to be safe).
1045 *
1046 * In dumps this is 128 for allchips.
1047 *
1048 * XXX: need to check 5210 for this
1049 * TODO: Check out tx triger level, it's always 64 on dumps but I
1050 * guess we can tweak it and see how it goes ;-)
1051 */
1052 dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B;
1053 if (ah->ah_version != AR5K_AR5210) {
1054 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
1055 AR5K_TXCFG_SDMAMR, dma_size);
1056 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
1057 AR5K_RXCFG_SDMAMW, dma_size);
1058 }
1059
1060 /*
1061 * Enable the PHY and wait until completion
1062 */
1063 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
1064
1065 /*
1066 * On 5211+ read activation -> rx delay
1067 * and use it.
1068 */
1069 if (ah->ah_version != AR5K_AR5210) {
1070 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
1071 AR5K_PHY_RX_DELAY_M;
1072 data = (channel->hw_value & CHANNEL_CCK) ?
1073 ((data << 2) / 22) : (data / 10);
1074
1075 udelay(100 + (2 * data));
1076 data = 0;
1077 } else {
1078 mdelay(1);
1079 }
1080
1081 /*
1082 * Perform ADC test (?)
1083 */
1084 data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
1085 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
1086 for (i = 0; i <= 20; i++) {
1087 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
1088 break;
1089 udelay(200);
1090 }
1091 ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
1092 data = 0;
1093
1094 /*
1095 * Start automatic gain calibration
1096 *
1097 * During AGC calibration RX path is re-routed to
1098 * a signal detector so we don't receive anything.
1099 *
1100 * This method is used to calibrate some static offsets
1101 * used together with on-the fly I/Q calibration (the
1102 * one performed via ath5k_hw_phy_calibrate), that doesn't
1103 * interrupt rx path.
1104 *
1105 * If we are in a noisy environment AGC calibration may time
1106 * out.
1107 */
1108 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1109 AR5K_PHY_AGCCTL_CAL);
1110
1111 /* At the same time start I/Q calibration for QAM constellation
1112 * -no need for CCK- */
1113 ah->ah_calibration = false;
1114 if (!(mode == AR5K_MODE_11B)) {
1115 ah->ah_calibration = true;
1116 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
1117 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
1118 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
1119 AR5K_PHY_IQ_RUN);
1120 }
1121
1122 /* Wait for gain calibration to finish (we check for I/Q calibration
1123 * during ath5k_phy_calibrate) */
1124 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
1125 AR5K_PHY_AGCCTL_CAL, 0, false)) {
1126 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
1127 channel->center_freq);
1128 return -EAGAIN;
1129 }
1130
1131 /*
1132 * Start noise floor calibration
1133 *
1134 * If we run NF calibration before AGC, it always times out.
1135 * Binary HAL starts NF and AGC calibration at the same time
1136 * and only waits for AGC to finish. I believe that's wrong because
1137 * during NF calibration, rx path is also routed to a detector, so if
1138 * it doesn't finish we won't have RX.
1139 *
1140 * XXX: Find an interval that's OK for all cards...
1141 */
1142 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1143 if (ret)
1144 return ret;
1145
1146 /*
1147 * Reset queues and start beacon timers at the end of the reset routine
1148 */
1149 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
1150 /*No QCU on 5210*/
1151 if (ah->ah_version != AR5K_AR5210)
1152 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(i), i);
1153
1154 ret = ath5k_hw_reset_tx_queue(ah, i);
1155 if (ret) {
1156 ATH5K_ERR(ah->ah_sc,
1157 "failed to reset TX queue #%d\n", i);
1158 return ret;
1159 }
1160 }
1161
1162 /* Pre-enable interrupts on 5211/5212*/
1163 if (ah->ah_version != AR5K_AR5210)
1164 ath5k_hw_set_intr(ah, AR5K_INT_RX | AR5K_INT_TX |
1165 AR5K_INT_FATAL);
1166
1167 /*
1168 * Set RF kill flags if supported by the device (read from the EEPROM)
1169 * Disable gpio_intr for now since it results system hang.
1170 * TODO: Handle this in ath5k_intr
1171 */
1172#if 0
1173 if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
1174 ath5k_hw_set_gpio_input(ah, 0);
1175 ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
1176 if (ah->ah_gpio[0] == 0)
1177 ath5k_hw_set_gpio_intr(ah, 0, 1);
1178 else
1179 ath5k_hw_set_gpio_intr(ah, 0, 0);
1180 }
1181#endif
1182
1183 /*
1184 * Set the 32MHz reference clock on 5212 phy clock sleep register
1185 *
1186 * TODO: Find out how to switch to external 32Khz clock to save power
1187 */
1188 if (ah->ah_version == AR5K_AR5212) {
1189 ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
1190 ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
1191 ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
1192 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
1193 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
1194 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
1195
1196 data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f ;
1197 data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
1198 0x00000f80 : 0x00001380 ;
1199 ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
1200 data = 0;
1201 }
1202
1203 if (ah->ah_version == AR5K_AR5212) {
1204 ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
1205 ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
1206 ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
1207 if (ah->ah_mac_srev >= AR5K_SREV_VER_AR2413)
1208 ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
1209 }
1210
1211 /*
1212 * Disable beacons and reset the register
1213 */
1214 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE |
1215 AR5K_BEACON_RESET_TSF);
1216
1217 return 0;
1218}
1219
1220/*
1221 * Reset chipset
1222 */
1223static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
1224{
1225 int ret;
1226 u32 mask = val ? val : ~0U;
1227
1228 ATH5K_TRACE(ah->ah_sc);
1229
1230 /* Read-and-clear RX Descriptor Pointer*/
1231 ath5k_hw_reg_read(ah, AR5K_RXDP);
1232
1233 /*
1234 * Reset the device and wait until success
1235 */
1236 ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
1237
1238 /* Wait at least 128 PCI clocks */
1239 udelay(15);
1240
1241 if (ah->ah_version == AR5K_AR5210) {
1242 val &= AR5K_RESET_CTL_CHIP;
1243 mask &= AR5K_RESET_CTL_CHIP;
1244 } else {
1245 val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
1246 mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
1247 }
1248
1249 ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
1250
1251 /*
1252 * Reset configuration register (for hw byte-swap). Note that this
1253 * is only set for big endian. We do the necessary magic in
1254 * AR5K_INIT_CFG.
1255 */
1256 if ((val & AR5K_RESET_CTL_PCU) == 0)
1257 ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
1258
1259 return ret;
1260}
1261
1262/*
1263 * Power management functions
1264 */
1265
1266/*
1267 * Sleep control
1268 */
1269int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1270 bool set_chip, u16 sleep_duration)
1271{
1272 unsigned int i;
1273 u32 staid, data;
1274
1275 ATH5K_TRACE(ah->ah_sc);
1276 staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
1277
1278 switch (mode) {
1279 case AR5K_PM_AUTO:
1280 staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
1281 /* fallthrough */
1282 case AR5K_PM_NETWORK_SLEEP:
1283 if (set_chip)
1284 ath5k_hw_reg_write(ah,
1285 AR5K_SLEEP_CTL_SLE_ALLOW |
1286 sleep_duration,
1287 AR5K_SLEEP_CTL);
1288
1289 staid |= AR5K_STA_ID1_PWR_SV;
1290 break;
1291
1292 case AR5K_PM_FULL_SLEEP:
1293 if (set_chip)
1294 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
1295 AR5K_SLEEP_CTL);
1296
1297 staid |= AR5K_STA_ID1_PWR_SV;
1298 break;
1299
1300 case AR5K_PM_AWAKE:
1301
1302 staid &= ~AR5K_STA_ID1_PWR_SV;
1303
1304 if (!set_chip)
1305 goto commit;
1306
1307 /* Preserve sleep duration */
1308 data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
1309 if( data & 0xffc00000 ){
1310 data = 0;
1311 } else {
1312 data = data & 0xfffcffff;
1313 }
1314
1315 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1316 udelay(15);
1317
1318 for (i = 50; i > 0; i--) {
1319 /* Check if the chip did wake up */
1320 if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
1321 AR5K_PCICFG_SPWR_DN) == 0)
1322 break;
1323
1324 /* Wait a bit and retry */
1325 udelay(200);
1326 ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
1327 }
1328
1329 /* Fail if the chip didn't wake up */
1330 if (i <= 0)
1331 return -EIO;
1332
1333 break;
1334
1335 default:
1336 return -EINVAL;
1337 }
1338
1339commit:
1340 ah->ah_power_mode = mode;
1341 ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);
1342
1343 return 0;
1344}
1345
1346/***********************\
1347 DMA Related Functions
1348\***********************/
1349
1350/*
1351 * Receive functions
1352 */
1353
1354/*
1355 * Start DMA receive
1356 */
1357void ath5k_hw_start_rx(struct ath5k_hw *ah)
1358{
1359 ATH5K_TRACE(ah->ah_sc);
1360 ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
1361 ath5k_hw_reg_read(ah, AR5K_CR);
1362}
1363
1364/*
1365 * Stop DMA receive
1366 */
1367int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
1368{
1369 unsigned int i;
1370
1371 ATH5K_TRACE(ah->ah_sc);
1372 ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
1373
1374 /*
1375 * It may take some time to disable the DMA receive unit
1376 */
1377 for (i = 2000; i > 0 &&
1378 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
1379 i--)
1380 udelay(10);
1381
1382 return i ? 0 : -EBUSY;
1383}
1384
1385/*
1386 * Get the address of the RX Descriptor
1387 */
1388u32 ath5k_hw_get_rx_buf(struct ath5k_hw *ah)
1389{
1390 return ath5k_hw_reg_read(ah, AR5K_RXDP);
1391}
1392
1393/*
1394 * Set the address of the RX Descriptor
1395 */
1396void ath5k_hw_put_rx_buf(struct ath5k_hw *ah, u32 phys_addr)
1397{
1398 ATH5K_TRACE(ah->ah_sc);
1399
1400 /*TODO:Shouldn't we check if RX is enabled first ?*/
1401 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
1402}
1403
1404/*
1405 * Transmit functions
1406 */
1407
1408/*
1409 * Start DMA transmit for a specific queue
1410 * (see also QCU/DCU functions)
1411 */
1412int ath5k_hw_tx_start(struct ath5k_hw *ah, unsigned int queue)
1413{
1414 u32 tx_queue;
1415
1416 ATH5K_TRACE(ah->ah_sc);
1417 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
1418
1419 /* Return if queue is declared inactive */
1420 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
1421 return -EIO;
1422
1423 if (ah->ah_version == AR5K_AR5210) {
1424 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
1425
1426 /*
1427 * Set the queue by type on 5210
1428 */
1429 switch (ah->ah_txq[queue].tqi_type) {
1430 case AR5K_TX_QUEUE_DATA:
1431 tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
1432 break;
1433 case AR5K_TX_QUEUE_BEACON:
1434 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
1435 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
1436 AR5K_BSR);
1437 break;
1438 case AR5K_TX_QUEUE_CAB:
1439 tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
1440 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
1441 AR5K_BCR_BDMAE, AR5K_BSR);
1442 break;
1443 default:
1444 return -EINVAL;
1445 }
1446 /* Start queue */
1447 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
1448 ath5k_hw_reg_read(ah, AR5K_CR);
1449 } else {
1450 /* Return if queue is disabled */
1451 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
1452 return -EIO;
1453
1454 /* Start queue */
1455 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
1456 }
1457
1458 return 0;
1459}
1460
1461/*
1462 * Stop DMA transmit for a specific queue
1463 * (see also QCU/DCU functions)
1464 */
1465int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
1466{
1467 unsigned int i = 100;
1468 u32 tx_queue, pending;
1469
1470 ATH5K_TRACE(ah->ah_sc);
1471 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
1472
1473 /* Return if queue is declared inactive */
1474 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
1475 return -EIO;
1476
1477 if (ah->ah_version == AR5K_AR5210) {
1478 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
1479
1480 /*
1481 * Set by queue type
1482 */
1483 switch (ah->ah_txq[queue].tqi_type) {
1484 case AR5K_TX_QUEUE_DATA:
1485 tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
1486 break;
1487 case AR5K_TX_QUEUE_BEACON:
1488 case AR5K_TX_QUEUE_CAB:
1489 /* XXX Fix me... */
1490 tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXD1;
1491 ath5k_hw_reg_write(ah, 0, AR5K_BSR);
1492 break;
1493 default:
1494 return -EINVAL;
1495 }
1496
1497 /* Stop queue */
1498 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
1499 ath5k_hw_reg_read(ah, AR5K_CR);
1500 } else {
1501 /*
1502 * Schedule TX disable and wait until queue is empty
1503 */
1504 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
1505
1506 /*Check for pending frames*/
1507 do {
1508 pending = ath5k_hw_reg_read(ah,
1509 AR5K_QUEUE_STATUS(queue)) &
1510 AR5K_QCU_STS_FRMPENDCNT;
1511 udelay(100);
1512 } while (--i && pending);
1513
1514 /* Clear register */
1515 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
1516 if (pending)
1517 return -EBUSY;
1518 }
1519
1520 /* TODO: Check for success else return error */
1521 return 0;
1522}
1523
1524/*
1525 * Get the address of the TX Descriptor for a specific queue
1526 * (see also QCU/DCU functions)
1527 */
1528u32 ath5k_hw_get_tx_buf(struct ath5k_hw *ah, unsigned int queue)
1529{
1530 u16 tx_reg;
1531
1532 ATH5K_TRACE(ah->ah_sc);
1533 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
1534
1535 /*
1536 * Get the transmit queue descriptor pointer from the selected queue
1537 */
1538 /*5210 doesn't have QCU*/
1539 if (ah->ah_version == AR5K_AR5210) {
1540 switch (ah->ah_txq[queue].tqi_type) {
1541 case AR5K_TX_QUEUE_DATA:
1542 tx_reg = AR5K_NOQCU_TXDP0;
1543 break;
1544 case AR5K_TX_QUEUE_BEACON:
1545 case AR5K_TX_QUEUE_CAB:
1546 tx_reg = AR5K_NOQCU_TXDP1;
1547 break;
1548 default:
1549 return 0xffffffff;
1550 }
1551 } else {
1552 tx_reg = AR5K_QUEUE_TXDP(queue);
1553 }
1554
1555 return ath5k_hw_reg_read(ah, tx_reg);
1556}
1557
1558/*
1559 * Set the address of the TX Descriptor for a specific queue
1560 * (see also QCU/DCU functions)
1561 */
1562int ath5k_hw_put_tx_buf(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
1563{
1564 u16 tx_reg;
1565
1566 ATH5K_TRACE(ah->ah_sc);
1567 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
1568
1569 /*
1570 * Set the transmit queue descriptor pointer register by type
1571 * on 5210
1572 */
1573 if (ah->ah_version == AR5K_AR5210) {
1574 switch (ah->ah_txq[queue].tqi_type) {
1575 case AR5K_TX_QUEUE_DATA:
1576 tx_reg = AR5K_NOQCU_TXDP0;
1577 break;
1578 case AR5K_TX_QUEUE_BEACON:
1579 case AR5K_TX_QUEUE_CAB:
1580 tx_reg = AR5K_NOQCU_TXDP1;
1581 break;
1582 default:
1583 return -EINVAL;
1584 }
1585 } else {
1586 /*
1587 * Set the transmit queue descriptor pointer for
1588 * the selected queue on QCU for 5211+
1589 * (this won't work if the queue is still active)
1590 */
1591 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
1592 return -EIO;
1593
1594 tx_reg = AR5K_QUEUE_TXDP(queue);
1595 }
1596
1597 /* Set descriptor pointer */
1598 ath5k_hw_reg_write(ah, phys_addr, tx_reg);
1599
1600 return 0;
1601}
1602
1603/*
1604 * Update tx trigger level
1605 */
1606int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
1607{
1608 u32 trigger_level, imr;
1609 int ret = -EIO;
1610
1611 ATH5K_TRACE(ah->ah_sc);
1612
1613 /*
1614 * Disable interrupts by setting the mask
1615 */
1616 imr = ath5k_hw_set_intr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
1617
1618 /*TODO: Boundary check on trigger_level*/
1619 trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
1620 AR5K_TXCFG_TXFULL);
1621
1622 if (!increase) {
1623 if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
1624 goto done;
1625 } else
1626 trigger_level +=
1627 ((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
1628
1629 /*
1630 * Update trigger level on success
1631 */
1632 if (ah->ah_version == AR5K_AR5210)
1633 ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
1634 else
1635 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
1636 AR5K_TXCFG_TXFULL, trigger_level);
1637
1638 ret = 0;
1639
1640done:
1641 /*
1642 * Restore interrupt mask
1643 */
1644 ath5k_hw_set_intr(ah, imr);
1645
1646 return ret;
1647}
1648
1649/*
1650 * Interrupt handling
1651 */
1652
1653/*
1654 * Check if we have pending interrupts
1655 */
1656bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
1657{
1658 ATH5K_TRACE(ah->ah_sc);
1659 return ath5k_hw_reg_read(ah, AR5K_INTPEND);
1660}
1661
1662/*
1663 * Get interrupt mask (ISR)
1664 */
1665int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
1666{
1667 u32 data;
1668
1669 ATH5K_TRACE(ah->ah_sc);
1670
1671 /*
1672 * Read interrupt status from the Interrupt Status register
1673 * on 5210
1674 */
1675 if (ah->ah_version == AR5K_AR5210) {
1676 data = ath5k_hw_reg_read(ah, AR5K_ISR);
1677 if (unlikely(data == AR5K_INT_NOCARD)) {
1678 *interrupt_mask = data;
1679 return -ENODEV;
1680 }
1681 } else {
1682 /*
1683 * Read interrupt status from the Read-And-Clear shadow register
1684 * Note: PISR/SISR Not available on 5210
1685 */
1686 data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
1687 }
1688
1689 /*
1690 * Get abstract interrupt mask (driver-compatible)
1691 */
1692 *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
1693
1694 if (unlikely(data == AR5K_INT_NOCARD))
1695 return -ENODEV;
1696
1697 if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR))
1698 *interrupt_mask |= AR5K_INT_RX;
1699
1700 if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR
1701 | AR5K_ISR_TXDESC | AR5K_ISR_TXEOL))
1702 *interrupt_mask |= AR5K_INT_TX;
1703
1704 if (ah->ah_version != AR5K_AR5210) {
1705 /*HIU = Host Interface Unit (PCI etc)*/
1706 if (unlikely(data & (AR5K_ISR_HIUERR)))
1707 *interrupt_mask |= AR5K_INT_FATAL;
1708
1709 /*Beacon Not Ready*/
1710 if (unlikely(data & (AR5K_ISR_BNR)))
1711 *interrupt_mask |= AR5K_INT_BNR;
1712 }
1713
1714 /*
1715 * XXX: BMISS interrupts may occur after association.
1716 * I found this on 5210 code but it needs testing. If this is
1717 * true we should disable them before assoc and re-enable them
1718 * after a successfull assoc + some jiffies.
1719 */
1720#if 0
1721 interrupt_mask &= ~AR5K_INT_BMISS;
1722#endif
1723
1724 /*
1725 * In case we didn't handle anything,
1726 * print the register value.
1727 */
1728 if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
1729 ATH5K_PRINTF("0x%08x\n", data);
1730
1731 return 0;
1732}
1733
1734/*
1735 * Set interrupt mask
1736 */
1737enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask)
1738{
1739 enum ath5k_int old_mask, int_mask;
1740
1741 /*
1742 * Disable card interrupts to prevent any race conditions
1743 * (they will be re-enabled afterwards).
1744 */
1745 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
1746 ath5k_hw_reg_read(ah, AR5K_IER);
1747
1748 old_mask = ah->ah_imr;
1749
1750 /*
1751 * Add additional, chipset-dependent interrupt mask flags
1752 * and write them to the IMR (interrupt mask register).
1753 */
1754 int_mask = new_mask & AR5K_INT_COMMON;
1755
1756 if (new_mask & AR5K_INT_RX)
1757 int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN |
1758 AR5K_IMR_RXDESC;
1759
1760 if (new_mask & AR5K_INT_TX)
1761 int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC |
1762 AR5K_IMR_TXURN;
1763
1764 if (ah->ah_version != AR5K_AR5210) {
1765 if (new_mask & AR5K_INT_FATAL) {
1766 int_mask |= AR5K_IMR_HIUERR;
1767 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT |
1768 AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR);
1769 }
1770 }
1771
1772 ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
1773
1774 /* Store new interrupt mask */
1775 ah->ah_imr = new_mask;
1776
1777 /* ..re-enable interrupts */
1778 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
1779 ath5k_hw_reg_read(ah, AR5K_IER);
1780
1781 return old_mask;
1782}
1783
1784
1785/*************************\
1786 EEPROM access functions
1787\*************************/
1788
1789/*
1790 * Read from eeprom
1791 */
1792static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
1793{
1794 u32 status, timeout;
1795
1796 ATH5K_TRACE(ah->ah_sc);
1797 /*
1798 * Initialize EEPROM access
1799 */
1800 if (ah->ah_version == AR5K_AR5210) {
1801 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
1802 (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
1803 } else {
1804 ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
1805 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
1806 AR5K_EEPROM_CMD_READ);
1807 }
1808
1809 for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
1810 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
1811 if (status & AR5K_EEPROM_STAT_RDDONE) {
1812 if (status & AR5K_EEPROM_STAT_RDERR)
1813 return -EIO;
1814 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
1815 0xffff);
1816 return 0;
1817 }
1818 udelay(15);
1819 }
1820
1821 return -ETIMEDOUT;
1822}
1823
1824/*
1825 * Write to eeprom - currently disabled, use at your own risk
1826 */
1827#if 0
1828static int ath5k_hw_eeprom_write(struct ath5k_hw *ah, u32 offset, u16 data)
1829{
1830
1831 u32 status, timeout;
1832
1833 ATH5K_TRACE(ah->ah_sc);
1834
1835 /*
1836 * Initialize eeprom access
1837 */
1838
1839 if (ah->ah_version == AR5K_AR5210) {
1840 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
1841 } else {
1842 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
1843 AR5K_EEPROM_CMD_RESET);
1844 }
1845
1846 /*
1847 * Write data to data register
1848 */
1849
1850 if (ah->ah_version == AR5K_AR5210) {
1851 ath5k_hw_reg_write(ah, data, AR5K_EEPROM_BASE + (4 * offset));
1852 } else {
1853 ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
1854 ath5k_hw_reg_write(ah, data, AR5K_EEPROM_DATA);
1855 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
1856 AR5K_EEPROM_CMD_WRITE);
1857 }
1858
1859 /*
1860 * Check status
1861 */
1862
1863 for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
1864 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
1865 if (status & AR5K_EEPROM_STAT_WRDONE) {
1866 if (status & AR5K_EEPROM_STAT_WRERR)
1867 return EIO;
1868 return 0;
1869 }
1870 udelay(15);
1871 }
1872
1873 ATH5K_ERR(ah->ah_sc, "EEPROM Write is disabled!");
1874 return -EIO;
1875}
1876#endif
1877
1878/*
1879 * Translate binary channel representation in EEPROM to frequency
1880 */
1881static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin, unsigned int mode)
1882{
1883 u16 val;
1884
1885 if (bin == AR5K_EEPROM_CHANNEL_DIS)
1886 return bin;
1887
1888 if (mode == AR5K_EEPROM_MODE_11A) {
1889 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
1890 val = (5 * bin) + 4800;
1891 else
1892 val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
1893 (bin * 10) + 5100;
1894 } else {
1895 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2)
1896 val = bin + 2300;
1897 else
1898 val = bin + 2400;
1899 }
1900
1901 return val;
1902}
1903
1904/*
1905 * Read antenna infos from eeprom
1906 */
1907static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
1908 unsigned int mode)
1909{
1910 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1911 u32 o = *offset;
1912 u16 val;
1913 int ret, i = 0;
1914
1915 AR5K_EEPROM_READ(o++, val);
1916 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
1917 ee->ee_ant_tx_rx[mode] = (val >> 2) & 0x3f;
1918 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
1919
1920 AR5K_EEPROM_READ(o++, val);
1921 ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
1922 ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
1923 ee->ee_ant_control[mode][i++] = val & 0x3f;
1924
1925 AR5K_EEPROM_READ(o++, val);
1926 ee->ee_ant_control[mode][i++] = (val >> 10) & 0x3f;
1927 ee->ee_ant_control[mode][i++] = (val >> 4) & 0x3f;
1928 ee->ee_ant_control[mode][i] = (val << 2) & 0x3f;
1929
1930 AR5K_EEPROM_READ(o++, val);
1931 ee->ee_ant_control[mode][i++] |= (val >> 14) & 0x3;
1932 ee->ee_ant_control[mode][i++] = (val >> 8) & 0x3f;
1933 ee->ee_ant_control[mode][i++] = (val >> 2) & 0x3f;
1934 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
1935
1936 AR5K_EEPROM_READ(o++, val);
1937 ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
1938 ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
1939 ee->ee_ant_control[mode][i++] = val & 0x3f;
1940
1941 /* Get antenna modes */
1942 ah->ah_antenna[mode][0] =
1943 (ee->ee_ant_control[mode][0] << 4) | 0x1;
1944 ah->ah_antenna[mode][AR5K_ANT_FIXED_A] =
1945 ee->ee_ant_control[mode][1] |
1946 (ee->ee_ant_control[mode][2] << 6) |
1947 (ee->ee_ant_control[mode][3] << 12) |
1948 (ee->ee_ant_control[mode][4] << 18) |
1949 (ee->ee_ant_control[mode][5] << 24);
1950 ah->ah_antenna[mode][AR5K_ANT_FIXED_B] =
1951 ee->ee_ant_control[mode][6] |
1952 (ee->ee_ant_control[mode][7] << 6) |
1953 (ee->ee_ant_control[mode][8] << 12) |
1954 (ee->ee_ant_control[mode][9] << 18) |
1955 (ee->ee_ant_control[mode][10] << 24);
1956
1957 /* return new offset */
1958 *offset = o;
1959
1960 return 0;
1961}
1962
1963/*
1964 * Read supported modes from eeprom
1965 */
1966static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
1967 unsigned int mode)
1968{
1969 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1970 u32 o = *offset;
1971 u16 val;
1972 int ret;
1973
1974 AR5K_EEPROM_READ(o++, val);
1975 ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff;
1976 ee->ee_thr_62[mode] = val & 0xff;
1977
1978 if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
1979 ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
1980
1981 AR5K_EEPROM_READ(o++, val);
1982 ee->ee_tx_end2xpa_disable[mode] = (val >> 8) & 0xff;
1983 ee->ee_tx_frm2xpa_enable[mode] = val & 0xff;
1984
1985 AR5K_EEPROM_READ(o++, val);
1986 ee->ee_pga_desired_size[mode] = (val >> 8) & 0xff;
1987
1988 if ((val & 0xff) & 0x80)
1989 ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
1990 else
1991 ee->ee_noise_floor_thr[mode] = val & 0xff;
1992
1993 if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
1994 ee->ee_noise_floor_thr[mode] =
1995 mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
1996
1997 AR5K_EEPROM_READ(o++, val);
1998 ee->ee_xlna_gain[mode] = (val >> 5) & 0xff;
1999 ee->ee_x_gain[mode] = (val >> 1) & 0xf;
2000 ee->ee_xpd[mode] = val & 0x1;
2001
2002 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0)
2003 ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
2004
2005 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
2006 AR5K_EEPROM_READ(o++, val);
2007 ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
2008
2009 if (mode == AR5K_EEPROM_MODE_11A)
2010 ee->ee_xr_power[mode] = val & 0x3f;
2011 else {
2012 ee->ee_ob[mode][0] = val & 0x7;
2013 ee->ee_db[mode][0] = (val >> 3) & 0x7;
2014 }
2015 }
2016
2017 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
2018 ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
2019 ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
2020 } else {
2021 ee->ee_i_gain[mode] = (val >> 13) & 0x7;
2022
2023 AR5K_EEPROM_READ(o++, val);
2024 ee->ee_i_gain[mode] |= (val << 3) & 0x38;
2025
2026 if (mode == AR5K_EEPROM_MODE_11G)
2027 ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
2028 }
2029
2030 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
2031 mode == AR5K_EEPROM_MODE_11A) {
2032 ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
2033 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
2034 }
2035
2036 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6 &&
2037 mode == AR5K_EEPROM_MODE_11G)
2038 ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
2039
2040 /* return new offset */
2041 *offset = o;
2042
2043 return 0;
2044}
2045
2046/*
2047 * Initialize eeprom & capabilities structs
2048 */
2049static int ath5k_eeprom_init(struct ath5k_hw *ah)
2050{
2051 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
2052 unsigned int mode, i;
2053 int ret;
2054 u32 offset;
2055 u16 val;
2056
2057 /* Initial TX thermal adjustment values */
2058 ee->ee_tx_clip = 4;
2059 ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
2060 ee->ee_gain_select = 1;
2061
2062 /*
2063 * Read values from EEPROM and store them in the capability structure
2064 */
2065 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
2066 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
2067 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
2068 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
2069 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
2070
2071 /* Return if we have an old EEPROM */
2072 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
2073 return 0;
2074
2075#ifdef notyet
2076 /*
2077 * Validate the checksum of the EEPROM date. There are some
2078 * devices with invalid EEPROMs.
2079 */
2080 for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
2081 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
2082 cksum ^= val;
2083 }
2084 if (cksum != AR5K_EEPROM_INFO_CKSUM) {
2085 ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
2086 return -EIO;
2087 }
2088#endif
2089
2090 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
2091 ee_ant_gain);
2092
2093 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
2094 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
2095 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
2096 }
2097
2098 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
2099 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
2100 ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
2101 ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
2102
2103 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
2104 ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
2105 ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
2106 }
2107
2108 /*
2109 * Get conformance test limit values
2110 */
2111 offset = AR5K_EEPROM_CTL(ah->ah_ee_version);
2112 ee->ee_ctls = AR5K_EEPROM_N_CTLS(ah->ah_ee_version);
2113
2114 for (i = 0; i < ee->ee_ctls; i++) {
2115 AR5K_EEPROM_READ(offset++, val);
2116 ee->ee_ctl[i] = (val >> 8) & 0xff;
2117 ee->ee_ctl[i + 1] = val & 0xff;
2118 }
2119
2120 /*
2121 * Get values for 802.11a (5GHz)
2122 */
2123 mode = AR5K_EEPROM_MODE_11A;
2124
2125 ee->ee_turbo_max_power[mode] =
2126 AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
2127
2128 offset = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
2129
2130 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
2131 if (ret)
2132 return ret;
2133
2134 AR5K_EEPROM_READ(offset++, val);
2135 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
2136 ee->ee_ob[mode][3] = (val >> 5) & 0x7;
2137 ee->ee_db[mode][3] = (val >> 2) & 0x7;
2138 ee->ee_ob[mode][2] = (val << 1) & 0x7;
2139
2140 AR5K_EEPROM_READ(offset++, val);
2141 ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
2142 ee->ee_db[mode][2] = (val >> 12) & 0x7;
2143 ee->ee_ob[mode][1] = (val >> 9) & 0x7;
2144 ee->ee_db[mode][1] = (val >> 6) & 0x7;
2145 ee->ee_ob[mode][0] = (val >> 3) & 0x7;
2146 ee->ee_db[mode][0] = val & 0x7;
2147
2148 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
2149 if (ret)
2150 return ret;
2151
2152 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) {
2153 AR5K_EEPROM_READ(offset++, val);
2154 ee->ee_margin_tx_rx[mode] = val & 0x3f;
2155 }
2156
2157 /*
2158 * Get values for 802.11b (2.4GHz)
2159 */
2160 mode = AR5K_EEPROM_MODE_11B;
2161 offset = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
2162
2163 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
2164 if (ret)
2165 return ret;
2166
2167 AR5K_EEPROM_READ(offset++, val);
2168 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
2169 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
2170 ee->ee_db[mode][1] = val & 0x7;
2171
2172 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
2173 if (ret)
2174 return ret;
2175
2176 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
2177 AR5K_EEPROM_READ(offset++, val);
2178 ee->ee_cal_pier[mode][0] =
2179 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2180 ee->ee_cal_pier[mode][1] =
2181 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
2182
2183 AR5K_EEPROM_READ(offset++, val);
2184 ee->ee_cal_pier[mode][2] =
2185 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2186 }
2187
2188 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
2189 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
2190
2191 /*
2192 * Get values for 802.11g (2.4GHz)
2193 */
2194 mode = AR5K_EEPROM_MODE_11G;
2195 offset = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
2196
2197 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
2198 if (ret)
2199 return ret;
2200
2201 AR5K_EEPROM_READ(offset++, val);
2202 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
2203 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
2204 ee->ee_db[mode][1] = val & 0x7;
2205
2206 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
2207 if (ret)
2208 return ret;
2209
2210 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
2211 AR5K_EEPROM_READ(offset++, val);
2212 ee->ee_cal_pier[mode][0] =
2213 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2214 ee->ee_cal_pier[mode][1] =
2215 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
2216
2217 AR5K_EEPROM_READ(offset++, val);
2218 ee->ee_turbo_max_power[mode] = val & 0x7f;
2219 ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
2220
2221 AR5K_EEPROM_READ(offset++, val);
2222 ee->ee_cal_pier[mode][2] =
2223 ath5k_eeprom_bin2freq(ah, val & 0xff, mode);
2224
2225 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
2226 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
2227
2228 AR5K_EEPROM_READ(offset++, val);
2229 ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
2230 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
2231
2232 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
2233 AR5K_EEPROM_READ(offset++, val);
2234 ee->ee_cck_ofdm_gain_delta = val & 0xff;
2235 }
2236 }
2237
2238 /*
2239 * Read 5GHz EEPROM channels
2240 */
2241
2242 return 0;
2243}
2244
2245/*
2246 * Read the MAC address from eeprom
2247 */
2248static int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
2249{
2250 u8 mac_d[ETH_ALEN];
2251 u32 total, offset;
2252 u16 data;
2253 int octet, ret;
2254
2255 memset(mac, 0, ETH_ALEN);
2256 memset(mac_d, 0, ETH_ALEN);
2257
2258 ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
2259 if (ret)
2260 return ret;
2261
2262 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
2263 ret = ath5k_hw_eeprom_read(ah, offset, &data);
2264 if (ret)
2265 return ret;
2266
2267 total += data;
2268 mac_d[octet + 1] = data & 0xff;
2269 mac_d[octet] = data >> 8;
2270 octet += 2;
2271 }
2272
2273 memcpy(mac, mac_d, ETH_ALEN);
2274
2275 if (!total || total == 3 * 0xffff)
2276 return -EINVAL;
2277
2278 return 0;
2279}
2280
/*
 * Fill the capabilities struct
 *
 * Derives the supported frequency ranges, PHY modes, GPIO pin count and
 * TX queue count from the chip version and the EEPROM header word.
 * Always returns 0.
 */
static int ath5k_hw_get_capabilities(struct ath5k_hw *ah)
{
	u16 ee_header;

	ATH5K_TRACE(ah->ah_sc);
	/* Capabilities stored in the EEPROM */
	ee_header = ah->ah_capabilities.cap_eeprom.ee_header;

	if (ah->ah_version == AR5K_AR5210) {
		/*
		 * Set radio capabilities
		 * (The AR5110 only supports the middle 5GHz band)
		 */
		ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
		ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
		ah->ah_capabilities.cap_range.range_2ghz_min = 0;
		ah->ah_capabilities.cap_range.range_2ghz_max = 0;

		/* Set supported modes */
		__set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
		__set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
	} else {
		/*
		 * XXX The tranceiver supports frequencies from 4920 to 6100GHz
		 * XXX and from 2312 to 2732GHz. There are problems with the
		 * XXX current ieee80211 implementation because the IEEE
		 * XXX channel mapping does not support negative channel
		 * XXX numbers (2312MHz is channel -19). Of course, this
		 * XXX doesn't matter because these channels are out of range
		 * XXX but some regulation domains like MKK (Japan) will
		 * XXX support frequencies somewhere around 4.8GHz.
		 */

		/*
		 * Set radio capabilities
		 */

		if (AR5K_EEPROM_HDR_11A(ee_header)) {
			ah->ah_capabilities.cap_range.range_5ghz_min = 5005; /* 4920 */
			ah->ah_capabilities.cap_range.range_5ghz_max = 6100;

			/* Set supported modes */
			__set_bit(AR5K_MODE_11A,
					ah->ah_capabilities.cap_mode);
			__set_bit(AR5K_MODE_11A_TURBO,
					ah->ah_capabilities.cap_mode);
			if (ah->ah_version == AR5K_AR5212)
				__set_bit(AR5K_MODE_11G_TURBO,
						ah->ah_capabilities.cap_mode);
		}

		/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
		 * connected */
		if (AR5K_EEPROM_HDR_11B(ee_header) ||
				AR5K_EEPROM_HDR_11G(ee_header)) {
			ah->ah_capabilities.cap_range.range_2ghz_min = 2412; /* 2312 */
			ah->ah_capabilities.cap_range.range_2ghz_max = 2732;

			if (AR5K_EEPROM_HDR_11B(ee_header))
				__set_bit(AR5K_MODE_11B,
						ah->ah_capabilities.cap_mode);

			if (AR5K_EEPROM_HDR_11G(ee_header))
				__set_bit(AR5K_MODE_11G,
						ah->ah_capabilities.cap_mode);
		}
	}

	/* GPIO */
	ah->ah_gpio_npins = AR5K_NUM_GPIO;

	/* Set number of supported TX queues */
	if (ah->ah_version == AR5K_AR5210)
		ah->ah_capabilities.cap_queues.q_tx_num =
			AR5K_NUM_TX_QUEUES_NOQCU;
	else
		ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;

	return 0;
}
2364
2365/*********************************\
2366 Protocol Control Unit Functions
2367\*********************************/
2368
/*
 * Set Operation mode
 *
 * Programs STA_ID1 (and on AR5210 the beacon control register) according
 * to ah->ah_op_mode (IBSS/AP/STA/monitor). Returns 0, or -EINVAL for an
 * unsupported interface type.
 */
int ath5k_hw_set_opmode(struct ath5k_hw *ah)
{
	u32 pcu_reg, beacon_reg, low_id, high_id;

	pcu_reg = 0;
	beacon_reg = 0;

	ATH5K_TRACE(ah->ah_sc);

	switch (ah->ah_op_mode) {
	case IEEE80211_IF_TYPE_IBSS:
		pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_DESC_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
				AR5K_STA_ID1_NO_PSPOLL : 0);
		beacon_reg |= AR5K_BCR_ADHOC;
		break;

	case IEEE80211_IF_TYPE_AP:
		pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_RTS_DEF_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
				AR5K_STA_ID1_NO_PSPOLL : 0);
		beacon_reg |= AR5K_BCR_AP;
		break;

	case IEEE80211_IF_TYPE_STA:
		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
				AR5K_STA_ID1_PWR_SV : 0);
		/* fall through - STA also gets the monitor-mode bits below */
	case IEEE80211_IF_TYPE_MNTR:
		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
				AR5K_STA_ID1_NO_PSPOLL : 0);
		break;

	default:
		return -EINVAL;
	}

	/*
	 * Set PCU registers
	 */
	low_id = AR5K_LOW_ID(ah->ah_sta_id);
	high_id = AR5K_HIGH_ID(ah->ah_sta_id);
	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
	ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);

	/*
	 * Set Beacon Control Register on 5210
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);

	return 0;
}
2426
2427/*
2428 * BSSID Functions
2429 */
2430
/*
 * Get station id
 *
 * Copies the cached MAC address (ah->ah_sta_id) into @mac; does not touch
 * the hardware.
 */
void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
{
	ATH5K_TRACE(ah->ah_sc);
	memcpy(mac, ah->ah_sta_id, ETH_ALEN);
}
2439
2440/*
2441 * Set station id
2442 */
2443int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
2444{
2445 u32 low_id, high_id;
2446
2447 ATH5K_TRACE(ah->ah_sc);
2448 /* Set new station ID */
2449 memcpy(ah->ah_sta_id, mac, ETH_ALEN);
2450
2451 low_id = AR5K_LOW_ID(mac);
2452 high_id = AR5K_HIGH_ID(mac);
2453
2454 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
2455 ath5k_hw_reg_write(ah, high_id, AR5K_STA_ID1);
2456
2457 return 0;
2458}
2459
/*
 * Set BSSID
 *
 * Programs the BSSID and association id into BSS_ID0/BSS_ID1 (which
 * triggers the "SME Join" operation) and enables/disables PS-Poll
 * depending on whether we are associated (@assoc_id != 0).
 */
void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
{
	u32 low_id, high_id;
	/* NOTE(review): tim_offset is never set here, so the TIM field is
	 * always written as 0 below — confirm this is intended. */
	u16 tim_offset = 0;

	/*
	 * Set simple BSSID mask on 5212
	 */
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0);
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1);
	}

	/*
	 * Set BSSID which triggers the "SME Join" operation
	 */
	low_id = AR5K_LOW_ID(bssid);
	high_id = AR5K_HIGH_ID(bssid);
	ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
	ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
				AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);

	/* Not associated: no PS-Poll, nothing more to program */
	if (assoc_id == 0) {
		ath5k_hw_disable_pspoll(ah);
		return;
	}

	AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
			tim_offset ? tim_offset + 4 : 0);

	ath5k_hw_enable_pspoll(ah, NULL, 0);
}
2495/**
2496 * ath5k_hw_set_bssid_mask - set common bits we should listen to
2497 *
2498 * The bssid_mask is a utility used by AR5212 hardware to inform the hardware
2499 * which bits of the interface's MAC address should be looked at when trying
2500 * to decide which packets to ACK. In station mode every bit matters. In AP
2501 * mode with a single BSS every bit matters as well. In AP mode with
2502 * multiple BSSes not every bit matters.
2503 *
2504 * @ah: the &struct ath5k_hw
2505 * @mask: the bssid_mask, a u8 array of size ETH_ALEN
2506 *
2507 * Note that this is a simple filter and *does* not filter out all
2508 * relevant frames. Some non-relevant frames will get through, probability
2509 * jocks are welcomed to compute.
2510 *
2511 * When handling multiple BSSes (or VAPs) you can get the BSSID mask by
2512 * computing the set of:
2513 *
2514 * ~ ( MAC XOR BSSID )
2515 *
 * When you do this you are essentially computing the common bits. Later it
 * is assumed the hardware will "and" (&) the BSSID mask with the MAC address
 * to obtain the relevant bits which should match on the destination frame.
2519 *
 * Simple example: on your card you have two BSSes you have created with
 * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
2522 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
2523 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
2524 *
2525 * \
2526 * MAC: 0001 |
2527 * BSSID-01: 0100 | --> Belongs to us
2528 * BSSID-02: 1001 |
2529 * /
2530 * -------------------
2531 * BSSID-03: 0110 | --> External
2532 * -------------------
2533 *
2534 * Our bssid_mask would then be:
2535 *
2536 * On loop iteration for BSSID-01:
2537 * ~(0001 ^ 0100) -> ~(0101)
2538 * -> 1010
2539 * bssid_mask = 1010
2540 *
2541 * On loop iteration for BSSID-02:
2542 * bssid_mask &= ~(0001 ^ 1001)
2543 * bssid_mask = (1010) & ~(0001 ^ 1001)
2544 * bssid_mask = (1010) & ~(1001)
2545 * bssid_mask = (1010) & (0110)
2546 * bssid_mask = 0010
2547 *
 * A bssid_mask of 0010 means "only pay attention to the second least
 * significant bit". This is because it's the only bit common
 * amongst the MAC and all BSSIDs we support. To find out what the real
 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
 * or our MAC address (we assume the hardware uses the MAC address).
2553 *
2554 * Now, suppose there's an incoming frame for BSSID-03:
2555 *
2556 * IFRAME-01: 0110
2557 *
 * An easy eye-inspection of this already should tell you that this frame
 * will not pass our check. This is because the bssid_mask tells the
2560 * hardware to only look at the second least significant bit and the
2561 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
2562 * as 1, which does not match 0.
2563 *
2564 * So with IFRAME-01 we *assume* the hardware will do:
2565 *
2566 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
2567 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
2568 * --> allow = (0010) == 0000 ? 1 : 0;
2569 * --> allow = 0
2570 *
2571 * Lets now test a frame that should work:
2572 *
2573 * IFRAME-02: 0001 (we should allow)
2574 *
 * allow = (0001 & 0010) == 0010
2576 *
2577 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
2578 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
2579 * --> allow = (0010) == (0010)
2580 * --> allow = 1
2581 *
2582 * Other examples:
2583 *
2584 * IFRAME-03: 0100 --> allowed
2585 * IFRAME-04: 1001 --> allowed
2586 * IFRAME-05: 1101 --> allowed but its not for us!!!
2587 *
2588 */
2589int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
2590{
2591 u32 low_id, high_id;
2592 ATH5K_TRACE(ah->ah_sc);
2593
2594 if (ah->ah_version == AR5K_AR5212) {
2595 low_id = AR5K_LOW_ID(mask);
2596 high_id = AR5K_HIGH_ID(mask);
2597
2598 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
2599 ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
2600
2601 return 0;
2602 }
2603
2604 return -EIO;
2605}
2606
2607/*
2608 * Receive start/stop functions
2609 */
2610
/*
 * Start receive on PCU
 *
 * Clears the RX-disable diagnostic bit so the PCU resumes receiving.
 */
void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);

	/* TODO: ANI Support */
}
2621
/*
 * Stop receive on PCU
 *
 * Sets the RX-disable diagnostic bit so the PCU stops receiving.
 */
void ath5k_hw_stop_pcu_recv(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);

	/* TODO: ANI Support */
}
2632
2633/*
2634 * RX Filter functions
2635 */
2636
/*
 * Set multicast filter
 *
 * Writes the two 32-bit halves of the 64-bit multicast hash filter.
 */
void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
	ATH5K_TRACE(ah->ah_sc);
	/* Set the multicast filter */
	ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
	ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
2647
2648/*
2649 * Set multicast filter by index
2650 */
2651int ath5k_hw_set_mcast_filterindex(struct ath5k_hw *ah, u32 index)
2652{
2653
2654 ATH5K_TRACE(ah->ah_sc);
2655 if (index >= 64)
2656 return -EINVAL;
2657 else if (index >= 32)
2658 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
2659 (1 << (index - 32)));
2660 else
2661 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
2662
2663 return 0;
2664}
2665
2666/*
2667 * Clear Multicast filter by index
2668 */
2669int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
2670{
2671
2672 ATH5K_TRACE(ah->ah_sc);
2673 if (index >= 64)
2674 return -EINVAL;
2675 else if (index >= 32)
2676 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
2677 (1 << (index - 32)));
2678 else
2679 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
2680
2681 return 0;
2682}
2683
/*
 * Get current rx filter
 *
 * Reads the RX filter register; on AR5212 also folds the radar/PHY-error
 * bits of the PHY error filter register into the returned flags.
 */
u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
	u32 data, filter = 0;

	ATH5K_TRACE(ah->ah_sc);
	filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);

	/*Radar detection for 5212*/
	if (ah->ah_version == AR5K_AR5212) {
		data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);

		if (data & AR5K_PHY_ERR_FIL_RADAR)
			filter |= AR5K_RX_FILTER_RADARERR;
		if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
			filter |= AR5K_RX_FILTER_PHYERR;
	}

	return filter;
}
2706
/*
 * Set rx filter
 *
 * Writes the RX filter register. Radar/PHY-error flags are translated
 * into the AR5212 PHY error filter register; on AR5210 radar detection
 * is emulated via promiscuous mode instead.
 */
void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
	u32 data = 0;

	ATH5K_TRACE(ah->ah_sc);

	/* Set PHY error filter register on 5212*/
	if (ah->ah_version == AR5K_AR5212) {
		if (filter & AR5K_RX_FILTER_RADARERR)
			data |= AR5K_PHY_ERR_FIL_RADAR;
		if (filter & AR5K_RX_FILTER_PHYERR)
			data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
	}

	/*
	 * The AR5210 uses promiscous mode to detect radar activity
	 */
	if (ah->ah_version == AR5K_AR5210 &&
			(filter & AR5K_RX_FILTER_RADARERR)) {
		filter &= ~AR5K_RX_FILTER_RADARERR;
		filter |= AR5K_RX_FILTER_PROM;
	}

	/*Zero length DMA: enabled only when PHY errors are filtered in,
	 * since those frames can arrive with zero length*/
	if (data)
		AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
	else
		AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);

	/*Write RX Filter register*/
	ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);

	/*Write PHY error filter register on 5212*/
	if (ah->ah_version == AR5K_AR5212)
		ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);

}
2747
2748/*
2749 * Beacon related functions
2750 */
2751
/*
 * Get a 32bit TSF
 *
 * Returns the lower 32 bits of the hardware TSF counter.
 */
u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
}
2760
2761/*
2762 * Get the full 64bit TSF
2763 */
2764u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
2765{
2766 u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
2767 ATH5K_TRACE(ah->ah_sc);
2768
2769 return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
2770}
2771
/*
 * Force a TSF reset
 *
 * Sets the self-clearing RESET_TSF bit in the beacon control register.
 */
void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_RESET_TSF);
}
2780
/*
 * Initialize beacon timers
 *
 * Programs TIMER0..TIMER3 (next beacon, DMA beacon, software beacon,
 * ATIM window) and the beacon control register. In STA mode the DMA/SW
 * beacon timers are effectively disabled (set to max); in other modes
 * they are derived from @next_beacon.
 */
void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
	u32 timer1, timer2, timer3;

	ATH5K_TRACE(ah->ah_sc);
	/*
	 * Set the additional timers by mode
	 */
	switch (ah->ah_op_mode) {
	case IEEE80211_IF_TYPE_STA:
		if (ah->ah_version == AR5K_AR5210) {
			timer1 = 0xffffffff;
			timer2 = 0xffffffff;
		} else {
			timer1 = 0x0000ffff;
			timer2 = 0x0007ffff;
		}
		break;

	default:
		/* IBSS/AP: schedule DMA and SW beacon response times
		 * relative to the next beacon (values are in TU/8) */
		timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
		timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
	}

	/* ATIM window starts right after the beacon (at least 1 TU) */
	timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);

	/*
	 * Set the beacon register and enable all timers.
	 * (next beacon, DMA beacon, software beacon, ATIM window time)
	 */
	ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
	ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
	ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
	ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);

	ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
			AR5K_BEACON_RESET_TSF | AR5K_BEACON_ENABLE),
		AR5K_BEACON);
}
2823
2824#if 0
/*
 * Set beacon timers
 *
 * NOTE: this function is compiled out (#if 0) and kept for reference.
 * Programs CFP (PCF) registers, beacon timers, beacon-miss threshold,
 * sleep duration and (on 5212) the enhanced sleep registers from the
 * given beacon state. Returns 0 or -EINVAL on an inconsistent state.
 */
int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
		const struct ath5k_beacon_state *state)
{
	u32 cfp_period, next_cfp, dtim, interval, next_beacon;

	/*
	 * TODO: should be changed through *state
	 * review struct ath5k_beacon_state struct
	 *
	 * XXX: These are used for cfp period bellow, are they
	 * ok ? Is it O.K. for tsf here to be 0 or should we use
	 * get_tsf ?
	 */
	u32 dtim_count = 0; /* XXX */
	u32 cfp_count = 0; /* XXX */
	u32 tsf = 0; /* XXX */

	ATH5K_TRACE(ah->ah_sc);
	/* Return on an invalid beacon state */
	if (state->bs_interval < 1)
		return -EINVAL;

	interval = state->bs_interval;
	dtim = state->bs_dtim_period;

	/*
	 * PCF support?
	 */
	if (state->bs_cfp_period > 0) {
		/*
		 * Enable PCF mode and set the CFP
		 * (Contention Free Period) and timer registers
		 */
		cfp_period = state->bs_cfp_period * state->bs_dtim_period *
			state->bs_interval;
		next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
			state->bs_interval;

		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
				AR5K_STA_ID1_DEFAULT_ANTENNA |
				AR5K_STA_ID1_PCF);
		ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
		ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
				AR5K_CFP_DUR);
		ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
						next_cfp)) << 3, AR5K_TIMER2);
	} else {
		/* Disable PCF mode */
		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
				AR5K_STA_ID1_DEFAULT_ANTENNA |
				AR5K_STA_ID1_PCF);
	}

	/*
	 * Enable the beacon timer register
	 */
	ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);

	/*
	 * Start the beacon timers
	 */
	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &~
		(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
		AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
		AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
		AR5K_BEACON_PERIOD), AR5K_BEACON);

	/*
	 * Write new beacon miss threshold, if it appears to be valid
	 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
	 * and return if its not in range. We can test this by reading value and
	 * setting value to a largest value and seeing which values register.
	 */

	AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
			state->bs_bmiss_threshold);

	/*
	 * Set sleep control register
	 * XXX: Didn't find this in 5210 code but since this register
	 * exists also in ar5k's 5210 headers i leave it as common code.
	 */
	AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
			(state->bs_sleep_duration - 3) << 3);

	/*
	 * Set enhanced sleep registers on 5212
	 */
	if (ah->ah_version == AR5K_AR5212) {
		/* Round the intervals up to the sleep duration when it is
		 * an exact multiple, so the chip can sleep through them */
		if (state->bs_sleep_duration > state->bs_interval &&
				roundup(state->bs_sleep_duration, interval) ==
				state->bs_sleep_duration)
			interval = state->bs_sleep_duration;

		if (state->bs_sleep_duration > dtim && (dtim == 0 ||
				roundup(state->bs_sleep_duration, dtim) ==
				state->bs_sleep_duration))
			dtim = state->bs_sleep_duration;

		/* TIM interval can never exceed the DTIM interval */
		if (interval > dtim)
			return -EINVAL;

		next_beacon = interval == dtim ? state->bs_next_dtim :
			state->bs_next_beacon;

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
			AR5K_SLEEP0_NEXT_DTIM) |
			AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
			AR5K_SLEEP0_ENH_SLEEP_EN |
			AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);

		ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
			AR5K_SLEEP1_NEXT_TIM) |
			AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
			AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
	}

	return 0;
}
2951
/*
 * Reset beacon timers
 *
 * NOTE: compiled out (#if 0), kept for reference. Disables TIMER0,
 * clears the PCF/antenna bits and leaves only the beacon period mask
 * in the beacon control register.
 */
void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	/*
	 * Disable beacon timer
	 */
	ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);

	/*
	 * Disable some beacon register values
	 */
	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
			AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
	ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
}
2970
/*
 * Wait for beacon queue to finish
 *
 * NOTE: compiled out (#if 0), kept for reference. On AR5210 polls the
 * beacon status/control registers and re-schedules the beacon queue on
 * timeout; on 5211/5212 waits for the QCU's pending-frame count to reach
 * zero. Returns 0 on success, -EIO on timeout.
 */
int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
{
	unsigned int i;
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	/* 5210 doesn't have QCU*/
	if (ah->ah_version == AR5K_AR5210) {
		/*
		 * Wait for beaconn queue to finish by checking
		 * Control Register and Beacon Status Register.
		 * NOTE(review): the second test applies a BSR bit mask to
		 * the CR register — confirm against the AR5210 datasheet.
		 */
		for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
			if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
					||
				!(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
				break;
			udelay(10);
		}

		/* Timeout... (i is unsigned, so this is really i == 0) */
		if (i <= 0) {
			/*
			 * Re-schedule the beacon queue
			 */
			ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BCR);

			return -EIO;
		}
		ret = 0;
	} else {
		/*5211/5212*/
		ret = ath5k_hw_register_timeout(ah,
			AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
			AR5K_QCU_STS_FRMPENDCNT, 0, false);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
			return -EIO;
	}

	return ret;
}
3019#endif
3020
/*
 * Update mib counters (statistics)
 *
 * Accumulates the hardware's read-and-clear MIB counters into @stats and
 * resets the 5212 profile counters.
 */
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
		struct ieee80211_low_level_stats *stats)
{
	ATH5K_TRACE(ah->ah_sc);

	/* Read-And-Clear */
	stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
	stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
	stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
	stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);

	/* XXX: Should we use this to track beacon count ?
	 * -we read it anyway to clear the register */
	ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);

	/* Reset profile count registers on 5212*/
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
	}
}
3047
3048/** ath5k_hw_set_ack_bitrate - set bitrate for ACKs
3049 *
3050 * @ah: the &struct ath5k_hw
3051 * @high: determines if to use low bit rate or now
3052 */
3053void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
3054{
3055 if (ah->ah_version != AR5K_AR5212)
3056 return;
3057 else {
3058 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
3059 if (high)
3060 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
3061 else
3062 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
3063 }
3064}
3065
3066
3067/*
3068 * ACK/CTS Timeouts
3069 */
3070
/*
 * Set ACK timeout on PCU
 *
 * @timeout is in usec; rejects values that don't fit in the hardware
 * field (after clock conversion) with -EINVAL.
 */
int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
	ATH5K_TRACE(ah->ah_sc);
	/* Max representable value is the all-ones field converted to usec */
	if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
			ah->ah_turbo) <= timeout)
		return -EINVAL;

	AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
		ath5k_hw_htoclock(timeout, ah->ah_turbo));

	return 0;
}
3086
/*
 * Read the ACK timeout from PCU
 *
 * Returns the current ACK timeout converted from clocks to usec.
 */
unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);

	return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
			AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
}
3097
3098/*
3099 * Set CTS timeout on PCU
3100 */
3101int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
3102{
3103 ATH5K_TRACE(ah->ah_sc);
3104 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
3105 ah->ah_turbo) <= timeout)
3106 return -EINVAL;
3107
3108 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
3109 ath5k_hw_htoclock(timeout, ah->ah_turbo));
3110
3111 return 0;
3112}
3113
/*
 * Read CTS timeout from PCU
 *
 * Returns the current CTS timeout converted from clocks to usec.
 */
unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
			AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
}
3123
3124/*
3125 * Key table (WEP) functions
3126 */
3127
/*
 * Reset (clear) a key cache entry.
 *
 * Zeroes all words of the entry and marks it as NULL encryption on
 * AR5212+. Always returns 0 (the entry index is range-checked by
 * AR5K_ASSERT_ENTRY).
 */
int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
{
	unsigned int i;

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));

	/*
	 * Set NULL encryption on AR5212+
	 *
	 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
	 *       AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
	 *
	 * Note2: Windows driver (ndiswrapper) sets this to
	 *        0x00000714 instead of 0x00000007
	 */
	if (ah->ah_version > AR5K_AR5211)
		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
				AR5K_KEYTABLE_TYPE(entry));

	return 0;
}
3153
/*
 * Check whether a key cache entry is marked valid.
 *
 * Returns non-zero when the VALID flag in the entry's MAC1 word is set.
 */
int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	/* Check the validation flag at the end of the entry */
	return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
		AR5K_KEYTABLE_VALID;
}
3163
/*
 * Write a WEP key into a key cache entry and associate it with @mac.
 *
 * Supports 40/104/128-bit WEP only; returns -EOPNOTSUPP for oversized
 * keys and -EINVAL for other lengths.
 *
 * NOTE(review): the key bytes are memcpy'd straight into a __le32 array
 * and then converted with le32_to_cpu — verify this produces the right
 * word layout on big-endian hosts.
 */
int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
		const struct ieee80211_key_conf *key, const u8 *mac)
{
	unsigned int i;
	__le32 key_v[5] = {};
	u32 keytype;

	ATH5K_TRACE(ah->ah_sc);

	/* key->keylen comes in from mac80211 in bytes */

	if (key->keylen > AR5K_KEYTABLE_SIZE / 8)
		return -EOPNOTSUPP;

	switch (key->keylen) {
	/* WEP 40-bit   = 40-bit  entered key + 24 bit IV = 64-bit */
	case 40 / 8:
		memcpy(&key_v[0], key->key, 5);
		keytype = AR5K_KEYTABLE_TYPE_40;
		break;

	/* WEP 104-bit  = 104-bit entered key + 24-bit IV = 128-bit */
	case 104 / 8:
		memcpy(&key_v[0], &key->key[0], 6);
		memcpy(&key_v[2], &key->key[6], 6);
		memcpy(&key_v[4], &key->key[12], 1);
		keytype = AR5K_KEYTABLE_TYPE_104;
		break;
	/* WEP 128-bit  = 128-bit entered key + 24 bit IV = 152-bit */
	case 128 / 8:
		memcpy(&key_v[0], &key->key[0], 6);
		memcpy(&key_v[2], &key->key[6], 6);
		memcpy(&key_v[4], &key->key[12], 4);
		keytype = AR5K_KEYTABLE_TYPE_128;
		break;

	default:
		return -EINVAL; /* shouldn't happen */
	}

	/* Write the key words followed by the type word */
	for (i = 0; i < ARRAY_SIZE(key_v); i++)
		ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
				AR5K_KEYTABLE_OFF(entry, i));

	ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));

	return ath5k_hw_set_key_lladdr(ah, entry, mac);
}
3212
3213int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
3214{
3215 u32 low_id, high_id;
3216
3217 ATH5K_TRACE(ah->ah_sc);
3218 /* Invalid entry (key table overflow) */
3219 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
3220
3221 /* MAC may be NULL if it's a broadcast key. In this case no need to
3222 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
3223 if (unlikely(mac == NULL)) {
3224 low_id = 0xffffffff;
3225 high_id = 0xffff | AR5K_KEYTABLE_VALID;
3226 } else {
3227 low_id = AR5K_LOW_ID(mac);
3228 high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
3229 }
3230
3231 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
3232 ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));
3233
3234 return 0;
3235}
3236
3237
3238/********************************************\
3239Queue Control Unit, DFS Control Unit Functions
3240\********************************************/
3241
3242/*
3243 * Initialize a transmit queue
3244 */
3245int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
3246 struct ath5k_txq_info *queue_info)
3247{
3248 unsigned int queue;
3249 int ret;
3250
3251 ATH5K_TRACE(ah->ah_sc);
3252
3253 /*
3254 * Get queue by type
3255 */
3256 /*5210 only has 2 queues*/
3257 if (ah->ah_version == AR5K_AR5210) {
3258 switch (queue_type) {
3259 case AR5K_TX_QUEUE_DATA:
3260 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
3261 break;
3262 case AR5K_TX_QUEUE_BEACON:
3263 case AR5K_TX_QUEUE_CAB:
3264 queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
3265 break;
3266 default:
3267 return -EINVAL;
3268 }
3269 } else {
3270 switch (queue_type) {
3271 case AR5K_TX_QUEUE_DATA:
3272 for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
3273 ah->ah_txq[queue].tqi_type !=
3274 AR5K_TX_QUEUE_INACTIVE; queue++) {
3275
3276 if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
3277 return -EINVAL;
3278 }
3279 break;
3280 case AR5K_TX_QUEUE_UAPSD:
3281 queue = AR5K_TX_QUEUE_ID_UAPSD;
3282 break;
3283 case AR5K_TX_QUEUE_BEACON:
3284 queue = AR5K_TX_QUEUE_ID_BEACON;
3285 break;
3286 case AR5K_TX_QUEUE_CAB:
3287 queue = AR5K_TX_QUEUE_ID_CAB;
3288 break;
3289 case AR5K_TX_QUEUE_XR_DATA:
3290 if (ah->ah_version != AR5K_AR5212)
3291 ATH5K_ERR(ah->ah_sc,
3292 "XR data queues only supported in"
3293 " 5212!\n");
3294 queue = AR5K_TX_QUEUE_ID_XR_DATA;
3295 break;
3296 default:
3297 return -EINVAL;
3298 }
3299 }
3300
3301 /*
3302 * Setup internal queue structure
3303 */
3304 memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
3305 ah->ah_txq[queue].tqi_type = queue_type;
3306
3307 if (queue_info != NULL) {
3308 queue_info->tqi_type = queue_type;
3309 ret = ath5k_hw_setup_tx_queueprops(ah, queue, queue_info);
3310 if (ret)
3311 return ret;
3312 }
3313 /*
3314 * We use ah_txq_status to hold a temp value for
3315 * the Secondary interrupt mask registers on 5211+
3316 * check out ath5k_hw_reset_tx_queue
3317 */
3318 AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
3319
3320 return queue;
3321}
3322
3323/*
3324 * Setup a transmit queue
3325 */
3326int ath5k_hw_setup_tx_queueprops(struct ath5k_hw *ah, int queue,
3327 const struct ath5k_txq_info *queue_info)
3328{
3329 ATH5K_TRACE(ah->ah_sc);
3330 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
3331
3332 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
3333 return -EIO;
3334
3335 memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
3336
3337 /*XXX: Is this supported on 5210 ?*/
3338 if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
3339 ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
3340 (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
3341 queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
3342 ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
3343
3344 return 0;
3345}
3346
3347/*
3348 * Get properties for a specific transmit queue
3349 */
3350int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
3351 struct ath5k_txq_info *queue_info)
3352{
3353 ATH5K_TRACE(ah->ah_sc);
3354 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
3355 return 0;
3356}
3357
3358/*
3359 * Set a transmit queue inactive
3360 */
3361void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3362{
3363 ATH5K_TRACE(ah->ah_sc);
3364 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
3365 return;
3366
3367 /* This queue will be skipped in further operations */
3368 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
3369 /*For SIMR setup*/
3370 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
3371}
3372
3373/*
3374 * Set DFS params for a transmit queue
3375 */
3376int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3377{
3378 u32 cw_min, cw_max, retry_lg, retry_sh;
3379 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
3380
3381 ATH5K_TRACE(ah->ah_sc);
3382 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
3383
3384 tq = &ah->ah_txq[queue];
3385
3386 if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
3387 return 0;
3388
3389 if (ah->ah_version == AR5K_AR5210) {
3390 /* Only handle data queues, others will be ignored */
3391 if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
3392 return 0;
3393
3394 /* Set Slot time */
3395 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3396 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
3397 AR5K_SLOT_TIME);
3398 /* Set ACK_CTS timeout */
3399 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3400 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
3401 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
3402 /* Set Transmit Latency */
3403 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3404 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
3405 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
3406 /* Set IFS0 */
3407 if (ah->ah_turbo)
3408 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
3409 (ah->ah_aifs + tq->tqi_aifs) *
3410 AR5K_INIT_SLOT_TIME_TURBO) <<
3411 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
3412 AR5K_IFS0);
3413 else
3414 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
3415 (ah->ah_aifs + tq->tqi_aifs) *
3416 AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
3417 AR5K_INIT_SIFS, AR5K_IFS0);
3418
3419 /* Set IFS1 */
3420 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3421 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
3422 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
3423 /* Set AR5K_PHY_SETTLING */
3424 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3425 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3426 | 0x38 :
3427 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
3428 | 0x1C,
3429 AR5K_PHY_SETTLING);
3430 /* Set Frame Control Register */
3431 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3432 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
3433 AR5K_PHY_TURBO_SHORT | 0x2020) :
3434 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
3435 AR5K_PHY_FRAME_CTL_5210);
3436 }
3437
3438 /*
3439 * Calculate cwmin/max by channel mode
3440 */
3441 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
3442 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
3443 ah->ah_aifs = AR5K_TUNE_AIFS;
3444 /*XR is only supported on 5212*/
3445 if (IS_CHAN_XR(ah->ah_current_channel) &&
3446 ah->ah_version == AR5K_AR5212) {
3447 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
3448 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
3449 ah->ah_aifs = AR5K_TUNE_AIFS_XR;
3450 /*B mode is not supported on 5210*/
3451 } else if (IS_CHAN_B(ah->ah_current_channel) &&
3452 ah->ah_version != AR5K_AR5210) {
3453 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
3454 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
3455 ah->ah_aifs = AR5K_TUNE_AIFS_11B;
3456 }
3457
3458 cw_min = 1;
3459 while (cw_min < ah->ah_cw_min)
3460 cw_min = (cw_min << 1) | 1;
3461
3462 cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
3463 ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
3464 cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
3465 ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
3466
3467 /*
3468 * Calculate and set retry limits
3469 */
3470 if (ah->ah_software_retry) {
3471 /* XXX Need to test this */
3472 retry_lg = ah->ah_limit_tx_retries;
3473 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
3474 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
3475 } else {
3476 retry_lg = AR5K_INIT_LG_RETRY;
3477 retry_sh = AR5K_INIT_SH_RETRY;
3478 }
3479
3480 /*No QCU/DCU [5210]*/
3481 if (ah->ah_version == AR5K_AR5210) {
3482 ath5k_hw_reg_write(ah,
3483 (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
3484 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
3485 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
3486 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
3487 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
3488 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
3489 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
3490 AR5K_NODCU_RETRY_LMT);
3491 } else {
3492 /*QCU/DCU [5211+]*/
3493 ath5k_hw_reg_write(ah,
3494 AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
3495 AR5K_DCU_RETRY_LMT_SLG_RETRY) |
3496 AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
3497 AR5K_DCU_RETRY_LMT_SSH_RETRY) |
3498 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
3499 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
3500 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
3501
3502 /*===Rest is also for QCU/DCU only [5211+]===*/
3503
3504 /*
3505 * Set initial content window (cw_min/cw_max)
3506 * and arbitrated interframe space (aifs)...
3507 */
3508 ath5k_hw_reg_write(ah,
3509 AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
3510 AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
3511 AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
3512 AR5K_DCU_LCL_IFS_AIFS),
3513 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
3514
3515 /*
3516 * Set misc registers
3517 */
3518 ath5k_hw_reg_write(ah, AR5K_QCU_MISC_DCU_EARLY,
3519 AR5K_QUEUE_MISC(queue));
3520
3521 if (tq->tqi_cbr_period) {
3522 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
3523 AR5K_QCU_CBRCFG_INTVAL) |
3524 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
3525 AR5K_QCU_CBRCFG_ORN_THRES),
3526 AR5K_QUEUE_CBRCFG(queue));
3527 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3528 AR5K_QCU_MISC_FRSHED_CBR);
3529 if (tq->tqi_cbr_overflow_limit)
3530 AR5K_REG_ENABLE_BITS(ah,
3531 AR5K_QUEUE_MISC(queue),
3532 AR5K_QCU_MISC_CBR_THRES_ENABLE);
3533 }
3534
3535 if (tq->tqi_ready_time)
3536 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
3537 AR5K_QCU_RDYTIMECFG_INTVAL) |
3538 AR5K_QCU_RDYTIMECFG_ENABLE,
3539 AR5K_QUEUE_RDYTIMECFG(queue));
3540
3541 if (tq->tqi_burst_time) {
3542 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
3543 AR5K_DCU_CHAN_TIME_DUR) |
3544 AR5K_DCU_CHAN_TIME_ENABLE,
3545 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
3546
3547 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
3548 AR5K_REG_ENABLE_BITS(ah,
3549 AR5K_QUEUE_MISC(queue),
3550 AR5K_QCU_MISC_RDY_VEOL_POLICY);
3551 }
3552
3553 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
3554 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
3555 AR5K_QUEUE_DFS_MISC(queue));
3556
3557 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
3558 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
3559 AR5K_QUEUE_DFS_MISC(queue));
3560
3561 /*
3562 * Set registers by queue type
3563 */
3564 switch (tq->tqi_type) {
3565 case AR5K_TX_QUEUE_BEACON:
3566 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3567 AR5K_QCU_MISC_FRSHED_DBA_GT |
3568 AR5K_QCU_MISC_CBREXP_BCN |
3569 AR5K_QCU_MISC_BCN_ENABLE);
3570
3571 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
3572 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
3573 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
3574 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
3575 AR5K_DCU_MISC_BCN_ENABLE);
3576
3577 ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
3578 (AR5K_TUNE_SW_BEACON_RESP -
3579 AR5K_TUNE_DMA_BEACON_RESP) -
3580 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
3581 AR5K_QCU_RDYTIMECFG_ENABLE,
3582 AR5K_QUEUE_RDYTIMECFG(queue));
3583 break;
3584
3585 case AR5K_TX_QUEUE_CAB:
3586 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3587 AR5K_QCU_MISC_FRSHED_DBA_GT |
3588 AR5K_QCU_MISC_CBREXP |
3589 AR5K_QCU_MISC_CBREXP_BCN);
3590
3591 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
3592 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
3593 AR5K_DCU_MISC_ARBLOCK_CTL_S));
3594 break;
3595
3596 case AR5K_TX_QUEUE_UAPSD:
3597 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
3598 AR5K_QCU_MISC_CBREXP);
3599 break;
3600
3601 case AR5K_TX_QUEUE_DATA:
3602 default:
3603 break;
3604 }
3605
3606 /*
3607 * Enable interrupts for this tx queue
3608 * in the secondary interrupt mask registers
3609 */
3610 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
3611 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
3612
3613 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
3614 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
3615
3616 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
3617 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
3618
3619 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
3620 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
3621
3622 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
3623 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
3624
3625
3626 /* Update secondary interrupt mask registers */
3627 ah->ah_txq_imr_txok &= ah->ah_txq_status;
3628 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
3629 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
3630 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
3631 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
3632
3633 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
3634 AR5K_SIMR0_QCU_TXOK) |
3635 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
3636 AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
3637 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
3638 AR5K_SIMR1_QCU_TXERR) |
3639 AR5K_REG_SM(ah->ah_txq_imr_txeol,
3640 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
3641 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txurn,
3642 AR5K_SIMR2_QCU_TXURN), AR5K_SIMR2);
3643 }
3644
3645 return 0;
3646}
3647
3648/*
3649 * Get number of pending frames
3650 * for a specific queue [5211+]
3651 */
3652u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) {
3653 ATH5K_TRACE(ah->ah_sc);
3654 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
3655
3656 /* Return if queue is declared inactive */
3657 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
3658 return false;
3659
3660 /* XXX: How about AR5K_CFG_TXCNT ? */
3661 if (ah->ah_version == AR5K_AR5210)
3662 return false;
3663
3664 return AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT;
3665}
3666
3667/*
3668 * Set slot time
3669 */
3670int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
3671{
3672 ATH5K_TRACE(ah->ah_sc);
3673 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
3674 return -EINVAL;
3675
3676 if (ah->ah_version == AR5K_AR5210)
3677 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
3678 ah->ah_turbo), AR5K_SLOT_TIME);
3679 else
3680 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
3681
3682 return 0;
3683}
3684
3685/*
3686 * Get slot time
3687 */
3688unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
3689{
3690 ATH5K_TRACE(ah->ah_sc);
3691 if (ah->ah_version == AR5K_AR5210)
3692 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
3693 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
3694 else
3695 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
3696}
3697
3698
3699/******************************\
3700 Hardware Descriptor Functions
3701\******************************/
3702
3703/*
3704 * TX Descriptor
3705 */
3706
3707/*
3708 * Initialize the 2-word tx descriptor on 5210/5211
3709 */
3710static int
3711ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3712 unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type,
3713 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
3714 unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
3715 unsigned int rtscts_rate, unsigned int rtscts_duration)
3716{
3717 u32 frame_type;
3718 struct ath5k_hw_2w_tx_ctl *tx_ctl;
3719 unsigned int frame_len;
3720
3721 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
3722
3723 /*
3724 * Validate input
3725 * - Zero retries don't make sense.
3726 * - A zero rate will put the HW into a mode where it continously sends
3727 * noise on the channel, so it is important to avoid this.
3728 */
3729 if (unlikely(tx_tries0 == 0)) {
3730 ATH5K_ERR(ah->ah_sc, "zero retries\n");
3731 WARN_ON(1);
3732 return -EINVAL;
3733 }
3734 if (unlikely(tx_rate0 == 0)) {
3735 ATH5K_ERR(ah->ah_sc, "zero rate\n");
3736 WARN_ON(1);
3737 return -EINVAL;
3738 }
3739
3740 /* Clear descriptor */
3741 memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
3742
3743 /* Setup control descriptor */
3744
3745 /* Verify and set frame length */
3746
3747 /* remove padding we might have added before */
3748 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;
3749
3750 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
3751 return -EINVAL;
3752
3753 tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
3754
3755 /* Verify and set buffer length */
3756
3757 /* NB: beacon's BufLen must be a multiple of 4 bytes */
3758 if(type == AR5K_PKT_TYPE_BEACON)
3759 pkt_len = roundup(pkt_len, 4);
3760
3761 if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
3762 return -EINVAL;
3763
3764 tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
3765
3766 /*
3767 * Verify and set header length
3768 * XXX: I only found that on 5210 code, does it work on 5211 ?
3769 */
3770 if (ah->ah_version == AR5K_AR5210) {
3771 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
3772 return -EINVAL;
3773 tx_ctl->tx_control_0 |=
3774 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
3775 }
3776
3777 /*Diferences between 5210-5211*/
3778 if (ah->ah_version == AR5K_AR5210) {
3779 switch (type) {
3780 case AR5K_PKT_TYPE_BEACON:
3781 case AR5K_PKT_TYPE_PROBE_RESP:
3782 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
3783 case AR5K_PKT_TYPE_PIFS:
3784 frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
3785 default:
3786 frame_type = type /*<< 2 ?*/;
3787 }
3788
3789 tx_ctl->tx_control_0 |=
3790 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
3791 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
3792 } else {
3793 tx_ctl->tx_control_0 |=
3794 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
3795 AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
3796 tx_ctl->tx_control_1 |=
3797 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
3798 }
3799#define _TX_FLAGS(_c, _flag) \
3800 if (flags & AR5K_TXDESC_##_flag) \
3801 tx_ctl->tx_control_##_c |= \
3802 AR5K_2W_TX_DESC_CTL##_c##_##_flag
3803
3804 _TX_FLAGS(0, CLRDMASK);
3805 _TX_FLAGS(0, VEOL);
3806 _TX_FLAGS(0, INTREQ);
3807 _TX_FLAGS(0, RTSENA);
3808 _TX_FLAGS(1, NOACK);
3809
3810#undef _TX_FLAGS
3811
3812 /*
3813 * WEP crap
3814 */
3815 if (key_index != AR5K_TXKEYIX_INVALID) {
3816 tx_ctl->tx_control_0 |=
3817 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
3818 tx_ctl->tx_control_1 |=
3819 AR5K_REG_SM(key_index,
3820 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
3821 }
3822
3823 /*
3824 * RTS/CTS Duration [5210 ?]
3825 */
3826 if ((ah->ah_version == AR5K_AR5210) &&
3827 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
3828 tx_ctl->tx_control_1 |= rtscts_duration &
3829 AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
3830
3831 return 0;
3832}
3833
/*
 * Initialize the 4-word tx descriptor on 5212
 *
 * Packs frame length, buffer length, tx power, antenna, frame type,
 * first-series rate/tries, flags, key index and RTS/CTS fields into
 * the four control words of a 5212 tx descriptor.
 *
 * Returns 0 on success, -EINVAL on invalid input (including RTS and
 * CTS requested at the same time).
 */
static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
	struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
	enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
	unsigned int tx_tries0, unsigned int key_index,
	unsigned int antenna_mode, unsigned int flags, unsigned int rtscts_rate,
	unsigned int rtscts_duration)
{
	struct ath5k_hw_4w_tx_ctl *tx_ctl;
	unsigned int frame_len;

	ATH5K_TRACE(ah->ah_sc);
	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;

	/*
	 * Validate input
	 * - Zero retries don't make sense.
	 * - A zero rate will put the HW into a mode where it continously sends
	 *   noise on the channel, so it is important to avoid this.
	 */
	if (unlikely(tx_tries0 == 0)) {
		ATH5K_ERR(ah->ah_sc, "zero retries\n");
		WARN_ON(1);
		return -EINVAL;
	}
	if (unlikely(tx_rate0 == 0)) {
		ATH5K_ERR(ah->ah_sc, "zero rate\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Clear descriptor */
	memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));

	/* Setup control descriptor */

	/* Verify and set frame length */

	/* remove padding we might have added before */
	frame_len = pkt_len - (hdr_len & 3) + FCS_LEN;

	if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
		return -EINVAL;

	tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;

	/* Verify and set buffer length */

	/* NB: beacon's BufLen must be a multiple of 4 bytes */
	if(type == AR5K_PKT_TYPE_BEACON)
		pkt_len = roundup(pkt_len, 4);

	if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
		return -EINVAL;

	tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;

	/* Power/antenna in word 0, frame type in word 1, first-series
	 * tries in word 2 (plus the HW's own extra tries), first-series
	 * rate in word 3 */
	tx_ctl->tx_control_0 |=
		AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
		AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
	tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
					AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
	tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
					AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
	tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;

#define _TX_FLAGS(_c, _flag)					\
	if (flags & AR5K_TXDESC_##_flag)			\
		tx_ctl->tx_control_##_c |=			\
			AR5K_4W_TX_DESC_CTL##_c##_##_flag

	_TX_FLAGS(0, CLRDMASK);
	_TX_FLAGS(0, VEOL);
	_TX_FLAGS(0, INTREQ);
	_TX_FLAGS(0, RTSENA);
	_TX_FLAGS(0, CTSENA);
	_TX_FLAGS(1, NOACK);

#undef _TX_FLAGS

	/*
	 * WEP crap
	 */
	if (key_index != AR5K_TXKEYIX_INVALID) {
		tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
		tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
				AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
	}

	/*
	 * RTS/CTS: mutually exclusive; duration goes in word 2,
	 * rate in word 3
	 */
	if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
		if ((flags & AR5K_TXDESC_RTSENA) &&
				(flags & AR5K_TXDESC_CTSENA))
			return -EINVAL;
		tx_ctl->tx_control_2 |= rtscts_duration &
				AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
		tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
				AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
	}

	return 0;
}
3940
/*
 * Initialize a 4-word multirate tx descriptor on 5212
 *
 * Fills in the fallback rate series 1-3 (rate + tries per series)
 * in control words 2 and 3. Series with zero tries are skipped.
 *
 * Returns 1 when the descriptor was updated (5212 only), 0 when the
 * chip has no multirate support; -EINVAL on a zero rate paired with
 * nonzero tries.
 */
static int
ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
	unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2,
	unsigned int tx_rate3, u_int tx_tries3)
{
	struct ath5k_hw_4w_tx_ctl *tx_ctl;

	/*
	 * Rates can be 0 as long as the retry count is 0 too.
	 * A zero rate and nonzero retry count will put the HW into a mode where
	 * it continously sends noise on the channel, so it is important to
	 * avoid this.
	 */
	if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
			(tx_rate2 == 0 && tx_tries2 != 0) ||
			(tx_rate3 == 0 && tx_tries3 != 0))) {
		ATH5K_ERR(ah->ah_sc, "zero rate\n");
		WARN_ON(1);
		return -EINVAL;
	}

	if (ah->ah_version == AR5K_AR5212) {
		tx_ctl = &desc->ud.ds_tx5212.tx_ctl;

/* Per-series helper: tries go into word 2, rate into word 3 */
#define _XTX_TRIES(_n)						\
	if (tx_tries##_n) {					\
		tx_ctl->tx_control_2 |=				\
		    AR5K_REG_SM(tx_tries##_n,			\
		    AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n);	\
		tx_ctl->tx_control_3 |=				\
		    AR5K_REG_SM(tx_rate##_n,			\
		    AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n);	\
	}

		_XTX_TRIES(1);
		_XTX_TRIES(2);
		_XTX_TRIES(3);

#undef _XTX_TRIES

		return 1;
	}

	return 0;
}
3989
/*
 * Proccess the tx status descriptor on 5210/5211
 *
 * Translates the hardware status words of a completed 2-word tx
 * descriptor into the generic ath5k_tx_status. Returns -EINPROGRESS
 * while the hardware has not set the DONE bit yet, 0 otherwise
 * (ts->ts_status then carries any AR5K_TXERR_* error bits).
 */
static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
{
	struct ath5k_hw_2w_tx_ctl *tx_ctl;
	struct ath5k_hw_tx_status *tx_status;

	ATH5K_TRACE(ah->ah_sc);

	tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
	tx_status = &desc->ud.ds_tx5210.tx_stat;

	/* No frame has been send or error */
	if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
		return -EINPROGRESS;

	/*
	 * Get descriptor status
	 */
	ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
	ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
	ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
	/*TODO: ts->ts_virtcol + test*/
	ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
		AR5K_DESC_TX_STATUS1_SEQ_NUM);
	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
	/* Antenna is hardwired to 1 here; the 2-word status has no
	 * antenna field equivalent to the 5212's XMIT_ANTENNA bit */
	ts->ts_antenna = 1;
	ts->ts_status = 0;
	/* The rate comes from the control word, not the status */
	ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_0,
		AR5K_2W_TX_DESC_CTL0_XMIT_RATE);

	/* Collect error bits only when the frame didn't go out OK */
	if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
		if (tx_status->tx_status_0 &
				AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
			ts->ts_status |= AR5K_TXERR_XRETRY;

		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
			ts->ts_status |= AR5K_TXERR_FIFO;

		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
			ts->ts_status |= AR5K_TXERR_FILT;
	}

	return 0;
}
4041
/*
 * Proccess a tx descriptor on 5212
 *
 * Translates the hardware status words of a completed 4-word tx
 * descriptor into the generic ath5k_tx_status, resolving the final
 * rate series and accumulating the tries of all earlier series into
 * ts_longretry. Returns -EINPROGRESS while the hardware has not set
 * the DONE bit yet, 0 otherwise.
 */
static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
		struct ath5k_desc *desc, struct ath5k_tx_status *ts)
{
	struct ath5k_hw_4w_tx_ctl *tx_ctl;
	struct ath5k_hw_tx_status *tx_status;

	ATH5K_TRACE(ah->ah_sc);

	tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
	tx_status = &desc->ud.ds_tx5212.tx_stat;

	/* No frame has been send or error */
	if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
		return -EINPROGRESS;

	/*
	 * Get descriptor status
	 */
	ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
	ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
	ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
		AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
	ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
		AR5K_DESC_TX_STATUS1_SEQ_NUM);
	ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
		AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
	ts->ts_antenna = (tx_status->tx_status_1 &
		AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
	ts->ts_status = 0;

	/* FINAL_TS_INDEX tells which rate series the frame finally
	 * went out on; read that series' rate back from the control
	 * words and add the tries spent on the failed series */
	switch (AR5K_REG_MS(tx_status->tx_status_1,
			AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) {
	case 0:
		ts->ts_rate = tx_ctl->tx_control_3 &
			AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
		break;
	case 1:
		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
			AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
		break;
	case 2:
		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
			AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
		break;
	case 3:
		ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
			AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
		ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
			AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3);
		break;
	}

	/* Collect error bits only when the frame didn't go out OK */
	if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
		if (tx_status->tx_status_0 &
				AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
			ts->ts_status |= AR5K_TXERR_XRETRY;

		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
			ts->ts_status |= AR5K_TXERR_FIFO;

		if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
			ts->ts_status |= AR5K_TXERR_FILT;
	}

	return 0;
}
4117
4118/*
4119 * RX Descriptor
4120 */
4121
4122/*
4123 * Initialize an rx descriptor
4124 */
4125int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
4126 u32 size, unsigned int flags)
4127{
4128 struct ath5k_hw_rx_ctl *rx_ctl;
4129
4130 ATH5K_TRACE(ah->ah_sc);
4131 rx_ctl = &desc->ud.ds_rx.rx_ctl;
4132
4133 /*
4134 * Clear the descriptor
4135 * If we don't clean the status descriptor,
4136 * while scanning we get too many results,
4137 * most of them virtual, after some secs
4138 * of scanning system hangs. M.F.
4139 */
4140 memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
4141
4142 /* Setup descriptor */
4143 rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
4144 if (unlikely(rx_ctl->rx_control_1 != size))
4145 return -EINVAL;
4146
4147 if (flags & AR5K_RXDESC_INTREQ)
4148 rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
4149
4150 return 0;
4151}
4152
/*
 * Proccess the rx status descriptor on 5210/5211
 *
 * Translates the hardware status words of a completed rx descriptor
 * into the generic ath5k_rx_status. Returns -EINPROGRESS while the
 * hardware has not set the DONE bit yet, 0 otherwise (rs->rs_status
 * then carries any AR5K_RXERR_* error bits).
 */
static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
{
	struct ath5k_hw_rx_status *rx_status;

	rx_status = &desc->ud.ds_rx.u.rx_stat;

	/* No frame received / not ready */
	if (unlikely((rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_DONE)
				== 0))
		return -EINPROGRESS;

	/*
	 * Frame receive status
	 */
	rs->rs_datalen = rx_status->rx_status_0 &
		AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
	rs->rs_antenna = rx_status->rx_status_0 &
		AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
	rs->rs_more = rx_status->rx_status_0 &
		AR5K_5210_RX_DESC_STATUS0_MORE;
	/* TODO: this timestamp is 13 bit, later on we assume 15 bit */
	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
		AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
	rs->rs_status = 0;
	rs->rs_phyerr = 0;

	/*
	 * Key table status
	 */
	if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
			AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
	else
		rs->rs_keyix = AR5K_RXKEYIX_INVALID;

	/*
	 * Receive/descriptor errors
	 */
	if ((rx_status->rx_status_1 &
			AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
		if (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_CRC;

		if (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
			rs->rs_status |= AR5K_RXERR_FIFO;

		if (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
			rs->rs_status |= AR5K_RXERR_PHY;
			/* On 5210/5211 the PHY error code sits in the
			 * status word itself (no separate error word) */
			rs->rs_phyerr |= AR5K_REG_MS(rx_status->rx_status_1,
				AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
		}

		if (rx_status->rx_status_1 &
				AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_DECRYPT;
	}

	return 0;
}
4223
/*
 * Proccess the rx status descriptor on 5212
 *
 * Translates the hardware status words of a completed rx descriptor
 * into the generic ath5k_rx_status. On 5212 the descriptor words are
 * overlaid with an error layout when a PHY error occurred, hence the
 * extra rx_err view. Returns -EINPROGRESS while the hardware has not
 * set the DONE bit yet, 0 otherwise.
 */
static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
		struct ath5k_desc *desc, struct ath5k_rx_status *rs)
{
	struct ath5k_hw_rx_status *rx_status;
	struct ath5k_hw_rx_error *rx_err;

	ATH5K_TRACE(ah->ah_sc);
	rx_status = &desc->ud.ds_rx.u.rx_stat;

	/* Overlay on error */
	rx_err = &desc->ud.ds_rx.u.rx_err;

	/* No frame received / not ready */
	if (unlikely((rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_DONE)
				== 0))
		return -EINPROGRESS;

	/*
	 * Frame receive status
	 */
	rs->rs_datalen = rx_status->rx_status_0 &
		AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
	rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
	rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
	rs->rs_antenna = rx_status->rx_status_0 &
		AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
	rs->rs_more = rx_status->rx_status_0 &
		AR5K_5212_RX_DESC_STATUS0_MORE;
	rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
		AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
	rs->rs_status = 0;
	rs->rs_phyerr = 0;

	/*
	 * Key table status
	 */
	if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
		rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
				AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
	else
		rs->rs_keyix = AR5K_RXKEYIX_INVALID;

	/*
	 * Receive/descriptor errors
	 */
	if ((rx_status->rx_status_1 &
			AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_CRC;

		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
			rs->rs_status |= AR5K_RXERR_PHY;
			/* The PHY error code lives in the overlaid
			 * error view, unlike on 5210/5211 */
			rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
					   AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
		}

		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
			rs->rs_status |= AR5K_RXERR_DECRYPT;

		if (rx_status->rx_status_1 &
				AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
			rs->rs_status |= AR5K_RXERR_MIC;
	}

	return 0;
}
4298
4299
4300/****************\
4301 GPIO Functions
4302\****************/
4303
4304/*
4305 * Set led state
4306 */
4307void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
4308{
4309 u32 led;
4310 /*5210 has different led mode handling*/
4311 u32 led_5210;
4312
4313 ATH5K_TRACE(ah->ah_sc);
4314
4315 /*Reset led status*/
4316 if (ah->ah_version != AR5K_AR5210)
4317 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
4318 AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED);
4319 else
4320 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
4321
4322 /*
4323 * Some blinking values, define at your wish
4324 */
4325 switch (state) {
4326 case AR5K_LED_SCAN:
4327 case AR5K_LED_AUTH:
4328 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
4329 led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
4330 break;
4331
4332 case AR5K_LED_INIT:
4333 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
4334 led_5210 = AR5K_PCICFG_LED_PEND;
4335 break;
4336
4337 case AR5K_LED_ASSOC:
4338 case AR5K_LED_RUN:
4339 led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
4340 led_5210 = AR5K_PCICFG_LED_ASSOC;
4341 break;
4342
4343 default:
4344 led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
4345 led_5210 = AR5K_PCICFG_LED_PEND;
4346 break;
4347 }
4348
4349 /*Write new status to the register*/
4350 if (ah->ah_version != AR5K_AR5210)
4351 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
4352 else
4353 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
4354}
4355
4356/*
4357 * Set GPIO outputs
4358 */
4359int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
4360{
4361 ATH5K_TRACE(ah->ah_sc);
4362 if (gpio > AR5K_NUM_GPIO)
4363 return -EINVAL;
4364
4365 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &~
4366 AR5K_GPIOCR_OUT(gpio)) | AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
4367
4368 return 0;
4369}
4370
4371/*
4372 * Set GPIO inputs
4373 */
4374int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
4375{
4376 ATH5K_TRACE(ah->ah_sc);
4377 if (gpio > AR5K_NUM_GPIO)
4378 return -EINVAL;
4379
4380 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &~
4381 AR5K_GPIOCR_OUT(gpio)) | AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
4382
4383 return 0;
4384}
4385
4386/*
4387 * Get GPIO state
4388 */
4389u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
4390{
4391 ATH5K_TRACE(ah->ah_sc);
4392 if (gpio > AR5K_NUM_GPIO)
4393 return 0xffffffff;
4394
4395 /* GPIO input magic */
4396 return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
4397 0x1;
4398}
4399
4400/*
4401 * Set GPIO state
4402 */
4403int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
4404{
4405 u32 data;
4406 ATH5K_TRACE(ah->ah_sc);
4407
4408 if (gpio > AR5K_NUM_GPIO)
4409 return -EINVAL;
4410
4411 /* GPIO output magic */
4412 data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
4413
4414 data &= ~(1 << gpio);
4415 data |= (val & 1) << gpio;
4416
4417 ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
4418
4419 return 0;
4420}
4421
4422/*
4423 * Initialize the GPIO interrupt (RFKill switch)
4424 */
4425void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
4426 u32 interrupt_level)
4427{
4428 u32 data;
4429
4430 ATH5K_TRACE(ah->ah_sc);
4431 if (gpio > AR5K_NUM_GPIO)
4432 return;
4433
4434 /*
4435 * Set the GPIO interrupt
4436 */
4437 data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
4438 ~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
4439 AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
4440 (AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
4441
4442 ath5k_hw_reg_write(ah, interrupt_level ? data :
4443 (data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
4444
4445 ah->ah_imr |= AR5K_IMR_GPIO;
4446
4447 /* Enable GPIO interrupts */
4448 AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
4449}
4450
4451
4452
4453
4454/****************\
4455 Misc functions
4456\****************/
4457
4458int ath5k_hw_get_capability(struct ath5k_hw *ah,
4459 enum ath5k_capability_type cap_type,
4460 u32 capability, u32 *result)
4461{
4462 ATH5K_TRACE(ah->ah_sc);
4463
4464 switch (cap_type) {
4465 case AR5K_CAP_NUM_TXQUEUES:
4466 if (result) {
4467 if (ah->ah_version == AR5K_AR5210)
4468 *result = AR5K_NUM_TX_QUEUES_NOQCU;
4469 else
4470 *result = AR5K_NUM_TX_QUEUES;
4471 goto yes;
4472 }
4473 case AR5K_CAP_VEOL:
4474 goto yes;
4475 case AR5K_CAP_COMPRESSION:
4476 if (ah->ah_version == AR5K_AR5212)
4477 goto yes;
4478 else
4479 goto no;
4480 case AR5K_CAP_BURST:
4481 goto yes;
4482 case AR5K_CAP_TPC:
4483 goto yes;
4484 case AR5K_CAP_BSSIDMASK:
4485 if (ah->ah_version == AR5K_AR5212)
4486 goto yes;
4487 else
4488 goto no;
4489 case AR5K_CAP_XR:
4490 if (ah->ah_version == AR5K_AR5212)
4491 goto yes;
4492 else
4493 goto no;
4494 default:
4495 goto no;
4496 }
4497
4498no:
4499 return -EINVAL;
4500yes:
4501 return 0;
4502}
4503
4504static int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
4505 u16 assoc_id)
4506{
4507 ATH5K_TRACE(ah->ah_sc);
4508
4509 if (ah->ah_version == AR5K_AR5210) {
4510 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
4511 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
4512 return 0;
4513 }
4514
4515 return -EIO;
4516}
4517
4518static int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
4519{
4520 ATH5K_TRACE(ah->ah_sc);
4521
4522 if (ah->ah_version == AR5K_AR5210) {
4523 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
4524 AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
4525 return 0;
4526 }
4527
4528 return -EIO;
4529}
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 2806b21bf90b..ea2e1a20b499 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * Initial register settings functions 2 * Initial register settings functions
3 * 3 *
4 * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006, 2007 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * 7 *
8 * Permission to use, copy, modify, and distribute this software for any 8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above 9 * purpose with or without fee is hereby granted, provided that the above
@@ -20,13 +20,9 @@
20 */ 20 */
21 21
22#include "ath5k.h" 22#include "ath5k.h"
23#include "base.h"
24#include "reg.h" 23#include "reg.h"
25 24#include "debug.h"
26/* 25#include "base.h"
27 * MAC/PHY REGISTERS
28 */
29
30 26
31/* 27/*
32 * Mode-independent initial register writes 28 * Mode-independent initial register writes
@@ -65,10 +61,10 @@ static const struct ath5k_ini ar5210_ini[] = {
65 { AR5K_TXCFG, AR5K_DMASIZE_128B }, 61 { AR5K_TXCFG, AR5K_DMASIZE_128B },
66 { AR5K_RXCFG, AR5K_DMASIZE_128B }, 62 { AR5K_RXCFG, AR5K_DMASIZE_128B },
67 { AR5K_CFG, AR5K_INIT_CFG }, 63 { AR5K_CFG, AR5K_INIT_CFG },
68 { AR5K_TOPS, AR5K_INIT_TOPS }, 64 { AR5K_TOPS, 8 },
69 { AR5K_RXNOFRM, AR5K_INIT_RXNOFRM }, 65 { AR5K_RXNOFRM, 8 },
70 { AR5K_RPGTO, AR5K_INIT_RPGTO }, 66 { AR5K_RPGTO, 0 },
71 { AR5K_TXNOFRM, AR5K_INIT_TXNOFRM }, 67 { AR5K_TXNOFRM, 0 },
72 { AR5K_SFR, 0 }, 68 { AR5K_SFR, 0 },
73 { AR5K_MIBC, 0 }, 69 { AR5K_MIBC, 0 },
74 { AR5K_MISC, 0 }, 70 { AR5K_MISC, 0 },
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath5k/pcu.c
new file mode 100644
index 000000000000..a47df9a24aa1
--- /dev/null
+++ b/drivers/net/wireless/ath5k/pcu.c
@@ -0,0 +1,1014 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org>
5 * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
6 * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
7 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 *
21 */
22
23/*********************************\
24* Protocol Control Unit Functions *
25\*********************************/
26
27#include "ath5k.h"
28#include "reg.h"
29#include "debug.h"
30#include "base.h"
31
32/*******************\
33* Generic functions *
34\*******************/
35
/**
 * ath5k_hw_set_opmode - Set PCU operating mode
 *
 * @ah: The &struct ath5k_hw
 *
 * Initialize PCU for the various operating modes (AP/STA etc)
 *
 * Returns 0 on success or -EINVAL for an unsupported interface type.
 *
 * NOTE: ah->ah_op_mode must be set before calling this.
 */
int ath5k_hw_set_opmode(struct ath5k_hw *ah)
{
	u32 pcu_reg, beacon_reg, low_id, high_id;

	pcu_reg = 0;
	beacon_reg = 0;

	ATH5K_TRACE(ah->ah_sc);

	switch (ah->ah_op_mode) {
	case NL80211_IFTYPE_ADHOC:
		pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_DESC_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
			AR5K_STA_ID1_NO_PSPOLL : 0);
		beacon_reg |= AR5K_BCR_ADHOC;
		break;

	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_RTS_DEF_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
			AR5K_STA_ID1_NO_PSPOLL : 0);
		beacon_reg |= AR5K_BCR_AP;
		break;

	case NL80211_IFTYPE_STATION:
		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
			AR5K_STA_ID1_PWR_SV : 0);
		/* fallthrough — a station also gets the monitor bits
		 * below (NO_PSPOLL on 5210); NOTE(review): confirm this
		 * fallthrough is intentional */
	case NL80211_IFTYPE_MONITOR:
		pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA |
			(ah->ah_version == AR5K_AR5210 ?
			AR5K_STA_ID1_NO_PSPOLL : 0);
		break;

	default:
		return -EINVAL;
	}

	/*
	 * Set PCU registers (station id low/high plus the mode bits)
	 */
	low_id = AR5K_LOW_ID(ah->ah_sta_id);
	high_id = AR5K_HIGH_ID(ah->ah_sta_id);
	ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
	ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);

	/*
	 * Set Beacon Control Register on 5210
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);

	return 0;
}
100
/**
 * ath5k_hw_update_mib_counters - Update mib counters (mac layer statistics)
 *
 * @ah: The &struct ath5k_hw
 * @stats: The &struct ieee80211_low_level_stats we use to track
 * statistics on the driver
 *
 * Reads MIB counters from PCU and updates sw statistics. Must be
 * called after a MIB interrupt.
 */
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
		struct ieee80211_low_level_stats *stats)
{
	ATH5K_TRACE(ah->ah_sc);

	/* Read-And-Clear: each register read also resets the hw counter,
	 * so every value is accumulated into the software stats */
	stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
	stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
	stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
	stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);

	/* XXX: Should we use this to track beacon count ?
	 * -we read it anyway to clear the register */
	ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);

	/* Reset profile count registers on 5212 */
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
		ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
	}
}
134
135/**
136 * ath5k_hw_set_ack_bitrate - set bitrate for ACKs
137 *
138 * @ah: The &struct ath5k_hw
139 * @high: Flag to determine if we want to use high transmition rate
140 * for ACKs or not
141 *
142 * If high flag is set, we tell hw to use a set of control rates based on
143 * the current transmition rate (check out control_rates array inside reset.c).
144 * If not hw just uses the lowest rate available for the current modulation
145 * scheme being used (1Mbit for CCK and 6Mbits for OFDM).
146 */
147void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
148{
149 if (ah->ah_version != AR5K_AR5212)
150 return;
151 else {
152 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
153 if (high)
154 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
155 else
156 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
157 }
158}
159
160
161/******************\
162* ACK/CTS Timeouts *
163\******************/
164
165/**
166 * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
167 *
168 * @ah: The &struct ath5k_hw
169 */
170unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
171{
172 ATH5K_TRACE(ah->ah_sc);
173
174 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
175 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
176}
177
178/**
179 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
180 *
181 * @ah: The &struct ath5k_hw
182 * @timeout: Timeout in usec
183 */
184int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
185{
186 ATH5K_TRACE(ah->ah_sc);
187 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
188 ah->ah_turbo) <= timeout)
189 return -EINVAL;
190
191 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
192 ath5k_hw_htoclock(timeout, ah->ah_turbo));
193
194 return 0;
195}
196
197/**
198 * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
199 *
200 * @ah: The &struct ath5k_hw
201 */
202unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
203{
204 ATH5K_TRACE(ah->ah_sc);
205 return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
206 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
207}
208
209/**
210 * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
211 *
212 * @ah: The &struct ath5k_hw
213 * @timeout: Timeout in usec
214 */
215int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
216{
217 ATH5K_TRACE(ah->ah_sc);
218 if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
219 ah->ah_turbo) <= timeout)
220 return -EINVAL;
221
222 AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
223 ath5k_hw_htoclock(timeout, ah->ah_turbo));
224
225 return 0;
226}
227
228
229/****************\
230* BSSID handling *
231\****************/
232
233/**
234 * ath5k_hw_get_lladdr - Get station id
235 *
236 * @ah: The &struct ath5k_hw
237 * @mac: The card's mac address
238 *
239 * Initialize ah->ah_sta_id using the mac address provided
240 * (just a memcpy).
241 *
242 * TODO: Remove it once we merge ath5k_softc and ath5k_hw
243 */
244void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
245{
246 ATH5K_TRACE(ah->ah_sc);
247 memcpy(mac, ah->ah_sta_id, ETH_ALEN);
248}
249
250/**
251 * ath5k_hw_set_lladdr - Set station id
252 *
253 * @ah: The &struct ath5k_hw
254 * @mac: The card's mac address
255 *
256 * Set station id on hw using the provided mac address
257 */
258int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
259{
260 u32 low_id, high_id;
261
262 ATH5K_TRACE(ah->ah_sc);
263 /* Set new station ID */
264 memcpy(ah->ah_sta_id, mac, ETH_ALEN);
265
266 low_id = AR5K_LOW_ID(mac);
267 high_id = AR5K_HIGH_ID(mac);
268
269 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
270 ath5k_hw_reg_write(ah, high_id, AR5K_STA_ID1);
271
272 return 0;
273}
274
/**
 * ath5k_hw_set_associd - Set BSSID for association
 *
 * @ah: The &struct ath5k_hw
 * @bssid: BSSID
 * @assoc_id: Assoc id
 *
 * Sets the BSSID which triggers the "SME Join" operation
 */
void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
{
	u32 low_id, high_id;
	/* NOTE(review): tim_offset is always 0 here, so the TIM field
	 * below is always cleared — presumably a placeholder until the
	 * caller supplies a real TIM offset */
	u16 tim_offset = 0;

	/*
	 * Set simple BSSID mask on 5212 (all-ones = match every bit)
	 */
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0);
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1);
	}

	/*
	 * Set BSSID which triggers the "SME Join" operation
	 */
	low_id = AR5K_LOW_ID(bssid);
	high_id = AR5K_HIGH_ID(bssid);
	ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0);
	ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) <<
		AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1);

	/* No association: make sure PS-Poll is disabled and bail out */
	if (assoc_id == 0) {
		ath5k_hw_disable_pspoll(ah);
		return;
	}

	AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
		tim_offset ? tim_offset + 4 : 0);

	ath5k_hw_enable_pspoll(ah, NULL, 0);
}
316
317/**
318 * ath5k_hw_set_bssid_mask - filter out bssids we listen
319 *
320 * @ah: the &struct ath5k_hw
321 * @mask: the bssid_mask, a u8 array of size ETH_ALEN
322 *
323 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
324 * which bits of the interface's MAC address should be looked at when trying
325 * to decide which packets to ACK. In station mode and AP mode with a single
326 * BSS every bit matters since we lock to only one BSS. In AP mode with
327 * multiple BSSes (virtual interfaces) not every bit matters because hw must
328 * accept frames for all BSSes and so we tweak some bits of our mac address
329 * in order to have multiple BSSes.
330 *
331 * NOTE: This is a simple filter and does *not* filter out all
332 * relevant frames. Some frames that are not for us might get ACKed from us
333 * by PCU because they just match the mask.
334 *
335 * When handling multiple BSSes you can get the BSSID mask by computing the
336 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
337 *
338 * When you do this you are essentially computing the common bits of all your
 * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
340 * the MAC address to obtain the relevant bits and compare the result with
341 * (frame's BSSID & mask) to see if they match.
342 */
343/*
 * Simple example: on your card you have two BSSes you have created with
345 * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
346 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
347 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
348 *
349 * \
350 * MAC: 0001 |
351 * BSSID-01: 0100 | --> Belongs to us
352 * BSSID-02: 1001 |
353 * /
354 * -------------------
355 * BSSID-03: 0110 | --> External
356 * -------------------
357 *
358 * Our bssid_mask would then be:
359 *
360 * On loop iteration for BSSID-01:
361 * ~(0001 ^ 0100) -> ~(0101)
362 * -> 1010
363 * bssid_mask = 1010
364 *
365 * On loop iteration for BSSID-02:
366 * bssid_mask &= ~(0001 ^ 1001)
367 * bssid_mask = (1010) & ~(0001 ^ 1001)
368 * bssid_mask = (1010) & ~(1001)
369 * bssid_mask = (1010) & (0110)
370 * bssid_mask = 0010
371 *
372 * A bssid_mask of 0010 means "only pay attention to the second least
373 * significant bit". This is because its the only bit common
374 * amongst the MAC and all BSSIDs we support. To findout what the real
375 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
376 * or our MAC address (we assume the hardware uses the MAC address).
377 *
378 * Now, suppose there's an incoming frame for BSSID-03:
379 *
380 * IFRAME-01: 0110
381 *
 * An easy eye-inspection of this already should tell you that this frame
 * will not pass our check. This is because the bssid_mask tells the
384 * hardware to only look at the second least significant bit and the
385 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
386 * as 1, which does not match 0.
387 *
388 * So with IFRAME-01 we *assume* the hardware will do:
389 *
390 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
391 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
392 * --> allow = (0010) == 0000 ? 1 : 0;
393 * --> allow = 0
394 *
395 * Lets now test a frame that should work:
396 *
397 * IFRAME-02: 0001 (we should allow)
398 *
399 * allow = (0001 & 1010) == 1010
400 *
401 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
402 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
403 * --> allow = (0010) == (0010)
404 * --> allow = 1
405 *
406 * Other examples:
407 *
408 * IFRAME-03: 0100 --> allowed
409 * IFRAME-04: 1001 --> allowed
410 * IFRAME-05: 1101 --> allowed but its not for us!!!
411 *
412 */
413int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
414{
415 u32 low_id, high_id;
416 ATH5K_TRACE(ah->ah_sc);
417
418 if (ah->ah_version == AR5K_AR5212) {
419 low_id = AR5K_LOW_ID(mask);
420 high_id = AR5K_HIGH_ID(mask);
421
422 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
423 ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
424
425 return 0;
426 }
427
428 return -EIO;
429}
430
431
432/************\
433* RX Control *
434\************/
435
436/**
437 * ath5k_hw_start_rx_pcu - Start RX engine
438 *
439 * @ah: The &struct ath5k_hw
440 *
441 * Starts RX engine on PCU so that hw can process RXed frames
442 * (ACK etc).
443 *
444 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
445 * TODO: Init ANI here
446 */
447void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
448{
449 ATH5K_TRACE(ah->ah_sc);
450 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
451}
452
453/**
454 * at5k_hw_stop_rx_pcu - Stop RX engine
455 *
456 * @ah: The &struct ath5k_hw
457 *
458 * Stops RX engine on PCU
459 *
460 * TODO: Detach ANI here
461 */
462void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
463{
464 ATH5K_TRACE(ah->ah_sc);
465 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
466}
467
468/*
469 * Set multicast filter
470 */
471void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
472{
473 ATH5K_TRACE(ah->ah_sc);
474 /* Set the multicat filter */
475 ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
476 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
477}
478
479/*
480 * Set multicast filter by index
481 */
482int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
483{
484
485 ATH5K_TRACE(ah->ah_sc);
486 if (index >= 64)
487 return -EINVAL;
488 else if (index >= 32)
489 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
490 (1 << (index - 32)));
491 else
492 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
493
494 return 0;
495}
496
497/*
498 * Clear Multicast filter by index
499 */
500int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
501{
502
503 ATH5K_TRACE(ah->ah_sc);
504 if (index >= 64)
505 return -EINVAL;
506 else if (index >= 32)
507 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
508 (1 << (index - 32)));
509 else
510 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
511
512 return 0;
513}
514
515/**
516 * ath5k_hw_get_rx_filter - Get current rx filter
517 *
518 * @ah: The &struct ath5k_hw
519 *
520 * Returns the RX filter by reading rx filter and
521 * phy error filter registers. RX filter is used
522 * to set the allowed frame types that PCU will accept
523 * and pass to the driver. For a list of frame types
524 * check out reg.h.
525 */
526u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
527{
528 u32 data, filter = 0;
529
530 ATH5K_TRACE(ah->ah_sc);
531 filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
532
533 /*Radar detection for 5212*/
534 if (ah->ah_version == AR5K_AR5212) {
535 data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
536
537 if (data & AR5K_PHY_ERR_FIL_RADAR)
538 filter |= AR5K_RX_FILTER_RADARERR;
539 if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
540 filter |= AR5K_RX_FILTER_PHYERR;
541 }
542
543 return filter;
544}
545
/**
 * ath5k_hw_set_rx_filter - Set rx filter
 *
 * @ah: The &struct ath5k_hw
 * @filter: RX filter mask (see reg.h)
 *
 * Sets RX filter register and also handles PHY error filter
 * register on 5212 and newer chips so that we have proper PHY
 * error reporting.
 */
void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
	u32 data = 0;

	ATH5K_TRACE(ah->ah_sc);

	/* Translate the generic radar/PHY-error filter bits into the
	 * 5212 PHY error filter register value */
	if (ah->ah_version == AR5K_AR5212) {
		if (filter & AR5K_RX_FILTER_RADARERR)
			data |= AR5K_PHY_ERR_FIL_RADAR;
		if (filter & AR5K_RX_FILTER_PHYERR)
			data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
	}

	/*
	 * The AR5210 uses promiscous mode to detect radar activity,
	 * so map the radar bit to promiscuous mode there
	 */
	if (ah->ah_version == AR5K_AR5210 &&
			(filter & AR5K_RX_FILTER_RADARERR)) {
		filter &= ~AR5K_RX_FILTER_RADARERR;
		filter |= AR5K_RX_FILTER_PROM;
	}

	/* Zero length DMA — enabled only when PHY error reporting is
	 * requested (data != 0) */
	if (data)
		AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
	else
		AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);

	/* Write RX Filter register (only the low byte is meaningful) */
	ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);

	/* Write PHY error filter register on 5212 */
	if (ah->ah_version == AR5K_AR5212)
		ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);

}
593
594
595/****************\
596* Beacon control *
597\****************/
598
599/**
600 * ath5k_hw_get_tsf32 - Get a 32bit TSF
601 *
602 * @ah: The &struct ath5k_hw
603 *
604 * Returns lower 32 bits of current TSF
605 */
606u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
607{
608 ATH5K_TRACE(ah->ah_sc);
609 return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
610}
611
/**
 * ath5k_hw_get_tsf64 - Get the full 64bit TSF
 *
 * @ah: The &struct ath5k_hw
 *
 * Returns the current TSF
 *
 * NOTE(review): the upper and lower halves come from two separate
 * register reads, so a carry from L32 into U32 between the reads
 * would return a TSF off by ~2^32 — consider re-reading U32 and
 * retrying if it changed.
 */
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
	u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
	ATH5K_TRACE(ah->ah_sc);

	return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32);
}
626
627/**
628 * ath5k_hw_reset_tsf - Force a TSF reset
629 *
630 * @ah: The &struct ath5k_hw
631 *
632 * Forces a TSF reset on PCU
633 */
634void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
635{
636 u32 val;
637
638 ATH5K_TRACE(ah->ah_sc);
639
640 val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
641
642 /*
643 * Each write to the RESET_TSF bit toggles a hardware internal
644 * signal to reset TSF, but if left high it will cause a TSF reset
645 * on the next chip reset as well. Thus we always write the value
646 * twice to clear the signal.
647 */
648 ath5k_hw_reg_write(ah, val, AR5K_BEACON);
649 ath5k_hw_reg_write(ah, val, AR5K_BEACON);
650}
651
/*
 * Initialize beacon timers.
 *
 * @next_beacon: TU value of the next beacon
 * @interval: beacon interval bits (masked against the BEACON register
 *	fields before being written)
 */
void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
	u32 timer1, timer2, timer3;

	ATH5K_TRACE(ah->ah_sc);
	/*
	 * Set the additional timers by mode
	 */
	switch (ah->ah_op_mode) {
	case NL80211_IFTYPE_STATION:
		/* In station mode the DMA/SW beacon timers are effectively
		 * disabled (all-ones on 5210, large values on 5211+) */
		if (ah->ah_version == AR5K_AR5210) {
			timer1 = 0xffffffff;
			timer2 = 0xffffffff;
		} else {
			timer1 = 0x0000ffff;
			timer2 = 0x0007ffff;
		}
		break;

	default:
		/* AP/IBSS: fire the DMA/SW beacon alerts a tuned amount
		 * before the next beacon (values are in TU/8, hence << 3) */
		timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
		timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
	}

	/* ATIM window timer; a zero window still gets 1 TU */
	timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);

	/*
	 * Set the beacon register and enable all timers.
	 * (next beacon, DMA beacon, software beacon, ATIM window time)
	 */
	ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
	ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
	ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
	ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);

	ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
		AR5K_BEACON_RESET_TSF | AR5K_BEACON_ENABLE),
		AR5K_BEACON);
}
694
695#if 0
/*
 * Set beacon timers (currently compiled out under #if 0).
 *
 * Programs PCF/CFP, beacon, beacon-miss and (on 5212) enhanced sleep
 * registers from the given beacon state. Returns 0 on success or
 * -EINVAL for an invalid state.
 */
int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
	const struct ath5k_beacon_state *state)
{
	u32 cfp_period, next_cfp, dtim, interval, next_beacon;

	/*
	 * TODO: should be changed through *state
	 * review struct ath5k_beacon_state struct
	 *
	 * XXX: These are used for cfp period below, are they
	 * ok ? Is it O.K. for tsf here to be 0 or should we use
	 * get_tsf ?
	 */
	u32 dtim_count = 0; /* XXX */
	u32 cfp_count = 0; /* XXX */
	u32 tsf = 0; /* XXX */

	ATH5K_TRACE(ah->ah_sc);
	/* Return on an invalid beacon state */
	if (state->bs_interval < 1)
		return -EINVAL;

	interval = state->bs_interval;
	dtim = state->bs_dtim_period;

	/*
	 * PCF support?
	 */
	if (state->bs_cfp_period > 0) {
		/*
		 * Enable PCF mode and set the CFP
		 * (Contention Free Period) and timer registers
		 */
		cfp_period = state->bs_cfp_period * state->bs_dtim_period *
			state->bs_interval;
		next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
			state->bs_interval;

		AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
			AR5K_STA_ID1_DEFAULT_ANTENNA |
			AR5K_STA_ID1_PCF);
		ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
		ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
			AR5K_CFP_DUR);
		/* Timer values are written in TU/8 units, hence << 3 */
		ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
			next_cfp)) << 3, AR5K_TIMER2);
	} else {
		/* Disable PCF mode */
		AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
			AR5K_STA_ID1_DEFAULT_ANTENNA |
			AR5K_STA_ID1_PCF);
	}

	/*
	 * Enable the beacon timer register
	 */
	ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);

	/*
	 * Start the beacon timers
	 */
	ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
		~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
		AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
		AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
		AR5K_BEACON_PERIOD), AR5K_BEACON);

	/*
	 * Write new beacon miss threshold, if it appears to be valid
	 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
	 * and return if its not in range. We can test this by reading value and
	 * setting value to a largest value and seeing which values register.
	 */

	AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
		state->bs_bmiss_threshold);

	/*
	 * Set sleep control register
	 * XXX: Didn't find this in 5210 code but since this register
	 * exists also in ar5k's 5210 headers i leave it as common code.
	 */
	AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
		(state->bs_sleep_duration - 3) << 3);

	/*
	 * Set enhanced sleep registers on 5212
	 */
	if (ah->ah_version == AR5K_AR5212) {
		/* Stretch the TIM period to the sleep duration when the
		 * latter is an exact multiple of the beacon interval */
		if (state->bs_sleep_duration > state->bs_interval &&
			roundup(state->bs_sleep_duration, interval) ==
			state->bs_sleep_duration)
			interval = state->bs_sleep_duration;

		/* Likewise for the DTIM period */
		if (state->bs_sleep_duration > dtim && (dtim == 0 ||
			roundup(state->bs_sleep_duration, dtim) ==
			state->bs_sleep_duration))
			dtim = state->bs_sleep_duration;

		/* TIM period may never exceed the DTIM period */
		if (interval > dtim)
			return -EINVAL;

		next_beacon = interval == dtim ? state->bs_next_dtim :
			state->bs_next_beacon;

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
			AR5K_SLEEP0_NEXT_DTIM) |
			AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
			AR5K_SLEEP0_ENH_SLEEP_EN |
			AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);

		ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
			AR5K_SLEEP1_NEXT_TIM) |
			AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);

		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
			AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
	}

	return 0;
}
822
823/*
824 * Reset beacon timers
825 */
826void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
827{
828 ATH5K_TRACE(ah->ah_sc);
829 /*
830 * Disable beacon timer
831 */
832 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
833
834 /*
835 * Disable some beacon register values
836 */
837 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
838 AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
839 ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
840}
841
842/*
843 * Wait for beacon queue to finish
844 */
845int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
846{
847 unsigned int i;
848 int ret;
849
850 ATH5K_TRACE(ah->ah_sc);
851
852 /* 5210 doesn't have QCU*/
853 if (ah->ah_version == AR5K_AR5210) {
854 /*
855 * Wait for beaconn queue to finish by checking
856 * Control Register and Beacon Status Register.
857 */
858 for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
859 if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
860 ||
861 !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
862 break;
863 udelay(10);
864 }
865
866 /* Timeout... */
867 if (i <= 0) {
868 /*
869 * Re-schedule the beacon queue
870 */
871 ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
872 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
873 AR5K_BCR);
874
875 return -EIO;
876 }
877 ret = 0;
878 } else {
879 /*5211/5212*/
880 ret = ath5k_hw_register_timeout(ah,
881 AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
882 AR5K_QCU_STS_FRMPENDCNT, 0, false);
883
884 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
885 return -EIO;
886 }
887
888 return ret;
889}
890#endif
891
892
893/*********************\
894* Key table functions *
895\*********************/
896
897/*
898 * Reset a key entry on the table
899 */
900int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
901{
902 unsigned int i;
903
904 ATH5K_TRACE(ah->ah_sc);
905 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
906
907 for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
908 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
909
910 /*
911 * Set NULL encryption on AR5212+
912 *
913 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
914 * AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
915 *
916 * Note2: Windows driver (ndiswrapper) sets this to
917 * 0x00000714 instead of 0x00000007
918 */
919 if (ah->ah_version > AR5K_AR5211)
920 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
921 AR5K_KEYTABLE_TYPE(entry));
922
923 return 0;
924}
925
926/*
927 * Check if a table entry is valid
928 */
929int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
930{
931 ATH5K_TRACE(ah->ah_sc);
932 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
933
934 /* Check the validation flag at the end of the entry */
935 return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
936 AR5K_KEYTABLE_VALID;
937}
938
939/*
940 * Set a key entry on the table
941 */
942int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
943 const struct ieee80211_key_conf *key, const u8 *mac)
944{
945 unsigned int i;
946 __le32 key_v[5] = {};
947 u32 keytype;
948
949 ATH5K_TRACE(ah->ah_sc);
950
951 /* key->keylen comes in from mac80211 in bytes */
952
953 if (key->keylen > AR5K_KEYTABLE_SIZE / 8)
954 return -EOPNOTSUPP;
955
956 switch (key->keylen) {
957 /* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit */
958 case 40 / 8:
959 memcpy(&key_v[0], key->key, 5);
960 keytype = AR5K_KEYTABLE_TYPE_40;
961 break;
962
963 /* WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit */
964 case 104 / 8:
965 memcpy(&key_v[0], &key->key[0], 6);
966 memcpy(&key_v[2], &key->key[6], 6);
967 memcpy(&key_v[4], &key->key[12], 1);
968 keytype = AR5K_KEYTABLE_TYPE_104;
969 break;
970 /* WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit */
971 case 128 / 8:
972 memcpy(&key_v[0], &key->key[0], 6);
973 memcpy(&key_v[2], &key->key[6], 6);
974 memcpy(&key_v[4], &key->key[12], 4);
975 keytype = AR5K_KEYTABLE_TYPE_128;
976 break;
977
978 default:
979 return -EINVAL; /* shouldn't happen */
980 }
981
982 for (i = 0; i < ARRAY_SIZE(key_v); i++)
983 ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
984 AR5K_KEYTABLE_OFF(entry, i));
985
986 ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));
987
988 return ath5k_hw_set_key_lladdr(ah, entry, mac);
989}
990
991int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
992{
993 u32 low_id, high_id;
994
995 ATH5K_TRACE(ah->ah_sc);
996 /* Invalid entry (key table overflow) */
997 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
998
999 /* MAC may be NULL if it's a broadcast key. In this case no need to
1000 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */
1001 if (unlikely(mac == NULL)) {
1002 low_id = 0xffffffff;
1003 high_id = 0xffff | AR5K_KEYTABLE_VALID;
1004 } else {
1005 low_id = AR5K_LOW_ID(mac);
1006 high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID;
1007 }
1008
1009 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
1010 ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));
1011
1012 return 0;
1013}
1014
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index fa0d47faf574..e43f6563e61a 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -1,9 +1,9 @@
1/* 1/*
2 * PHY functions 2 * PHY functions
3 * 3 *
4 * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> 4 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2006, 2007 Nick Kossifidis <mickflemm@gmail.com> 5 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
6 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 * 7 *
8 * Permission to use, copy, modify, and distribute this software for any 8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above 9 * purpose with or without fee is hereby granted, provided that the above
@@ -19,6 +19,8 @@
19 * 19 *
20 */ 20 */
21 21
22#define _ATH5K_PHY
23
22#include <linux/delay.h> 24#include <linux/delay.h>
23 25
24#include "ath5k.h" 26#include "ath5k.h"
@@ -2122,7 +2124,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
2122 beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210); 2124 beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
2123 ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210); 2125 ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
2124 2126
2125 udelay(2300); 2127 mdelay(2);
2126 2128
2127 /* 2129 /*
2128 * Set the channel (with AGC turned off) 2130 * Set the channel (with AGC turned off)
@@ -2501,3 +2503,5 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, unsigned int power)
2501 2503
2502 return ath5k_hw_txpower(ah, channel, power); 2504 return ath5k_hw_txpower(ah, channel, power);
2503} 2505}
2506
2507#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath5k/qcu.c b/drivers/net/wireless/ath5k/qcu.c
new file mode 100644
index 000000000000..01bf09176d23
--- /dev/null
+++ b/drivers/net/wireless/ath5k/qcu.c
@@ -0,0 +1,488 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 *
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 *
17 */
18
19/********************************************\
20Queue Control Unit, DFS Control Unit Functions
21\********************************************/
22
23#include "ath5k.h"
24#include "reg.h"
25#include "debug.h"
26#include "base.h"
27
28/*
29 * Get properties for a transmit queue
30 */
31int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
32 struct ath5k_txq_info *queue_info)
33{
34 ATH5K_TRACE(ah->ah_sc);
35 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
36 return 0;
37}
38
39/*
40 * Set properties for a transmit queue
41 */
42int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
43 const struct ath5k_txq_info *queue_info)
44{
45 ATH5K_TRACE(ah->ah_sc);
46 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
47
48 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
49 return -EIO;
50
51 memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
52
53 /*XXX: Is this supported on 5210 ?*/
54 if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
55 ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
56 (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
57 queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
58 ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
59
60 return 0;
61}
62
63/*
64 * Initialize a transmit queue
65 */
66int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
67 struct ath5k_txq_info *queue_info)
68{
69 unsigned int queue;
70 int ret;
71
72 ATH5K_TRACE(ah->ah_sc);
73
74 /*
75 * Get queue by type
76 */
77 /*5210 only has 2 queues*/
78 if (ah->ah_version == AR5K_AR5210) {
79 switch (queue_type) {
80 case AR5K_TX_QUEUE_DATA:
81 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
82 break;
83 case AR5K_TX_QUEUE_BEACON:
84 case AR5K_TX_QUEUE_CAB:
85 queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
86 break;
87 default:
88 return -EINVAL;
89 }
90 } else {
91 switch (queue_type) {
92 case AR5K_TX_QUEUE_DATA:
93 for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
94 ah->ah_txq[queue].tqi_type !=
95 AR5K_TX_QUEUE_INACTIVE; queue++) {
96
97 if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
98 return -EINVAL;
99 }
100 break;
101 case AR5K_TX_QUEUE_UAPSD:
102 queue = AR5K_TX_QUEUE_ID_UAPSD;
103 break;
104 case AR5K_TX_QUEUE_BEACON:
105 queue = AR5K_TX_QUEUE_ID_BEACON;
106 break;
107 case AR5K_TX_QUEUE_CAB:
108 queue = AR5K_TX_QUEUE_ID_CAB;
109 break;
110 case AR5K_TX_QUEUE_XR_DATA:
111 if (ah->ah_version != AR5K_AR5212)
112 ATH5K_ERR(ah->ah_sc,
113 "XR data queues only supported in"
114 " 5212!\n");
115 queue = AR5K_TX_QUEUE_ID_XR_DATA;
116 break;
117 default:
118 return -EINVAL;
119 }
120 }
121
122 /*
123 * Setup internal queue structure
124 */
125 memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
126 ah->ah_txq[queue].tqi_type = queue_type;
127
128 if (queue_info != NULL) {
129 queue_info->tqi_type = queue_type;
130 ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
131 if (ret)
132 return ret;
133 }
134
135 /*
136 * We use ah_txq_status to hold a temp value for
137 * the Secondary interrupt mask registers on 5211+
138 * check out ath5k_hw_reset_tx_queue
139 */
140 AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
141
142 return queue;
143}
144
145/*
146 * Get number of pending frames
147 * for a specific queue [5211+]
148 */
149u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
150{
151 ATH5K_TRACE(ah->ah_sc);
152 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
153
154 /* Return if queue is declared inactive */
155 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
156 return false;
157
158 /* XXX: How about AR5K_CFG_TXCNT ? */
159 if (ah->ah_version == AR5K_AR5210)
160 return false;
161
162 return AR5K_QUEUE_STATUS(queue) & AR5K_QCU_STS_FRMPENDCNT;
163}
164
165/*
166 * Set a transmit queue inactive
167 */
168void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
169{
170 ATH5K_TRACE(ah->ah_sc);
171 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
172 return;
173
174 /* This queue will be skipped in further operations */
175 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
176 /*For SIMR setup*/
177 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
178}
179
180/*
181 * Set DFS properties for a transmit queue on DCU
182 */
183int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
184{
185 u32 cw_min, cw_max, retry_lg, retry_sh;
186 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
187
188 ATH5K_TRACE(ah->ah_sc);
189 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
190
191 tq = &ah->ah_txq[queue];
192
193 if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
194 return 0;
195
196 if (ah->ah_version == AR5K_AR5210) {
197 /* Only handle data queues, others will be ignored */
198 if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
199 return 0;
200
201 /* Set Slot time */
202 ath5k_hw_reg_write(ah, ah->ah_turbo ?
203 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
204 AR5K_SLOT_TIME);
205 /* Set ACK_CTS timeout */
206 ath5k_hw_reg_write(ah, ah->ah_turbo ?
207 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
208 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
209 /* Set Transmit Latency */
210 ath5k_hw_reg_write(ah, ah->ah_turbo ?
211 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
212 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
213
214 /* Set IFS0 */
215 if (ah->ah_turbo) {
216 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
217 (ah->ah_aifs + tq->tqi_aifs) *
218 AR5K_INIT_SLOT_TIME_TURBO) <<
219 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
220 AR5K_IFS0);
221 } else {
222 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
223 (ah->ah_aifs + tq->tqi_aifs) *
224 AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
225 AR5K_INIT_SIFS, AR5K_IFS0);
226 }
227
228 /* Set IFS1 */
229 ath5k_hw_reg_write(ah, ah->ah_turbo ?
230 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
231 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
232 /* Set AR5K_PHY_SETTLING */
233 ath5k_hw_reg_write(ah, ah->ah_turbo ?
234 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
235 | 0x38 :
236 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
237 | 0x1C,
238 AR5K_PHY_SETTLING);
239 /* Set Frame Control Register */
240 ath5k_hw_reg_write(ah, ah->ah_turbo ?
241 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
242 AR5K_PHY_TURBO_SHORT | 0x2020) :
243 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
244 AR5K_PHY_FRAME_CTL_5210);
245 }
246
247 /*
248 * Calculate cwmin/max by channel mode
249 */
250 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
251 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
252 ah->ah_aifs = AR5K_TUNE_AIFS;
253 /*XR is only supported on 5212*/
254 if (IS_CHAN_XR(ah->ah_current_channel) &&
255 ah->ah_version == AR5K_AR5212) {
256 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
257 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
258 ah->ah_aifs = AR5K_TUNE_AIFS_XR;
259 /*B mode is not supported on 5210*/
260 } else if (IS_CHAN_B(ah->ah_current_channel) &&
261 ah->ah_version != AR5K_AR5210) {
262 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
263 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
264 ah->ah_aifs = AR5K_TUNE_AIFS_11B;
265 }
266
267 cw_min = 1;
268 while (cw_min < ah->ah_cw_min)
269 cw_min = (cw_min << 1) | 1;
270
271 cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
272 ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
273 cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
274 ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
275
276 /*
277 * Calculate and set retry limits
278 */
279 if (ah->ah_software_retry) {
280 /* XXX Need to test this */
281 retry_lg = ah->ah_limit_tx_retries;
282 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
283 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
284 } else {
285 retry_lg = AR5K_INIT_LG_RETRY;
286 retry_sh = AR5K_INIT_SH_RETRY;
287 }
288
289 /*No QCU/DCU [5210]*/
290 if (ah->ah_version == AR5K_AR5210) {
291 ath5k_hw_reg_write(ah,
292 (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
293 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
294 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
295 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
296 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
297 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
298 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
299 AR5K_NODCU_RETRY_LMT);
300 } else {
301 /*QCU/DCU [5211+]*/
302 ath5k_hw_reg_write(ah,
303 AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
304 AR5K_DCU_RETRY_LMT_SLG_RETRY) |
305 AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
306 AR5K_DCU_RETRY_LMT_SSH_RETRY) |
307 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
308 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
309 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
310
311 /*===Rest is also for QCU/DCU only [5211+]===*/
312
313 /*
314 * Set initial content window (cw_min/cw_max)
315 * and arbitrated interframe space (aifs)...
316 */
317 ath5k_hw_reg_write(ah,
318 AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
319 AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
320 AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
321 AR5K_DCU_LCL_IFS_AIFS),
322 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
323
324 /*
325 * Set misc registers
326 */
327 ath5k_hw_reg_write(ah, AR5K_QCU_MISC_DCU_EARLY,
328 AR5K_QUEUE_MISC(queue));
329
330 if (tq->tqi_cbr_period) {
331 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
332 AR5K_QCU_CBRCFG_INTVAL) |
333 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
334 AR5K_QCU_CBRCFG_ORN_THRES),
335 AR5K_QUEUE_CBRCFG(queue));
336 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
337 AR5K_QCU_MISC_FRSHED_CBR);
338 if (tq->tqi_cbr_overflow_limit)
339 AR5K_REG_ENABLE_BITS(ah,
340 AR5K_QUEUE_MISC(queue),
341 AR5K_QCU_MISC_CBR_THRES_ENABLE);
342 }
343
344 if (tq->tqi_ready_time)
345 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
346 AR5K_QCU_RDYTIMECFG_INTVAL) |
347 AR5K_QCU_RDYTIMECFG_ENABLE,
348 AR5K_QUEUE_RDYTIMECFG(queue));
349
350 if (tq->tqi_burst_time) {
351 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
352 AR5K_DCU_CHAN_TIME_DUR) |
353 AR5K_DCU_CHAN_TIME_ENABLE,
354 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
355
356 if (tq->tqi_flags
357 & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
358 AR5K_REG_ENABLE_BITS(ah,
359 AR5K_QUEUE_MISC(queue),
360 AR5K_QCU_MISC_RDY_VEOL_POLICY);
361 }
362
363 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
364 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
365 AR5K_QUEUE_DFS_MISC(queue));
366
367 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
368 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
369 AR5K_QUEUE_DFS_MISC(queue));
370
371 /*
372 * Set registers by queue type
373 */
374 switch (tq->tqi_type) {
375 case AR5K_TX_QUEUE_BEACON:
376 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
377 AR5K_QCU_MISC_FRSHED_DBA_GT |
378 AR5K_QCU_MISC_CBREXP_BCN_DIS |
379 AR5K_QCU_MISC_BCN_ENABLE);
380
381 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
382 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
383 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
384 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
385 AR5K_DCU_MISC_BCN_ENABLE);
386
387 ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
388 (AR5K_TUNE_SW_BEACON_RESP -
389 AR5K_TUNE_DMA_BEACON_RESP) -
390 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
391 AR5K_QCU_RDYTIMECFG_ENABLE,
392 AR5K_QUEUE_RDYTIMECFG(queue));
393 break;
394
395 case AR5K_TX_QUEUE_CAB:
396 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
397 AR5K_QCU_MISC_FRSHED_DBA_GT |
398 AR5K_QCU_MISC_CBREXP_DIS |
399 AR5K_QCU_MISC_CBREXP_BCN_DIS);
400
401 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
402 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
403 AR5K_DCU_MISC_ARBLOCK_CTL_S));
404 break;
405
406 case AR5K_TX_QUEUE_UAPSD:
407 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
408 AR5K_QCU_MISC_CBREXP_DIS);
409 break;
410
411 case AR5K_TX_QUEUE_DATA:
412 default:
413 break;
414 }
415
416 /*
417 * Enable interrupts for this tx queue
418 * in the secondary interrupt mask registers
419 */
420 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
421 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
422
423 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
424 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
425
426 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
427 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
428
429 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
430 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
431
432 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
433 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
434
435
436 /* Update secondary interrupt mask registers */
437 ah->ah_txq_imr_txok &= ah->ah_txq_status;
438 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
439 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
440 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
441 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
442
443 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
444 AR5K_SIMR0_QCU_TXOK) |
445 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
446 AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
447 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
448 AR5K_SIMR1_QCU_TXERR) |
449 AR5K_REG_SM(ah->ah_txq_imr_txeol,
450 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
451 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txurn,
452 AR5K_SIMR2_QCU_TXURN), AR5K_SIMR2);
453 }
454
455 return 0;
456}
457
458/*
459 * Get slot time from DCU
460 */
461unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
462{
463 ATH5K_TRACE(ah->ah_sc);
464 if (ah->ah_version == AR5K_AR5210)
465 return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
466 AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
467 else
468 return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
469}
470
471/*
472 * Set slot time on DCU
473 */
474int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
475{
476 ATH5K_TRACE(ah->ah_sc);
477 if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
478 return -EINVAL;
479
480 if (ah->ah_version == AR5K_AR5210)
481 ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
482 ah->ah_turbo), AR5K_SLOT_TIME);
483 else
484 ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
485
486 return 0;
487}
488
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 7562bf173d3e..e557fe178bbf 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2007 Nick Kossifidis <mickflemm@gmail.com> 2 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
3 * Copyright (c) 2004, 2005, 2006, 2007 Reyk Floeter <reyk@openbsd.org> 3 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
4 * Copyright (c) 2007 Michael Taylor <mike.taylor@apprion.com> 4 * Copyright (c) 2007-2008 Michael Taylor <mike.taylor@apprion.com>
5 * 5 *
6 * Permission to use, copy, modify, and distribute this software for any 6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
@@ -29,6 +29,10 @@
29 * http://www.it.iitb.ac.in/~janak/wifire/01222734.pdf 29 * http://www.it.iitb.ac.in/~janak/wifire/01222734.pdf
30 * 30 *
31 * 5211 - http://www.hotchips.org/archives/hc14/3_Tue/16_mcfarland.pdf 31 * 5211 - http://www.hotchips.org/archives/hc14/3_Tue/16_mcfarland.pdf
32 *
33 * This file also contains register values found on a memory dump of
34 * Atheros's ART program (Atheros Radio Test), on ath9k, on legacy-hal
35 * released by Atheros and on various debug messages found on the net.
32 */ 36 */
33 37
34 38
@@ -295,7 +299,7 @@
295#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */ 299#define AR5K_ISR_RXPHY 0x00004000 /* PHY error */
296#define AR5K_ISR_RXKCM 0x00008000 /* RX Key cache miss */ 300#define AR5K_ISR_RXKCM 0x00008000 /* RX Key cache miss */
297#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */ 301#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
298#define AR5K_ISR_BRSSI 0x00020000 302#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
299#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */ 303#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
300#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ 304#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
301#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */ 305#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
@@ -303,46 +307,56 @@
303#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */ 307#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
304#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */ 308#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
305#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */ 309#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
306#define AR5K_ISR_TIM 0x00800000 /* [5210] */ 310#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
307#define AR5K_ISR_BCNMISC 0x00800000 /* [5212+] */ 311#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
308#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill)*/ 312#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
309#define AR5K_ISR_QCBRORN 0x02000000 /* CBR overrun (?) [5211+] */ 313 CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
310#define AR5K_ISR_QCBRURN 0x04000000 /* CBR underrun (?) [5211+] */ 314#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
311#define AR5K_ISR_QTRIG 0x08000000 /* [5211+] */ 315#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
316#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
317#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
312 318
313/* 319/*
314 * Secondary status registers [5211+] (0 - 4) 320 * Secondary status registers [5211+] (0 - 4)
315 * 321 *
316 * I guess from the names that these give the status for each 322 * These give the status for each QCU, only QCUs 0-9 are
317 * queue, that's why only masks are defined here, haven't got 323 * represented.
318 * any info about them (couldn't find them anywhere in ar5k code).
319 */ 324 */
320#define AR5K_SISR0 0x0084 /* Register Address [5211+] */ 325#define AR5K_SISR0 0x0084 /* Register Address [5211+] */
321#define AR5K_SISR0_QCU_TXOK 0x000003ff /* Mask for QCU_TXOK */ 326#define AR5K_SISR0_QCU_TXOK 0x000003ff /* Mask for QCU_TXOK */
327#define AR5K_SISR0_QCU_TXOK_S 0
322#define AR5K_SISR0_QCU_TXDESC 0x03ff0000 /* Mask for QCU_TXDESC */ 328#define AR5K_SISR0_QCU_TXDESC 0x03ff0000 /* Mask for QCU_TXDESC */
329#define AR5K_SISR0_QCU_TXDESC_S 16
323 330
324#define AR5K_SISR1 0x0088 /* Register Address [5211+] */ 331#define AR5K_SISR1 0x0088 /* Register Address [5211+] */
325#define AR5K_SISR1_QCU_TXERR 0x000003ff /* Mask for QCU_TXERR */ 332#define AR5K_SISR1_QCU_TXERR 0x000003ff /* Mask for QCU_TXERR */
333#define AR5K_SISR1_QCU_TXERR_S 0
326#define AR5K_SISR1_QCU_TXEOL 0x03ff0000 /* Mask for QCU_TXEOL */ 334#define AR5K_SISR1_QCU_TXEOL 0x03ff0000 /* Mask for QCU_TXEOL */
335#define AR5K_SISR1_QCU_TXEOL_S 16
327 336
328#define AR5K_SISR2 0x008c /* Register Address [5211+] */ 337#define AR5K_SISR2 0x008c /* Register Address [5211+] */
329#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */ 338#define AR5K_SISR2_QCU_TXURN 0x000003ff /* Mask for QCU_TXURN */
339#define AR5K_SISR2_QCU_TXURN_S 0
330#define AR5K_SISR2_MCABT 0x00100000 /* Master Cycle Abort */ 340#define AR5K_SISR2_MCABT 0x00100000 /* Master Cycle Abort */
331#define AR5K_SISR2_SSERR 0x00200000 /* Signaled System Error */ 341#define AR5K_SISR2_SSERR 0x00200000 /* Signaled System Error */
332#define AR5K_SISR2_DPERR 0x00400000 /* Det par Error (?) */ 342#define AR5K_SISR2_DPERR 0x00400000 /* Bus parity error */
333#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */ 343#define AR5K_SISR2_TIM 0x01000000 /* [5212+] */
334#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */ 344#define AR5K_SISR2_CAB_END 0x02000000 /* [5212+] */
335#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* DTIM sync lost [5212+] */ 345#define AR5K_SISR2_DTIM_SYNC 0x04000000 /* DTIM sync lost [5212+] */
336#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */ 346#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
337#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */ 347#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
338#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */ 348#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
349#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */
339 350
340#define AR5K_SISR3 0x0090 /* Register Address [5211+] */ 351#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
341#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ 352#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
353#define AR5K_SISR3_QCBORN_S 0
342#define AR5K_SISR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */ 354#define AR5K_SISR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */
355#define AR5K_SISR3_QCBRURN_S 16
343 356
344#define AR5K_SISR4 0x0094 /* Register Address [5211+] */ 357#define AR5K_SISR4 0x0094 /* Register Address [5211+] */
345#define AR5K_SISR4_QTRIG 0x000003ff /* Mask for QTRIG */ 358#define AR5K_SISR4_QTRIG 0x000003ff /* Mask for QTRIG */
359#define AR5K_SISR4_QTRIG_S 0
346 360
347/* 361/*
348 * Shadow read-and-clear interrupt status registers [5211+] 362 * Shadow read-and-clear interrupt status registers [5211+]
@@ -379,7 +393,7 @@
379#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/ 393#define AR5K_IMR_RXPHY 0x00004000 /* PHY error*/
380#define AR5K_IMR_RXKCM 0x00008000 /* RX Key cache miss */ 394#define AR5K_IMR_RXKCM 0x00008000 /* RX Key cache miss */
381#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/ 395#define AR5K_IMR_SWBA 0x00010000 /* Software beacon alert*/
382#define AR5K_IMR_BRSSI 0x00020000 396#define AR5K_IMR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
383#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/ 397#define AR5K_IMR_BMISS 0x00040000 /* Beacon missed*/
384#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */ 398#define AR5K_IMR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
385#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */ 399#define AR5K_IMR_BNR 0x00100000 /* Beacon not ready [5211+] */
@@ -387,12 +401,14 @@
387#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/ 401#define AR5K_IMR_RXCHIRP 0x00200000 /* CHIRP Received [5212+]*/
388#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */ 402#define AR5K_IMR_SSERR 0x00200000 /* Signaled System Error [5210] */
389#define AR5K_IMR_DPERR 0x00400000 /* Det par Error (?) [5210] */ 403#define AR5K_IMR_DPERR 0x00400000 /* Det par Error (?) [5210] */
404#define AR5K_IMR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
390#define AR5K_IMR_TIM 0x00800000 /* [5211+] */ 405#define AR5K_IMR_TIM 0x00800000 /* [5211+] */
391#define AR5K_IMR_BCNMISC 0x00800000 /* [5212+] */ 406#define AR5K_IMR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
407 CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
392#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/ 408#define AR5K_IMR_GPIO 0x01000000 /* GPIO (rf kill)*/
393#define AR5K_IMR_QCBRORN 0x02000000 /* CBR overrun (?) [5211+] */ 409#define AR5K_IMR_QCBRORN 0x02000000 /* QCU CBR overrun (?) [5211+] */
394#define AR5K_IMR_QCBRURN 0x04000000 /* CBR underrun (?) [5211+] */ 410#define AR5K_IMR_QCBRURN 0x04000000 /* QCU CBR underrun (?) [5211+] */
395#define AR5K_IMR_QTRIG 0x08000000 /* [5211+] */ 411#define AR5K_IMR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
396 412
397/* 413/*
398 * Secondary interrupt mask registers [5211+] (0 - 4) 414 * Secondary interrupt mask registers [5211+] (0 - 4)
@@ -414,13 +430,14 @@
414#define AR5K_SIMR2_QCU_TXURN_S 0 430#define AR5K_SIMR2_QCU_TXURN_S 0
415#define AR5K_SIMR2_MCABT 0x00100000 /* Master Cycle Abort */ 431#define AR5K_SIMR2_MCABT 0x00100000 /* Master Cycle Abort */
416#define AR5K_SIMR2_SSERR 0x00200000 /* Signaled System Error */ 432#define AR5K_SIMR2_SSERR 0x00200000 /* Signaled System Error */
417#define AR5K_SIMR2_DPERR 0x00400000 /* Det par Error (?) */ 433#define AR5K_SIMR2_DPERR 0x00400000 /* Bus parity error */
418#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */ 434#define AR5K_SIMR2_TIM 0x01000000 /* [5212+] */
419#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */ 435#define AR5K_SIMR2_CAB_END 0x02000000 /* [5212+] */
420#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* DTIM Sync lost [5212+] */ 436#define AR5K_SIMR2_DTIM_SYNC 0x04000000 /* DTIM Sync lost [5212+] */
421#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */ 437#define AR5K_SIMR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
422#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */ 438#define AR5K_SIMR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
423#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */ 439#define AR5K_SIMR2_DTIM 0x20000000 /* [5212+] */
440#define AR5K_SIMR2_TSFOOR 0x80000000 /* TSF OOR (?) */
424 441
425#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */ 442#define AR5K_SIMR3 0x00b0 /* Register Address [5211+] */
426#define AR5K_SIMR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ 443#define AR5K_SIMR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
@@ -586,15 +603,15 @@
586#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame sheduling mask */ 603#define AR5K_QCU_MISC_FRSHED_M 0x0000000f /* Frame sheduling mask */
587#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */ 604#define AR5K_QCU_MISC_FRSHED_ASAP 0 /* ASAP */
588#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */ 605#define AR5K_QCU_MISC_FRSHED_CBR 1 /* Constant Bit Rate */
589#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated (?) */ 606#define AR5K_QCU_MISC_FRSHED_DBA_GT 2 /* DMA Beacon alert gated */
590#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* Time gated (?) */ 607#define AR5K_QCU_MISC_FRSHED_TIM_GT 3 /* TIMT gated */
591#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated (?) */ 608#define AR5K_QCU_MISC_FRSHED_BCN_SENT_GT 4 /* Beacon sent gated */
592#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */ 609#define AR5K_QCU_MISC_ONESHOT_ENABLE 0x00000010 /* Oneshot enable */
593#define AR5K_QCU_MISC_CBREXP 0x00000020 /* CBR expired (normal queue) */ 610#define AR5K_QCU_MISC_CBREXP_DIS 0x00000020 /* Disable CBR expired counter (normal queue) */
594#define AR5K_QCU_MISC_CBREXP_BCN 0x00000040 /* CBR expired (beacon queue) */ 611#define AR5K_QCU_MISC_CBREXP_BCN_DIS 0x00000040 /* Disable CBR expired counter (beacon queue) */
595#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Enable Beacon use */ 612#define AR5K_QCU_MISC_BCN_ENABLE 0x00000080 /* Enable Beacon use */
596#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR threshold enabled */ 613#define AR5K_QCU_MISC_CBR_THRES_ENABLE 0x00000100 /* CBR expired threshold enabled */
597#define AR5K_QCU_MISC_RDY_VEOL_POLICY 0x00000200 /* TXE reset when RDYTIME enalbed */ 614#define AR5K_QCU_MISC_RDY_VEOL_POLICY 0x00000200 /* TXE reset when RDYTIME expired or VEOL */
598#define AR5K_QCU_MISC_CBR_RESET_CNT 0x00000400 /* CBR threshold (counter) reset */ 615#define AR5K_QCU_MISC_CBR_RESET_CNT 0x00000400 /* CBR threshold (counter) reset */
599#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU early termination */ 616#define AR5K_QCU_MISC_DCU_EARLY 0x00000800 /* DCU early termination */
600#define AR5K_QCU_MISC_DCU_CMP_EN 0x00001000 /* Enable frame compression */ 617#define AR5K_QCU_MISC_DCU_CMP_EN 0x00001000 /* Enable frame compression */
@@ -663,6 +680,7 @@
663#define AR5K_DCU_LCL_IFS_CW_MAX_S 10 680#define AR5K_DCU_LCL_IFS_CW_MAX_S 10
664#define AR5K_DCU_LCL_IFS_AIFS 0x0ff00000 /* Arbitrated Interframe Space */ 681#define AR5K_DCU_LCL_IFS_AIFS 0x0ff00000 /* Arbitrated Interframe Space */
665#define AR5K_DCU_LCL_IFS_AIFS_S 20 682#define AR5K_DCU_LCL_IFS_AIFS_S 20
683#define AR5K_DCU_LCL_IFS_AIFS_MAX 0xfc /* Anything above that can cause DCU to hang */
666#define AR5K_QUEUE_DFS_LOCAL_IFS(_q) AR5K_QUEUE_REG(AR5K_DCU_LCL_IFS_BASE, _q) 684#define AR5K_QUEUE_DFS_LOCAL_IFS(_q) AR5K_QUEUE_REG(AR5K_DCU_LCL_IFS_BASE, _q)
667 685
668/* 686/*
@@ -691,11 +709,7 @@
691/* 709/*
692 * DCU misc registers [5211+] 710 * DCU misc registers [5211+]
693 * 711 *
694 * For some of the registers i couldn't find in the code 712 * Note: Arbiter lockout control controls the
695 * (only backoff stuff is there realy) i tried to match the
696 * names with 802.11e parameters etc, so i guess VIRTCOL here
697 * means Virtual Collision and HCFPOLL means Hybrid Coordination
698 * factor Poll (CF- Poll). Arbiter lockout control controls the
699 * behaviour on low priority queues when we have multiple queues 713 * behaviour on low priority queues when we have multiple queues
700 * with pending frames. Intra-frame lockout means we wait until 714 * with pending frames. Intra-frame lockout means we wait until
701 * the queue's current frame transmits (with post frame backoff and bursting) 715 * the queue's current frame transmits (with post frame backoff and bursting)
@@ -705,15 +719,20 @@
705 * No lockout means there is no special handling. 719 * No lockout means there is no special handling.
706 */ 720 */
707#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */ 721#define AR5K_DCU_MISC_BASE 0x1100 /* Register Address -Queue0 DCU_MISC */
708#define AR5K_DCU_MISC_BACKOFF 0x000007ff /* Mask for backoff threshold */ 722#define AR5K_DCU_MISC_BACKOFF 0x0000003f /* Mask for backoff threshold */
723#define AR5K_DCU_MISC_ETS_RTS_POL 0x00000040 /* End of transmission series
724 station RTS/data failure count
725 reset policy (?) */
726#define AR5K_DCU_MISC_ETS_CW_POL 0x00000080 /* End of transmission series
727 CW reset policy */
728#define AR5K_DCU_MISC_FRAG_WAIT 0x00000100 /* Wait for next fragment */
709#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */ 729#define AR5K_DCU_MISC_BACKOFF_FRAG 0x00000200 /* Enable backoff while bursting */
710#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll enable */ 730#define AR5K_DCU_MISC_HCFPOLL_ENABLE 0x00000800 /* CF - Poll enable */
711#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff */ 731#define AR5K_DCU_MISC_BACKOFF_PERSIST 0x00001000 /* Persistent backoff */
712#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch */ 732#define AR5K_DCU_MISC_FRMPRFTCH_ENABLE 0x00002000 /* Enable frame pre-fetch */
713#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */ 733#define AR5K_DCU_MISC_VIRTCOL 0x0000c000 /* Mask for Virtual Collision (?) */
714#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0 734#define AR5K_DCU_MISC_VIRTCOL_NORMAL 0
715#define AR5K_DCU_MISC_VIRTCOL_MODIFIED 1 735#define AR5K_DCU_MISC_VIRTCOL_IGNORE 1
716#define AR5K_DCU_MISC_VIRTCOL_IGNORE 2
717#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Enable Beacon use */ 736#define AR5K_DCU_MISC_BCN_ENABLE 0x00010000 /* Enable Beacon use */
718#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */ 737#define AR5K_DCU_MISC_ARBLOCK_CTL 0x00060000 /* Arbiter lockout control mask */
719#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17 738#define AR5K_DCU_MISC_ARBLOCK_CTL_S 17
@@ -768,8 +787,9 @@
768#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */ 787#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */
769#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */ 788#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */
770#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */ 789#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */
790#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S 10
771#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */ 791#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */
772#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST 0x00400000 /* SIFC cnt reset policy (?) */ 792#define AR5K_DCU_GBL_IFS_MISC_SIFS_CNT_RST 0x00400000 /* SIFS cnt reset policy (?) */
773#define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST 0x00800000 /* AIFS cnt reset policy (?) */ 793#define AR5K_DCU_GBL_IFS_MISC_AIFS_CNT_RST 0x00800000 /* AIFS cnt reset policy (?) */
774#define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS 0x01000000 /* Disable random LFSR slice */ 794#define AR5K_DCU_GBL_IFS_MISC_RND_LFSR_SL_DIS 0x01000000 /* Disable random LFSR slice */
775 795
@@ -820,8 +840,6 @@
820#define AR5K_RESET_CTL_MAC 0x00000004 /* MAC reset (PCU+Baseband ?) [5210] */ 840#define AR5K_RESET_CTL_MAC 0x00000004 /* MAC reset (PCU+Baseband ?) [5210] */
821#define AR5K_RESET_CTL_PHY 0x00000008 /* PHY reset [5210] */ 841#define AR5K_RESET_CTL_PHY 0x00000008 /* PHY reset [5210] */
822#define AR5K_RESET_CTL_PCI 0x00000010 /* PCI Core reset (interrupts etc) */ 842#define AR5K_RESET_CTL_PCI 0x00000010 /* PCI Core reset (interrupts etc) */
823#define AR5K_RESET_CTL_CHIP (AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA | \
824 AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY)
825 843
826/* 844/*
827 * Sleep control register 845 * Sleep control register
@@ -833,9 +851,11 @@
833#define AR5K_SLEEP_CTL_SLE_S 16 851#define AR5K_SLEEP_CTL_SLE_S 16
834#define AR5K_SLEEP_CTL_SLE_WAKE 0x00000000 /* Force chip awake */ 852#define AR5K_SLEEP_CTL_SLE_WAKE 0x00000000 /* Force chip awake */
835#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */ 853#define AR5K_SLEEP_CTL_SLE_SLP 0x00010000 /* Force chip sleep */
836#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000 854#define AR5K_SLEEP_CTL_SLE_ALLOW 0x00020000 /* Normal sleep policy */
837#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */ 855#define AR5K_SLEEP_CTL_SLE_UNITS 0x00000008 /* [5211+] */
838/* more bits */ 856#define AR5K_SLEEP_CTL_DUR_TIM_POL 0x00040000 /* Sleep duration timing policy */
857#define AR5K_SLEEP_CTL_DUR_WRITE_POL 0x00080000 /* Sleep duration write policy */
858#define AR5K_SLEEP_CTL_SLE_POL 0x00100000 /* Sleep policy mode */
839 859
840/* 860/*
841 * Interrupt pending register 861 * Interrupt pending register
@@ -851,27 +871,28 @@
851 871
852/* 872/*
853 * PCI configuration register 873 * PCI configuration register
874 * TODO: Fix LED stuff
854 */ 875 */
855#define AR5K_PCICFG 0x4010 /* Register Address */ 876#define AR5K_PCICFG 0x4010 /* Register Address */
856#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */ 877#define AR5K_PCICFG_EEAE 0x00000001 /* Eeprom access enable [5210] */
857#define AR5K_PCICFG_SLEEP_CLOCK_EN 0x00000002 /* Enable sleep clock (?) */ 878#define AR5K_PCICFG_SLEEP_CLOCK_EN 0x00000002 /* Enable sleep clock */
858#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */ 879#define AR5K_PCICFG_CLKRUNEN 0x00000004 /* CLKRUN enable [5211+] */
859#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */ 880#define AR5K_PCICFG_EESIZE 0x00000018 /* Mask for EEPROM size [5211+] */
860#define AR5K_PCICFG_EESIZE_S 3 881#define AR5K_PCICFG_EESIZE_S 3
861#define AR5K_PCICFG_EESIZE_4K 0 /* 4K */ 882#define AR5K_PCICFG_EESIZE_4K 0 /* 4K */
862#define AR5K_PCICFG_EESIZE_8K 1 /* 8K */ 883#define AR5K_PCICFG_EESIZE_8K 1 /* 8K */
863#define AR5K_PCICFG_EESIZE_16K 2 /* 16K */ 884#define AR5K_PCICFG_EESIZE_16K 2 /* 16K */
864#define AR5K_PCICFG_EESIZE_FAIL 3 /* Failed to get size (?) [5211+] */ 885#define AR5K_PCICFG_EESIZE_FAIL 3 /* Failed to get size [5211+] */
865#define AR5K_PCICFG_LED 0x00000060 /* Led status [5211+] */ 886#define AR5K_PCICFG_LED 0x00000060 /* Led status [5211+] */
866#define AR5K_PCICFG_LED_NONE 0x00000000 /* Default [5211+] */ 887#define AR5K_PCICFG_LED_NONE 0x00000000 /* Default [5211+] */
867#define AR5K_PCICFG_LED_PEND 0x00000020 /* Scan / Auth pending */ 888#define AR5K_PCICFG_LED_PEND 0x00000020 /* Scan / Auth pending */
868#define AR5K_PCICFG_LED_ASSOC 0x00000040 /* Associated */ 889#define AR5K_PCICFG_LED_ASSOC 0x00000040 /* Associated */
869#define AR5K_PCICFG_BUS_SEL 0x00000380 /* Mask for "bus select" [5211+] (?) */ 890#define AR5K_PCICFG_BUS_SEL 0x00000380 /* Mask for "bus select" [5211+] (?) */
870#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix (?) */ 891#define AR5K_PCICFG_CBEFIX_DIS 0x00000400 /* Disable CBE fix */
871#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep (?) */ 892#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep */
872#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */ 893#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */
873#define AR5K_PCICFG_UNK 0x00001000 /* Passed on some parts durring attach (?) */ 894#define AR5K_PCICFG_RETRY_FIX 0x00001000 /* Enable pci core retry fix */
874#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even whith pending interrupts (?) */ 895#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even whith pending interrupts*/
875#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */ 896#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */
876#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */ 897#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */
877#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */ 898#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */
@@ -884,7 +905,8 @@
884#define AR5K_PCICFG_LEDSTATE \ 905#define AR5K_PCICFG_LEDSTATE \
885 (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \ 906 (AR5K_PCICFG_LED | AR5K_PCICFG_LEDMODE | \
886 AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW) 907 AR5K_PCICFG_LEDBLINK | AR5K_PCICFG_LEDSLOW)
887#define AR5K_PCICFG_SLEEP_CLOCK_RATE 0x03000000 /* Sleep clock rate (field) */ 908#define AR5K_PCICFG_SLEEP_CLOCK_RATE 0x03000000 /* Sleep clock rate */
909#define AR5K_PCICFG_SLEEP_CLOCK_RATE_S 24
888 910
889/* 911/*
890 * "General Purpose Input/Output" (GPIO) control register 912 * "General Purpose Input/Output" (GPIO) control register
@@ -906,8 +928,8 @@
906 928
907#define AR5K_GPIOCR 0x4014 /* Register Address */ 929#define AR5K_GPIOCR 0x4014 /* Register Address */
908#define AR5K_GPIOCR_INT_ENA 0x00008000 /* Enable GPIO interrupt */ 930#define AR5K_GPIOCR_INT_ENA 0x00008000 /* Enable GPIO interrupt */
909#define AR5K_GPIOCR_INT_SELL 0x00000000 /* Generate interrupt when pin is off (?) */ 931#define AR5K_GPIOCR_INT_SELL 0x00000000 /* Generate interrupt when pin is low */
910#define AR5K_GPIOCR_INT_SELH 0x00010000 /* Generate interrupt when pin is on */ 932#define AR5K_GPIOCR_INT_SELH 0x00010000 /* Generate interrupt when pin is high */
911#define AR5K_GPIOCR_IN(n) (0 << ((n) * 2)) /* Mode 0 for pin n */ 933#define AR5K_GPIOCR_IN(n) (0 << ((n) * 2)) /* Mode 0 for pin n */
912#define AR5K_GPIOCR_OUT0(n) (1 << ((n) * 2)) /* Mode 1 for pin n */ 934#define AR5K_GPIOCR_OUT0(n) (1 << ((n) * 2)) /* Mode 1 for pin n */
913#define AR5K_GPIOCR_OUT1(n) (2 << ((n) * 2)) /* Mode 2 for pin n */ 935#define AR5K_GPIOCR_OUT1(n) (2 << ((n) * 2)) /* Mode 2 for pin n */
@@ -925,7 +947,6 @@
925#define AR5K_GPIODI 0x401c 947#define AR5K_GPIODI 0x401c
926#define AR5K_GPIODI_M 0x0000002f 948#define AR5K_GPIODI_M 0x0000002f
927 949
928
929/* 950/*
930 * Silicon revision register 951 * Silicon revision register
931 */ 952 */
@@ -935,7 +956,59 @@
935#define AR5K_SREV_VER 0x000000ff /* Mask for version */ 956#define AR5K_SREV_VER 0x000000ff /* Mask for version */
936#define AR5K_SREV_VER_S 4 957#define AR5K_SREV_VER_S 4
937 958
959/*
960 * TXE write posting register
961 */
962#define AR5K_TXEPOST 0x4028
963
964/*
965 * QCU sleep mask
966 */
967#define AR5K_QCU_SLEEP_MASK 0x402c
968
969/* 0x4068 is compression buffer configuration
970 * register on 5414 and pm configuration register
971 * on 5424 and newer pci-e chips. */
972
973/*
974 * Compression buffer configuration
975 * register (enable/disable) [5414]
976 */
977#define AR5K_5414_CBCFG 0x4068
978#define AR5K_5414_CBCFG_BUF_DIS 0x10 /* Disable buffer */
979
980/*
981 * PCI-E Power managment configuration
982 * and status register [5424+]
983 */
984#define AR5K_PCIE_PM_CTL 0x4068 /* Register address */
985/* Only 5424 */
986#define AR5K_PCIE_PM_CTL_L1_WHEN_D2 0x00000001 /* enable PCIe core enter L1
987 when d2_sleep_en is asserted */
988#define AR5K_PCIE_PM_CTL_L0_L0S_CLEAR 0x00000002 /* Clear L0 and L0S counters */
989#define AR5K_PCIE_PM_CTL_L0_L0S_EN 0x00000004 /* Start L0 nd L0S counters */
990#define AR5K_PCIE_PM_CTL_LDRESET_EN 0x00000008 /* Enable reset when link goes
991 down */
992/* Wake On Wireless */
993#define AR5K_PCIE_PM_CTL_PME_EN 0x00000010 /* PME Enable */
994#define AR5K_PCIE_PM_CTL_AUX_PWR_DET 0x00000020 /* Aux power detect */
995#define AR5K_PCIE_PM_CTL_PME_CLEAR 0x00000040 /* Clear PME */
996#define AR5K_PCIE_PM_CTL_PSM_D0 0x00000080
997#define AR5K_PCIE_PM_CTL_PSM_D1 0x00000100
998#define AR5K_PCIE_PM_CTL_PSM_D2 0x00000200
999#define AR5K_PCIE_PM_CTL_PSM_D3 0x00000400
1000
1001/*
1002 * PCI-E Workaround enable register
1003 */
1004#define AR5K_PCIE_WAEN 0x407c
938 1005
1006/*
1007 * PCI-E Serializer/Desirializer
1008 * registers
1009 */
1010#define AR5K_PCIE_SERDES 0x4080
1011#define AR5K_PCIE_SERDES_RESET 0x4084
939 1012
940/*====EEPROM REGISTERS====*/ 1013/*====EEPROM REGISTERS====*/
941 1014
@@ -977,98 +1050,6 @@
977#define AR5K_EEPROM_BASE 0x6000 1050#define AR5K_EEPROM_BASE 0x6000
978 1051
979/* 1052/*
980 * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE)
981 */
982#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
983#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
984#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
985#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
986#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
987
988#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */
989#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */
990#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */
991#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */
992#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008
993#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */
994#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020
995#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */
996#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080
997#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */
998#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200
999#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */
1000#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800
1001#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */
1002#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000
1003#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */
1004#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000
1005#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
1006#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
1007#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
1008#define AR5K_EEPROM_INFO_CKSUM 0xffff
1009#define AR5K_EEPROM_INFO(_n) (AR5K_EEPROM_INFO_BASE + (_n))
1010
1011#define AR5K_EEPROM_VERSION AR5K_EEPROM_INFO(1) /* EEPROM Version */
1012#define AR5K_EEPROM_VERSION_3_0 0x3000 /* No idea what's going on before this version */
1013#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2Ghz (ar5211_rfregs) */
1014#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */
1015#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
1016#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain ee_cck_ofdm_power_delta (eeprom_read_modes) */
1017#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc*, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
1018#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */
1019#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */
1020#define AR5K_EEPROM_VERSION_4_3 0x4003
1021#define AR5K_EEPROM_VERSION_4_4 0x4004
1022#define AR5K_EEPROM_VERSION_4_5 0x4005
1023#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
1024#define AR5K_EEPROM_VERSION_4_7 0x4007
1025
1026#define AR5K_EEPROM_MODE_11A 0
1027#define AR5K_EEPROM_MODE_11B 1
1028#define AR5K_EEPROM_MODE_11G 2
1029
1030#define AR5K_EEPROM_HDR AR5K_EEPROM_INFO(2) /* Header that contains the device caps */
1031#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
1032#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
1033#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
1034#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
1035#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
1036#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
1037#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz (?) */
1038#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
1039
1040#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
1041#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
1042#define AR5K_EEPROM_RFKILL_POLARITY 0x00000002
1043#define AR5K_EEPROM_RFKILL_POLARITY_S 1
1044
1045/* Newer EEPROMs are using a different offset */
1046#define AR5K_EEPROM_OFF(_v, _v3_0, _v3_3) \
1047 (((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
1048
1049#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
1050#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((int8_t)(((_v) >> 8) & 0xff))
1051#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((int8_t)((_v) & 0xff))
1052
1053/* calibration settings */
1054#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
1055#define AR5K_EEPROM_MODES_11B(_v) AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
1056#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
1057#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */
1058
1059/* [3.1 - 3.3] */
1060#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec
1061#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed
1062
1063/* Misc values available since EEPROM 4.0 */
1064#define AR5K_EEPROM_MISC0 0x00c4
1065#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff)
1066#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3)
1067#define AR5K_EEPROM_MISC1 0x00c5
1068#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
1069#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
1070
1071/*
1072 * EEPROM data register 1053 * EEPROM data register
1073 */ 1054 */
1074#define AR5K_EEPROM_DATA_5211 0x6004 1055#define AR5K_EEPROM_DATA_5211 0x6004
@@ -1100,14 +1081,28 @@
1100 * EEPROM config register 1081 * EEPROM config register
1101 */ 1082 */
1102#define AR5K_EEPROM_CFG 0x6010 /* Register Addres */ 1083#define AR5K_EEPROM_CFG 0x6010 /* Register Addres */
1103#define AR5K_EEPROM_CFG_SIZE_OVR 0x00000001 1084#define AR5K_EEPROM_CFG_SIZE 0x00000003 /* Size determination override */
1085#define AR5K_EEPROM_CFG_SIZE_AUTO 0
1086#define AR5K_EEPROM_CFG_SIZE_4KBIT 1
1087#define AR5K_EEPROM_CFG_SIZE_8KBIT 2
1088#define AR5K_EEPROM_CFG_SIZE_16KBIT 3
1104#define AR5K_EEPROM_CFG_WR_WAIT_DIS 0x00000004 /* Disable write wait */ 1089#define AR5K_EEPROM_CFG_WR_WAIT_DIS 0x00000004 /* Disable write wait */
1105#define AR5K_EEPROM_CFG_CLK_RATE 0x00000018 /* Clock rate */ 1090#define AR5K_EEPROM_CFG_CLK_RATE 0x00000018 /* Clock rate */
1106#define AR5K_EEPROM_CFG_PROT_KEY 0x00ffff00 /* Protectio key */ 1091#define AR5K_EEPROM_CFG_CLK_RATE_S 3
1092#define AR5K_EEPROM_CFG_CLK_RATE_156KHZ 0
1093#define AR5K_EEPROM_CFG_CLK_RATE_312KHZ 1
1094#define AR5K_EEPROM_CFG_CLK_RATE_625KHZ 2
1095#define AR5K_EEPROM_CFG_PROT_KEY 0x00ffff00 /* Protection key */
1096#define AR5K_EEPROM_CFG_PROT_KEY_S 8
1107#define AR5K_EEPROM_CFG_LIND_EN 0x01000000 /* Enable length indicator (?) */ 1097#define AR5K_EEPROM_CFG_LIND_EN 0x01000000 /* Enable length indicator (?) */
1108 1098
1109 1099
1110/* 1100/*
1101 * TODO: Wake On Wireless registers
1102 * Range 0x7000 - 0x7ce0
1103 */
1104
1105/*
1111 * Protocol Control Unit (PCU) registers 1106 * Protocol Control Unit (PCU) registers
1112 */ 1107 */
1113/* 1108/*
@@ -1139,11 +1134,13 @@
1139#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */ 1134#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
1140#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */ 1135#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
1141#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */ 1136#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */
1142#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate (for ACK/CTS ?) [5211+] */ 1137#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */
1143#define AR5K_STA_ID1_SELF_GEN_SECTORE 0x04000000 /* Self generate sectore (?) */ 1138#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
1144#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */ 1139#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
1145#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Keysearch mode (?) */ 1140#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
1146#define AR5K_STA_ID1_PRESERVE_SEQ_NUM 0x20000000 /* Preserve sequence number */ 1141#define AR5K_STA_ID1_PRESERVE_SEQ_NUM 0x20000000 /* Preserve sequence number */
1142#define AR5K_STA_ID1_CBCIV_ENDIAN 0x40000000 /* ??? */
1143#define AR5K_STA_ID1_KEYSRCH_MCAST 0x80000000 /* Do key cache search for mcast frames */
1147 1144
1148/* 1145/*
1149 * First BSSID register (MAC address, lower 32bits) 1146 * First BSSID register (MAC address, lower 32bits)
@@ -1402,16 +1399,16 @@
1402#define AR5K_DIAG_SW_LOOP_BACK_5211 0x00000040 1399#define AR5K_DIAG_SW_LOOP_BACK_5211 0x00000040
1403#define AR5K_DIAG_SW_LOOP_BACK (ah->ah_version == AR5K_AR5210 ? \ 1400#define AR5K_DIAG_SW_LOOP_BACK (ah->ah_version == AR5K_AR5210 ? \
1404 AR5K_DIAG_SW_LOOP_BACK_5210 : AR5K_DIAG_SW_LOOP_BACK_5211) 1401 AR5K_DIAG_SW_LOOP_BACK_5210 : AR5K_DIAG_SW_LOOP_BACK_5211)
1405#define AR5K_DIAG_SW_CORR_FCS_5210 0x00000100 1402#define AR5K_DIAG_SW_CORR_FCS_5210 0x00000100 /* Corrupted FCS */
1406#define AR5K_DIAG_SW_CORR_FCS_5211 0x00000080 1403#define AR5K_DIAG_SW_CORR_FCS_5211 0x00000080
1407#define AR5K_DIAG_SW_CORR_FCS (ah->ah_version == AR5K_AR5210 ? \ 1404#define AR5K_DIAG_SW_CORR_FCS (ah->ah_version == AR5K_AR5210 ? \
1408 AR5K_DIAG_SW_CORR_FCS_5210 : AR5K_DIAG_SW_CORR_FCS_5211) 1405 AR5K_DIAG_SW_CORR_FCS_5210 : AR5K_DIAG_SW_CORR_FCS_5211)
1409#define AR5K_DIAG_SW_CHAN_INFO_5210 0x00000200 1406#define AR5K_DIAG_SW_CHAN_INFO_5210 0x00000200 /* Dump channel info */
1410#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100 1407#define AR5K_DIAG_SW_CHAN_INFO_5211 0x00000100
1411#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \ 1408#define AR5K_DIAG_SW_CHAN_INFO (ah->ah_version == AR5K_AR5210 ? \
1412 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211) 1409 AR5K_DIAG_SW_CHAN_INFO_5210 : AR5K_DIAG_SW_CHAN_INFO_5211)
1413#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200 /* Enable scrambler seed */ 1410#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400 /* Enable fixed scrambler seed */
1414#define AR5K_DIAG_SW_EN_SCRAM_SEED_5210 0x00000400 1411#define AR5K_DIAG_SW_EN_SCRAM_SEED_5211 0x00000200
1415#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \ 1412#define AR5K_DIAG_SW_EN_SCRAM_SEED (ah->ah_version == AR5K_AR5210 ? \
1416 AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211) 1413 AR5K_DIAG_SW_EN_SCRAM_SEED_5210 : AR5K_DIAG_SW_EN_SCRAM_SEED_5211)
1417#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */ 1414#define AR5K_DIAG_SW_ECO_ENABLE 0x00000400 /* [5211+] */
@@ -1420,12 +1417,15 @@
1420#define AR5K_DIAG_SW_SCRAM_SEED_S 10 1417#define AR5K_DIAG_SW_SCRAM_SEED_S 10
1421#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */ 1418#define AR5K_DIAG_SW_DIS_SEQ_INC 0x00040000 /* Disable seqnum increment (?)[5210] */
1422#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000 1419#define AR5K_DIAG_SW_FRAME_NV0_5210 0x00080000
1423#define AR5K_DIAG_SW_FRAME_NV0_5211 0x00020000 1420#define AR5K_DIAG_SW_FRAME_NV0_5211 0x00020000 /* Accept frames of non-zero protocol number */
1424#define AR5K_DIAG_SW_FRAME_NV0 (ah->ah_version == AR5K_AR5210 ? \ 1421#define AR5K_DIAG_SW_FRAME_NV0 (ah->ah_version == AR5K_AR5210 ? \
1425 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211) 1422 AR5K_DIAG_SW_FRAME_NV0_5210 : AR5K_DIAG_SW_FRAME_NV0_5211)
1426#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 1423#define AR5K_DIAG_SW_OBSPT_M 0x000c0000 /* Observation point select (?) */
1427#define AR5K_DIAG_SW_OBSPT_S 18 1424#define AR5K_DIAG_SW_OBSPT_S 18
1428/* more bits */ 1425#define AR5K_DIAG_SW_RX_CLEAR_HIGH 0x0010000 /* Force RX Clear high */
1426#define AR5K_DIAG_SW_IGNORE_CARR_SENSE 0x0020000 /* Ignore virtual carrier sense */
1427#define AR5K_DIAG_SW_CHANEL_IDLE_HIGH 0x0040000 /* Force channel idle high */
1428#define AR5K_DIAG_SW_PHEAR_ME 0x0080000 /* ??? */
1429 1429
1430/* 1430/*
1431 * TSF (clock) register (lower 32 bits) 1431 * TSF (clock) register (lower 32 bits)
@@ -1636,16 +1636,16 @@
1636 * 1636 *
1637 * XXX: PCDAC steps (0.5dbm) or DBM ? 1637 * XXX: PCDAC steps (0.5dbm) or DBM ?
1638 * 1638 *
1639 * XXX: Mask changes for newer chips to 7f
1640 * like tx power table ?
1641 */ 1639 */
1642#define AR5K_TXPC 0x80e8 /* Register Address */ 1640#define AR5K_TXPC 0x80e8 /* Register Address */
1643#define AR5K_TXPC_ACK_M 0x0000003f /* Mask for ACK tx power */ 1641#define AR5K_TXPC_ACK_M 0x0000003f /* ACK tx power */
1644#define AR5K_TXPC_ACK_S 0 1642#define AR5K_TXPC_ACK_S 0
1645#define AR5K_TXPC_CTS_M 0x00003f00 /* Mask for CTS tx power */ 1643#define AR5K_TXPC_CTS_M 0x00003f00 /* CTS tx power */
1646#define AR5K_TXPC_CTS_S 8 1644#define AR5K_TXPC_CTS_S 8
1647#define AR5K_TXPC_CHIRP_M 0x003f0000 /* Mask for CHIRP tx power */ 1645#define AR5K_TXPC_CHIRP_M 0x003f0000 /* CHIRP tx power */
1648#define AR5K_TXPC_CHIRP_S 22 1646#define AR5K_TXPC_CHIRP_S 16
1647#define AR5K_TXPC_DOPPLER 0x0f000000 /* Doppler chirp span (?) */
1648#define AR5K_TXPC_DOPPLER_S 24
1649 1649
1650/* 1650/*
1651 * Profile count registers 1651 * Profile count registers
@@ -1656,14 +1656,19 @@
1656#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */ 1656#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */
1657 1657
1658/* 1658/*
1659 * Quiet (period) control registers (?) 1659 * Quiet period control registers
1660 */ 1660 */
1661#define AR5K_QUIET_CTL1 0x80fc /* Register Address */ 1661#define AR5K_QUIET_CTL1 0x80fc /* Register Address */
1662#define AR5K_QUIET_CTL1_NEXT_QT 0x0000ffff /* Mask for next quiet (period?) (?) */ 1662#define AR5K_QUIET_CTL1_NEXT_QT_TSF 0x0000ffff /* Next quiet period TSF (TU) */
1663#define AR5K_QUIET_CTL1_QT_EN 0x00010000 /* Enable quiet (period?) */ 1663#define AR5K_QUIET_CTL1_NEXT_QT_TSF_S 0
1664#define AR5K_QUIET_CTL1_QT_EN 0x00010000 /* Enable quiet period */
1665#define AR5K_QUIET_CTL1_ACK_CTS_EN 0x00020000 /* Send ACK/CTS during quiet period */
1666
1664#define AR5K_QUIET_CTL2 0x8100 /* Register Address */ 1667#define AR5K_QUIET_CTL2 0x8100 /* Register Address */
1665#define AR5K_QUIET_CTL2_QT_PER 0x0000ffff /* Mask for quiet period (?) */ 1668#define AR5K_QUIET_CTL2_QT_PER 0x0000ffff /* Mask for quiet period periodicity */
1666#define AR5K_QUIET_CTL2_QT_DUR 0xffff0000 /* Mask for quiet duration (?) */ 1669#define AR5K_QUIET_CTL2_QT_PER_S 0
1670#define AR5K_QUIET_CTL2_QT_DUR 0xffff0000 /* Mask for quiet period duration */
1671#define AR5K_QUIET_CTL2_QT_DUR_S 16
1667 1672
1668/* 1673/*
1669 * TSF parameter register 1674 * TSF parameter register
@@ -1673,12 +1678,15 @@
1673#define AR5K_TSF_PARM_INC_S 0 1678#define AR5K_TSF_PARM_INC_S 0
1674 1679
1675/* 1680/*
1676 * QoS register (?) 1681 * QoS NOACK policy
1677 */ 1682 */
1678#define AR5K_QOS 0x8108 /* Register Address */ 1683#define AR5K_QOS_NOACK 0x8108 /* Register Address */
1679#define AR5K_QOS_NOACK_2BIT_VALUES 0x00000000 /* (field) */ 1684#define AR5K_QOS_NOACK_2BIT_VALUES 0x0000000f /* ??? */
1680#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000020 /* (field) */ 1685#define AR5K_QOS_NOACK_2BIT_VALUES_S 0
1681#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000080 /* (field) */ 1686#define AR5K_QOS_NOACK_BIT_OFFSET 0x00000070 /* ??? */
1687#define AR5K_QOS_NOACK_BIT_OFFSET_S 4
1688#define AR5K_QOS_NOACK_BYTE_OFFSET 0x00000180 /* ??? */
1689#define AR5K_QOS_NOACK_BYTE_OFFSET_S 8
1682 1690
1683/* 1691/*
1684 * PHY error filter register 1692 * PHY error filter register
@@ -1702,29 +1710,15 @@
1702/* 1710/*
1703 * MIC QoS control register (?) 1711 * MIC QoS control register (?)
1704 */ 1712 */
1705#define AR5K_MIC_QOS_CTL 0x8118 /* Register Address */ 1713#define AR5K_MIC_QOS_CTL 0x8118 /* Register Address */
1706#define AR5K_MIC_QOS_CTL_0 0x00000001 /* MIC QoS control 0 (?) */ 1714#define AR5K_MIC_QOS_CTL_OFF(_n) (1 << (_n * 2))
1707#define AR5K_MIC_QOS_CTL_1 0x00000004 /* MIC QoS control 1 (?) */ 1715#define AR5K_MIC_QOS_CTL_MQ_EN 0x00010000 /* Enable MIC QoS */
1708#define AR5K_MIC_QOS_CTL_2 0x00000010 /* MIC QoS control 2 (?) */
1709#define AR5K_MIC_QOS_CTL_3 0x00000040 /* MIC QoS control 3 (?) */
1710#define AR5K_MIC_QOS_CTL_4 0x00000100 /* MIC QoS control 4 (?) */
1711#define AR5K_MIC_QOS_CTL_5 0x00000400 /* MIC QoS control 5 (?) */
1712#define AR5K_MIC_QOS_CTL_6 0x00001000 /* MIC QoS control 6 (?) */
1713#define AR5K_MIC_QOS_CTL_7 0x00004000 /* MIC QoS control 7 (?) */
1714#define AR5K_MIC_QOS_CTL_MQ_EN 0x00010000 /* Enable MIC QoS */
1715 1716
1716/* 1717/*
1717 * MIC QoS select register (?) 1718 * MIC QoS select register (?)
1718 */ 1719 */
1719#define AR5K_MIC_QOS_SEL 0x811c 1720#define AR5K_MIC_QOS_SEL 0x811c
1720#define AR5K_MIC_QOS_SEL_0 0x00000001 1721#define AR5K_MIC_QOS_SEL_OFF(_n) (1 << (_n * 4))
1721#define AR5K_MIC_QOS_SEL_1 0x00000010
1722#define AR5K_MIC_QOS_SEL_2 0x00000100
1723#define AR5K_MIC_QOS_SEL_3 0x00001000
1724#define AR5K_MIC_QOS_SEL_4 0x00010000
1725#define AR5K_MIC_QOS_SEL_5 0x00100000
1726#define AR5K_MIC_QOS_SEL_6 0x01000000
1727#define AR5K_MIC_QOS_SEL_7 0x10000000
1728 1722
1729/* 1723/*
1730 * Misc mode control register (?) 1724 * Misc mode control register (?)
@@ -1759,6 +1753,11 @@
1759#define AR5K_TSF_THRES 0x813c 1753#define AR5K_TSF_THRES 0x813c
1760 1754
1761/* 1755/*
1756 * TODO: Wake On Wireless registers
1757 * Range: 0x8147 - 0x818c
1758 */
1759
1760/*
1762 * Rate -> ACK SIFS mapping table (32 entries) 1761 * Rate -> ACK SIFS mapping table (32 entries)
1763 */ 1762 */
1764#define AR5K_RATE_ACKSIFS_BASE 0x8680 /* Register Address */ 1763#define AR5K_RATE_ACKSIFS_BASE 0x8680 /* Register Address */
@@ -1873,7 +1872,8 @@
1873 */ 1872 */
1874#define AR5K_PHY_TURBO 0x9804 /* Register Address */ 1873#define AR5K_PHY_TURBO 0x9804 /* Register Address */
1875#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */ 1874#define AR5K_PHY_TURBO_MODE 0x00000001 /* Enable turbo mode */
1876#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Short mode (20Mhz channels) (?) */ 1875#define AR5K_PHY_TURBO_SHORT 0x00000002 /* Set short symbols to turbo mode */
1876#define AR5K_PHY_TURBO_MIMO 0x00000004 /* Set turbo for mimo mimo */
1877 1877
1878/* 1878/*
1879 * PHY agility command register 1879 * PHY agility command register
@@ -1883,6 +1883,11 @@
1883#define AR5K_PHY_TST1 0x9808 1883#define AR5K_PHY_TST1 0x9808
1884#define AR5K_PHY_AGC_DISABLE 0x08000000 /* Disable AGC to A2 (?)*/ 1884#define AR5K_PHY_AGC_DISABLE 0x08000000 /* Disable AGC to A2 (?)*/
1885#define AR5K_PHY_TST1_TXHOLD 0x00003800 /* Set tx hold (?) */ 1885#define AR5K_PHY_TST1_TXHOLD 0x00003800 /* Set tx hold (?) */
1886#define AR5K_PHY_TST1_TXSRC_SRC 0x00000002 /* Used with bit 7 (?) */
1887#define AR5K_PHY_TST1_TXSRC_SRC_S 1
1888#define AR5K_PHY_TST1_TXSRC_ALT 0x00000080 /* Set input to tsdac (?) */
1889#define AR5K_PHY_TST1_TXSRC_ALT_S 7
1890
1886 1891
1887/* 1892/*
1888 * PHY timing register 3 [5112+] 1893 * PHY timing register 3 [5112+]
@@ -1907,15 +1912,23 @@
1907 1912
1908/* 1913/*
1909 * PHY RF control registers 1914 * PHY RF control registers
1910 * (i think these are delay times,
1911 * these calibration values exist
1912 * in EEPROM)
1913 */ 1915 */
1914#define AR5K_PHY_RF_CTL2 0x9824 /* Register Address */ 1916#define AR5K_PHY_RF_CTL2 0x9824 /* Register Address */
1915#define AR5K_PHY_RF_CTL2_TXF2TXD_START 0x0000000f /* Mask for TX frame to TX d(esc?) start */ 1917#define AR5K_PHY_RF_CTL2_TXF2TXD_START 0x0000000f /* TX frame to TX data start */
1918#define AR5K_PHY_RF_CTL2_TXF2TXD_START_S 0
1916 1919
1917#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */ 1920#define AR5K_PHY_RF_CTL3 0x9828 /* Register Address */
1918#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000000f /* Mask for TX end to XLNA on */ 1921#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON 0x0000000f /* TX end to XLNA on */
1922#define AR5K_PHY_RF_CTL3_TXE2XLNA_ON_S 0
1923
1924#define AR5K_PHY_ADC_CTL 0x982c
1925#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF 0x00000003
1926#define AR5K_PHY_ADC_CTL_INBUFGAIN_OFF_S 0
1927#define AR5K_PHY_ADC_CTL_PWD_DAC_OFF 0x00002000
1928#define AR5K_PHY_ADC_CTL_PWD_BAND_GAP_OFF 0x00004000
1929#define AR5K_PHY_ADC_CTL_PWD_ADC_OFF 0x00008000
1930#define AR5K_PHY_ADC_CTL_INBUFGAIN_ON 0x00030000
1931#define AR5K_PHY_ADC_CTL_INBUFGAIN_ON_S 16
1919 1932
1920#define AR5K_PHY_RF_CTL4 0x9834 /* Register Address */ 1933#define AR5K_PHY_RF_CTL4 0x9834 /* Register Address */
1921#define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON 0x00000001 /* TX frame to XPA A on (field) */ 1934#define AR5K_PHY_RF_CTL4_TXF2XPA_A_ON 0x00000001 /* TX frame to XPA A on (field) */
@@ -1937,35 +1950,43 @@
1937 * PHY settling register 1950 * PHY settling register
1938 */ 1951 */
1939#define AR5K_PHY_SETTLING 0x9844 /* Register Address */ 1952#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
1940#define AR5K_PHY_SETTLING_AGC 0x0000007f /* Mask for AGC settling time */ 1953#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
1941#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Mask for Switch settlig time */ 1954#define AR5K_PHY_SETTLING_AGC_S 0
1955#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */
1956#define AR5K_PHY_SETTLINK_SWITCH_S 7
1942 1957
1943/* 1958/*
1944 * PHY Gain registers 1959 * PHY Gain registers
1945 */ 1960 */
1946#define AR5K_PHY_GAIN 0x9848 /* Register Address */ 1961#define AR5K_PHY_GAIN 0x9848 /* Register Address */
1947#define AR5K_PHY_GAIN_TXRX_ATTEN 0x0003f000 /* Mask for TX-RX Attenuation */ 1962#define AR5K_PHY_GAIN_TXRX_ATTEN 0x0003f000 /* TX-RX Attenuation */
1963#define AR5K_PHY_GAIN_TXRX_ATTEN_S 12
1964#define AR5K_PHY_GAIN_TXRX_RF_MAX 0x007c0000
1965#define AR5K_PHY_GAIN_TXRX_RF_MAX_S 18
1948 1966
1949#define AR5K_PHY_GAIN_OFFSET 0x984c /* Register Address */ 1967#define AR5K_PHY_GAIN_OFFSET 0x984c /* Register Address */
1950#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */ 1968#define AR5K_PHY_GAIN_OFFSET_RXTX_FLAG 0x00020000 /* RX-TX flag (?) */
1951 1969
1952/* 1970/*
1953 * Desired size register 1971 * Desired ADC/PGA size register
1954 * (for more infos read ANI patent) 1972 * (for more infos read ANI patent)
1955 */ 1973 */
1956#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */ 1974#define AR5K_PHY_DESIRED_SIZE 0x9850 /* Register Address */
1957#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* Mask for ADC desired size */ 1975#define AR5K_PHY_DESIRED_SIZE_ADC 0x000000ff /* ADC desired size */
1958#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* Mask for PGA desired size */ 1976#define AR5K_PHY_DESIRED_SIZE_ADC_S 0
1959#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Mask for Total desired size (?) */ 1977#define AR5K_PHY_DESIRED_SIZE_PGA 0x0000ff00 /* PGA desired size */
1978#define AR5K_PHY_DESIRED_SIZE_PGA_S 8
1979#define AR5K_PHY_DESIRED_SIZE_TOT 0x0ff00000 /* Total desired size */
1980#define AR5K_PHY_DESIRED_SIZE_TOT_S 20
1960 1981
1961/* 1982/*
1962 * PHY signal register 1983 * PHY signal register
1963 * (for more infos read ANI patent) 1984 * (for more infos read ANI patent)
1964 */ 1985 */
1965#define AR5K_PHY_SIG 0x9858 /* Register Address */ 1986#define AR5K_PHY_SIG 0x9858 /* Register Address */
1966#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 /* Mask for FIRSTEP */ 1987#define AR5K_PHY_SIG_FIRSTEP 0x0003f000 /* FIRSTEP */
1967#define AR5K_PHY_SIG_FIRSTEP_S 12 1988#define AR5K_PHY_SIG_FIRSTEP_S 12
1968#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 /* Mask for FIPWR */ 1989#define AR5K_PHY_SIG_FIRPWR 0x03fc0000 /* FIPWR */
1969#define AR5K_PHY_SIG_FIRPWR_S 18 1990#define AR5K_PHY_SIG_FIRPWR_S 18
1970 1991
1971/* 1992/*
@@ -1973,9 +1994,9 @@
1973 * (for more infos read ANI patent) 1994 * (for more infos read ANI patent)
1974 */ 1995 */
1975#define AR5K_PHY_AGCCOARSE 0x985c /* Register Address */ 1996#define AR5K_PHY_AGCCOARSE 0x985c /* Register Address */
1976#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 /* Mask for AGC Coarse low */ 1997#define AR5K_PHY_AGCCOARSE_LO 0x00007f80 /* AGC Coarse low */
1977#define AR5K_PHY_AGCCOARSE_LO_S 7 1998#define AR5K_PHY_AGCCOARSE_LO_S 7
1978#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 /* Mask for AGC Coarse high */ 1999#define AR5K_PHY_AGCCOARSE_HI 0x003f8000 /* AGC Coarse high */
1979#define AR5K_PHY_AGCCOARSE_HI_S 15 2000#define AR5K_PHY_AGCCOARSE_HI_S 15
1980 2001
1981/* 2002/*
@@ -1984,6 +2005,8 @@
1984#define AR5K_PHY_AGCCTL 0x9860 /* Register address */ 2005#define AR5K_PHY_AGCCTL 0x9860 /* Register address */
1985#define AR5K_PHY_AGCCTL_CAL 0x00000001 /* Enable PHY calibration */ 2006#define AR5K_PHY_AGCCTL_CAL 0x00000001 /* Enable PHY calibration */
1986#define AR5K_PHY_AGCCTL_NF 0x00000002 /* Enable Noise Floor calibration */ 2007#define AR5K_PHY_AGCCTL_NF 0x00000002 /* Enable Noise Floor calibration */
2008#define AR5K_PHY_AGCCTL_NF_EN 0x00008000 /* Enable nf calibration to happen (?) */
2009#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */
1987 2010
1988/* 2011/*
1989 * PHY noise floor status register 2012 * PHY noise floor status register
@@ -1994,7 +2017,10 @@
1994#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M) 2017#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M)
1995#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1) 2018#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1)
1996#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9)) 2019#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
1997#define AR5K_PHY_NF_THRESH62 0x00001000 /* Thresh62 -check ANI patent- (field) */ 2020#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */
2021#define AR5K_PHY_NF_THRESH62_S 12
2022#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* ??? */
2023#define AR5K_PHY_NF_MINCCA_PWR_S 19
1998 2024
1999/* 2025/*
2000 * PHY ADC saturation register [5110] 2026 * PHY ADC saturation register [5110]
@@ -2034,24 +2060,31 @@
2034 */ 2060 */
2035#define AR5K_PHY_SCR 0x9870 2061#define AR5K_PHY_SCR 0x9870
2036#define AR5K_PHY_SCR_32MHZ 0x0000001f 2062#define AR5K_PHY_SCR_32MHZ 0x0000001f
2063
2037#define AR5K_PHY_SLMT 0x9874 2064#define AR5K_PHY_SLMT 0x9874
2038#define AR5K_PHY_SLMT_32MHZ 0x0000007f 2065#define AR5K_PHY_SLMT_32MHZ 0x0000007f
2066
2039#define AR5K_PHY_SCAL 0x9878 2067#define AR5K_PHY_SCAL 0x9878
2040#define AR5K_PHY_SCAL_32MHZ 0x0000000e 2068#define AR5K_PHY_SCAL_32MHZ 0x0000000e
2041 2069
2070
2042/* 2071/*
2043 * PHY PLL (Phase Locked Loop) control register 2072 * PHY PLL (Phase Locked Loop) control register
2044 */ 2073 */
2045#define AR5K_PHY_PLL 0x987c 2074#define AR5K_PHY_PLL 0x987c
2046#define AR5K_PHY_PLL_20MHZ 0x13 /* For half rate (?) [5111+] */ 2075#define AR5K_PHY_PLL_20MHZ 0x00000013 /* For half rate (?) */
2047#define AR5K_PHY_PLL_40MHZ_5211 0x18 /* For 802.11a */ 2076/* 40MHz -> 5GHz band */
2077#define AR5K_PHY_PLL_40MHZ_5211 0x00000018
2048#define AR5K_PHY_PLL_40MHZ_5212 0x000000aa 2078#define AR5K_PHY_PLL_40MHZ_5212 0x000000aa
2079#define AR5K_PHY_PLL_40MHZ_5413 0x00000004
2049#define AR5K_PHY_PLL_40MHZ (ah->ah_version == AR5K_AR5211 ? \ 2080#define AR5K_PHY_PLL_40MHZ (ah->ah_version == AR5K_AR5211 ? \
2050 AR5K_PHY_PLL_40MHZ_5211 : AR5K_PHY_PLL_40MHZ_5212) 2081 AR5K_PHY_PLL_40MHZ_5211 : AR5K_PHY_PLL_40MHZ_5212)
2051#define AR5K_PHY_PLL_44MHZ_5211 0x19 /* For 802.11b/g */ 2082/* 44MHz -> 2.4GHz band */
2083#define AR5K_PHY_PLL_44MHZ_5211 0x00000019
2052#define AR5K_PHY_PLL_44MHZ_5212 0x000000ab 2084#define AR5K_PHY_PLL_44MHZ_5212 0x000000ab
2053#define AR5K_PHY_PLL_44MHZ (ah->ah_version == AR5K_AR5211 ? \ 2085#define AR5K_PHY_PLL_44MHZ (ah->ah_version == AR5K_AR5211 ? \
2054 AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212) 2086 AR5K_PHY_PLL_44MHZ_5211 : AR5K_PHY_PLL_44MHZ_5212)
2087
2055#define AR5K_PHY_PLL_RF5111 0x00000000 2088#define AR5K_PHY_PLL_RF5111 0x00000000
2056#define AR5K_PHY_PLL_RF5112 0x00000040 2089#define AR5K_PHY_PLL_RF5112 0x00000040
2057#define AR5K_PHY_PLL_HALF_RATE 0x00000100 2090#define AR5K_PHY_PLL_HALF_RATE 0x00000100
@@ -2118,6 +2151,19 @@
2118#define AR5K_PHY_RFSTG_DISABLE 0x00000021 2151#define AR5K_PHY_RFSTG_DISABLE 0x00000021
2119 2152
2120/* 2153/*
2154 * BIN masks (?)
2155 */
2156#define AR5K_PHY_BIN_MASK_1 0x9900
2157#define AR5K_PHY_BIN_MASK_2 0x9904
2158#define AR5K_PHY_BIN_MASK_3 0x9908
2159
2160#define AR5K_PHY_BIN_MASK_CTL 0x990c
2161#define AR5K_PHY_BIN_MASK_CTL_MASK_4 0x00003fff
2162#define AR5K_PHY_BIN_MASK_CTL_MASK_4_S 0
2163#define AR5K_PHY_BIN_MASK_CTL_RATE 0xff000000
2164#define AR5K_PHY_BIN_MASK_CTL_RATE_S 24
2165
2166/*
2121 * PHY Antenna control register 2167 * PHY Antenna control register
2122 */ 2168 */
2123#define AR5K_PHY_ANT_CTL 0x9910 /* Register Address */ 2169#define AR5K_PHY_ANT_CTL 0x9910 /* Register Address */
@@ -2164,6 +2210,7 @@
2164#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */ 2210#define AR5K_PHY_OFDM_SELFCORR 0x9924 /* Register Address */
2165#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */ 2211#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_EN 0x00000001 /* Enable cyclic RSSI thr 1 */
2166#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */ 2212#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1 0x000000fe /* Mask for Cyclic RSSI threshold 1 */
2213#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1_S 0
2167#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */ 2214#define AR5K_PHY_OFDM_SELFCORR_CYPWR_THR3 0x00000100 /* Cyclic RSSI threshold 3 (field) (?) */
2168#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */ 2215#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR_EN 0x00008000 /* Enable 1A RSSI threshold (?) */
2169#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */ 2216#define AR5K_PHY_OFDM_SELFCORR_RSSI_1ATHR 0x00010000 /* 1A RSSI threshold (field) (?) */
@@ -2210,7 +2257,6 @@
2210#define AR5K_PHY_PAPD_PROBE_INI_5111 0x00004883 /* [5212+] */ 2257#define AR5K_PHY_PAPD_PROBE_INI_5111 0x00004883 /* [5212+] */
2211#define AR5K_PHY_PAPD_PROBE_INI_5112 0x00004882 /* [5212+] */ 2258#define AR5K_PHY_PAPD_PROBE_INI_5112 0x00004882 /* [5212+] */
2212 2259
2213
2214/* 2260/*
2215 * PHY TX rate power registers [5112+] 2261 * PHY TX rate power registers [5112+]
2216 */ 2262 */
@@ -2232,6 +2278,8 @@
2232#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */ 2278#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */
2233#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3 2279#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3
2234#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */ 2280#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */
2281#define AR5K_PHY_FRAME_CTL_EMU 0x80000000
2282#define AR5K_PHY_FRAME_CTL_EMU_S 31
2235/*---[5110/5111]---*/ 2283/*---[5110/5111]---*/
2236#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 /* PHY timing error */ 2284#define AR5K_PHY_FRAME_CTL_TIMING_ERR 0x01000000 /* PHY timing error */
2237#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 /* Parity error */ 2285#define AR5K_PHY_FRAME_CTL_PARITY_ERR 0x02000000 /* Parity error */
@@ -2250,48 +2298,36 @@
2250 * PHY radar detection register [5111+] 2298 * PHY radar detection register [5111+]
2251 */ 2299 */
2252#define AR5K_PHY_RADAR 0x9954 2300#define AR5K_PHY_RADAR 0x9954
2253
2254/* Radar enable ........ ........ ........ .......1 */
2255#define AR5K_PHY_RADAR_ENABLE 0x00000001 2301#define AR5K_PHY_RADAR_ENABLE 0x00000001
2256#define AR5K_PHY_RADAR_DISABLE 0x00000000 2302#define AR5K_PHY_RADAR_DISABLE 0x00000000
2257#define AR5K_PHY_RADAR_ENABLE_S 0 2303#define AR5K_PHY_RADAR_INBANDTHR 0x0000003e /* Inband threshold
2258 2304 5-bits, units unknown {0..31}
2259/* This is the value found on the card .1.111.1 .1.1.... 111....1 1...1... 2305 (? MHz ?) */
2260at power on. */
2261#define AR5K_PHY_RADAR_PWONDEF_AR5213 0x5d50e188
2262
2263/* This is the value found on the card .1.1.111 ..11...1 .1...1.1 1...11.1
2264after DFS is enabled */
2265#define AR5K_PHY_RADAR_ENABLED_AR5213 0x5731458d
2266
2267/* Finite Impulse Response (FIR) filter .1111111 ........ ........ ........
2268 * power out threshold.
2269 * 7-bits, standard power range {0..127} in 1/2 dBm units. */
2270#define AR5K_PHY_RADAR_FIRPWROUTTHR 0x7f000000
2271#define AR5K_PHY_RADAR_FIRPWROUTTHR_S 24
2272
2273/* Radar RSSI/SNR threshold. ........ 111111.. ........ ........
2274 * 6-bits, dBm range {0..63} in dBm units. */
2275#define AR5K_PHY_RADAR_RADARRSSITHR 0x00fc0000
2276#define AR5K_PHY_RADAR_RADARRSSITHR_S 18
2277
2278/* Pulse height threshold ........ ......11 1111.... ........
2279 * 6-bits, dBm range {0..63} in dBm units. */
2280#define AR5K_PHY_RADAR_PULSEHEIGHTTHR 0x0003f000
2281#define AR5K_PHY_RADAR_PULSEHEIGHTTHR_S 12
2282
2283/* Pulse RSSI/SNR threshold ........ ........ ....1111 11......
2284 * 6-bits, dBm range {0..63} in dBm units. */
2285#define AR5K_PHY_RADAR_PULSERSSITHR 0x00000fc0
2286#define AR5K_PHY_RADAR_PULSERSSITHR_S 6
2287
2288/* Inband threshold ........ ........ ........ ..11111.
2289 * 5-bits, units unknown {0..31} (? MHz ?) */
2290#define AR5K_PHY_RADAR_INBANDTHR 0x0000003e
2291#define AR5K_PHY_RADAR_INBANDTHR_S 1 2306#define AR5K_PHY_RADAR_INBANDTHR_S 1
2292 2307
2308#define AR5K_PHY_RADAR_PRSSI_THR 0x00000fc0 /* Pulse RSSI/SNR threshold
2309 6-bits, dBm range {0..63}
2310 in dBm units. */
2311#define AR5K_PHY_RADAR_PRSSI_THR_S 6
2312
2313#define AR5K_PHY_RADAR_PHEIGHT_THR 0x0003f000 /* Pulse height threshold
2314 6-bits, dBm range {0..63}
2315 in dBm units. */
2316#define AR5K_PHY_RADAR_PHEIGHT_THR_S 12
2317
2318#define AR5K_PHY_RADAR_RSSI_THR 0x00fc0000 /* Radar RSSI/SNR threshold.
2319 6-bits, dBm range {0..63}
2320 in dBm units. */
2321#define AR5K_PHY_RADAR_RSSI_THR_S 18
2322
2323#define AR5K_PHY_RADAR_FIRPWR_THR 0x7f000000 /* Finite Impulse Response
2324 filter power out threshold.
2325 7-bits, standard power range
2326 {0..127} in 1/2 dBm units. */
2327#define AR5K_PHY_RADAR_FIRPWR_THRS 24
2328
2293/* 2329/*
2294 * PHY antenna switch table registers [5110] 2330 * PHY antenna switch table registers
2295 */ 2331 */
2296#define AR5K_PHY_ANT_SWITCH_TABLE_0 0x9960 2332#define AR5K_PHY_ANT_SWITCH_TABLE_0 0x9960
2297#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964 2333#define AR5K_PHY_ANT_SWITCH_TABLE_1 0x9964
@@ -2302,25 +2338,65 @@ after DFS is enabled */
2302#define AR5K_PHY_NFTHRES 0x9968 2338#define AR5K_PHY_NFTHRES 0x9968
2303 2339
2304/* 2340/*
2305 * PHY clock sleep registers [5112+] 2341 * Sigma Delta register (?) [5213]
2306 */ 2342 */
2307#define AR5K_PHY_SCLOCK 0x99f0 2343#define AR5K_PHY_SIGMA_DELTA 0x996C
2308#define AR5K_PHY_SCLOCK_32MHZ 0x0000000c 2344#define AR5K_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
2309#define AR5K_PHY_SDELAY 0x99f4 2345#define AR5K_PHY_SIGMA_DELTA_ADC_SEL_S 0
2310#define AR5K_PHY_SDELAY_32MHZ 0x000000ff 2346#define AR5K_PHY_SIGMA_DELTA_FILT2 0x000000f8
2311#define AR5K_PHY_SPENDING 0x99f8 2347#define AR5K_PHY_SIGMA_DELTA_FILT2_S 3
2312#define AR5K_PHY_SPENDING_14 0x00000014 2348#define AR5K_PHY_SIGMA_DELTA_FILT1 0x00001f00
2313#define AR5K_PHY_SPENDING_18 0x00000018 2349#define AR5K_PHY_SIGMA_DELTA_FILT1_S 8
2314#define AR5K_PHY_SPENDING_RF5111 0x00000018 2350#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP 0x01ff3000
2315#define AR5K_PHY_SPENDING_RF5112 0x00000014 2351#define AR5K_PHY_SIGMA_DELTA_ADC_CLIP_S 13
2316/* #define AR5K_PHY_SPENDING_RF5112A 0x0000000e */ 2352
2317/* #define AR5K_PHY_SPENDING_RF5424 0x00000012 */ 2353/*
2318#define AR5K_PHY_SPENDING_RF5413 0x00000014 2354 * RF restart register [5112+] (?)
2319#define AR5K_PHY_SPENDING_RF2413 0x00000014 2355 */
2320#define AR5K_PHY_SPENDING_RF2425 0x00000018 2356#define AR5K_PHY_RESTART 0x9970 /* restart */
2357#define AR5K_PHY_RESTART_DIV_GC 0x001c0000 /* Fast diversity gc_limit (?) */
2358#define AR5K_PHY_RESTART_DIV_GC_S 18
2359
2360/*
2361 * RF Bus access request register (for synth-oly channel switching)
2362 */
2363#define AR5K_PHY_RFBUS_REQ 0x997C
2364#define AR5K_PHY_RFBUS_REQ_REQUEST 0x00000001
2365
2366/*
2367 * Spur mitigation masks (?)
2368 */
2369#define AR5K_PHY_TIMING_7 0x9980
2370#define AR5K_PHY_TIMING_8 0x9984
2371#define AR5K_PHY_TIMING_8_PILOT_MASK_2 0x000fffff
2372#define AR5K_PHY_TIMING_8_PILOT_MASK_2_S 0
2373
2374#define AR5K_PHY_BIN_MASK2_1 0x9988
2375#define AR5K_PHY_BIN_MASK2_2 0x998c
2376#define AR5K_PHY_BIN_MASK2_3 0x9990
2377
2378#define AR5K_PHY_BIN_MASK2_4 0x9994
2379#define AR5K_PHY_BIN_MASK2_4_MASK_4 0x00003fff
2380#define AR5K_PHY_BIN_MASK2_4_MASK_4_S 0
2381
2382#define AR_PHY_TIMING_9 0x9998
2383#define AR_PHY_TIMING_10 0x999c
2384#define AR_PHY_TIMING_10_PILOT_MASK_2 0x000fffff
2385#define AR_PHY_TIMING_10_PILOT_MASK_2_S 0
2386
2387/*
2388 * Spur mitigation control
2389 */
2390#define AR_PHY_TIMING_11 0x99a0 /* Register address */
2391#define AR_PHY_TIMING_11_SPUR_DELTA_PHASE 0x000fffff /* Spur delta phase */
2392#define AR_PHY_TIMING_11_SPUR_DELTA_PHASE_S 0
2393#define AR_PHY_TIMING_11_SPUR_FREQ_SD 0x3ff00000 /* Freq sigma delta */
2394#define AR_PHY_TIMING_11_SPUR_FREQ_SD_S 20
2395#define AR_PHY_TIMING_11_USE_SPUR_IN_AGC 0x40000000 /* Spur filter in AGC detector */
2396#define AR_PHY_TIMING_11_USE_SPUR_IN_SELFCOR 0x80000000 /* Spur filter in OFDM self correlator */
2321 2397
2322/* 2398/*
2323 * Misc PHY/radio registers [5110 - 5111] 2399 * Gain tables
2324 */ 2400 */
2325#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */ 2401#define AR5K_BB_GAIN_BASE 0x9b00 /* BaseBand Amplifier Gain table base address */
2326#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2)) 2402#define AR5K_BB_GAIN(_n) (AR5K_BB_GAIN_BASE + ((_n) << 2))
@@ -2340,9 +2416,10 @@ after DFS is enabled */
2340#define AR5K_PHY_CURRENT_RSSI 0x9c1c 2416#define AR5K_PHY_CURRENT_RSSI 0x9c1c
2341 2417
2342/* 2418/*
2343 * PHY RF Bus grant register (?) 2419 * PHY RF Bus grant register
2344 */ 2420 */
2345#define AR5K_PHY_RFBUS_GRANT 0x9c20 2421#define AR5K_PHY_RFBUS_GRANT 0x9c20
2422#define AR5K_PHY_RFBUS_GRANT_OK 0x00000001
2346 2423
2347/* 2424/*
2348 * PHY ADC test register 2425 * PHY ADC test register
@@ -2386,6 +2463,31 @@ after DFS is enabled */
2386#define AR5K_PHY_CHAN_STATUS_RX_CLR_PAP 0x00000008 2463#define AR5K_PHY_CHAN_STATUS_RX_CLR_PAP 0x00000008
2387 2464
2388/* 2465/*
2466 * Heavy clip enable register
2467 */
2468#define AR5K_PHY_HEAVY_CLIP_ENABLE 0x99e0
2469
2470/*
2471 * PHY clock sleep registers [5112+]
2472 */
2473#define AR5K_PHY_SCLOCK 0x99f0
2474#define AR5K_PHY_SCLOCK_32MHZ 0x0000000c
2475#define AR5K_PHY_SDELAY 0x99f4
2476#define AR5K_PHY_SDELAY_32MHZ 0x000000ff
2477#define AR5K_PHY_SPENDING 0x99f8
2478#define AR5K_PHY_SPENDING_14 0x00000014
2479#define AR5K_PHY_SPENDING_18 0x00000018
2480#define AR5K_PHY_SPENDING_RF5111 0x00000018
2481#define AR5K_PHY_SPENDING_RF5112 0x00000014
2482/* #define AR5K_PHY_SPENDING_RF5112A 0x0000000e */
2483/* #define AR5K_PHY_SPENDING_RF5424 0x00000012 */
2484#define AR5K_PHY_SPENDING_RF5413 0x00000018
2485#define AR5K_PHY_SPENDING_RF2413 0x00000018
2486#define AR5K_PHY_SPENDING_RF2316 0x00000018
2487#define AR5K_PHY_SPENDING_RF2317 0x00000018
2488#define AR5K_PHY_SPENDING_RF2425 0x00000014
2489
2490/*
2389 * PHY PAPD I (power?) table (?) 2491 * PHY PAPD I (power?) table (?)
2390 * (92! entries) 2492 * (92! entries)
2391 */ 2493 */
@@ -2436,10 +2538,47 @@ after DFS is enabled */
2436#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000000f 2538#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR 0x0000000f
2437#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0 2539#define AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR_S 0
2438 2540
2541/* Same address is used for antenna diversity activation */
2542#define AR5K_PHY_FAST_ANT_DIV 0xa208
2543#define AR5K_PHY_FAST_ANT_DIV_EN 0x00002000
2544
2439/* 2545/*
2440 * PHY 2GHz gain register [5111+] 2546 * PHY 2GHz gain register [5111+]
2441 */ 2547 */
2442#define AR5K_PHY_GAIN_2GHZ 0xa20c 2548#define AR5K_PHY_GAIN_2GHZ 0xa20c
2443#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX 0x00fc0000 2549#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX 0x00fc0000
2444#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX_S 18 2550#define AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX_S 18
2445#define AR5K_PHY_GAIN_2GHZ_INI_5111 0x6480416c 2551#define AR5K_PHY_GAIN_2GHZ_INI_5111 0x6480416c
2552
2553#define AR5K_PHY_CCK_RX_CTL_4 0xa21c
2554#define AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT 0x01f80000
2555#define AR5K_PHY_CCK_RX_CTL_4_FREQ_EST_SHORT_S 19
2556
2557#define AR5K_PHY_DAG_CCK_CTL 0xa228
2558#define AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR 0x00000200
2559#define AR5K_PHY_DAG_CCK_CTL_RSSI_THR 0x0001fc00
2560#define AR5K_PHY_DAG_CCK_CTL_RSSI_THR_S 10
2561
2562#define AR5K_PHY_FAST_ADC 0xa24c
2563
2564#define AR5K_PHY_BLUETOOTH 0xa254
2565
2566/*
2567 * Transmit Power Control register
2568 * [2413+]
2569 */
2570#define AR5K_PHY_TPC_RG1 0xa258
2571#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN 0x0000c000
2572#define AR5K_PHY_TPC_RG1_NUM_PD_GAIN_S 14
2573
2574#define AR5K_PHY_TPC_RG5 0xa26C
2575#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP 0x0000000F
2576#define AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP_S 0
2577#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1 0x000003F0
2578#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1_S 4
2579#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2 0x0000FC00
2580#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2_S 10
2581#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3 0x003F0000
2582#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3_S 16
2583#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4 0x0FC00000
2584#define AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4_S 22
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
new file mode 100644
index 000000000000..8f1886834e61
--- /dev/null
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -0,0 +1,931 @@
1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
5 * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
6 * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 *
20 */
21
22#define _ATH5K_RESET
23
24/*****************************\
25 Reset functions and helpers
26\*****************************/
27
28#include <linux/pci.h>
29#include "ath5k.h"
30#include "reg.h"
31#include "base.h"
32#include "debug.h"
33
34/**
35 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
36 *
37 * @ah: the &struct ath5k_hw
38 * @channel: the currently set channel upon reset
39 *
40 * Write the OFDM timings for the AR5212 upon reset. This is a helper for
41 * ath5k_hw_reset(). This seems to tune the PLL a specified frequency
42 * depending on the bandwidth of the channel.
43 *
44 */
45static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
46 struct ieee80211_channel *channel)
47{
48 /* Get exponent and mantissa and set it */
49 u32 coef_scaled, coef_exp, coef_man,
50 ds_coef_exp, ds_coef_man, clock;
51
52 if (!(ah->ah_version == AR5K_AR5212) ||
53 !(channel->hw_value & CHANNEL_OFDM))
54 BUG();
55
56 /* Seems there are two PLLs, one for baseband sampling and one
57 * for tuning. Tuning basebands are 40 MHz or 80MHz when in
58 * turbo. */
59 clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40;
60 coef_scaled = ((5 * (clock << 24)) / 2) /
61 channel->center_freq;
62
63 for (coef_exp = 31; coef_exp > 0; coef_exp--)
64 if ((coef_scaled >> coef_exp) & 0x1)
65 break;
66
67 if (!coef_exp)
68 return -EINVAL;
69
70 coef_exp = 14 - (coef_exp - 24);
71 coef_man = coef_scaled +
72 (1 << (24 - coef_exp - 1));
73 ds_coef_man = coef_man >> (24 - coef_exp);
74 ds_coef_exp = coef_exp - 16;
75
76 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
77 AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
78 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
79 AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
80
81 return 0;
82}
83
84
/*
 * Index into the rate table for the control (ACK) rate used to answer
 * each rate. We can set it up like this because this is only used for
 * AR5212 and we know it supports G mode, so the 2GHz band holds all
 * twelve entries (CCK rates are acked at 1/2 Mbps, OFDM rates at
 * 6/12/24 Mbps). Read-only lookup table, hence const.
 */
static const int control_rates[] =
	{ 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 };
91
92/**
93 * ath5k_hw_write_rate_duration - set rate duration during hw resets
94 *
95 * @ah: the &struct ath5k_hw
96 * @mode: one of enum ath5k_driver_mode
97 *
98 * Write the rate duration table upon hw reset. This is a helper for
99 * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout for
100 * the hardware for the current mode for each rate. The rates which are capable
101 * of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have another
102 * register for the short preamble ACK timeout calculation.
103 */
104static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
105 unsigned int mode)
106{
107 struct ath5k_softc *sc = ah->ah_sc;
108 struct ieee80211_rate *rate;
109 unsigned int i;
110
111 /* Write rate duration table */
112 for (i = 0; i < sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates; i++) {
113 u32 reg;
114 u16 tx_time;
115
116 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[control_rates[i]];
117
118 /* Set ACK timeout */
119 reg = AR5K_RATE_DUR(rate->hw_value);
120
121 /* An ACK frame consists of 10 bytes. If you add the FCS,
122 * which ieee80211_generic_frame_duration() adds,
123 * its 14 bytes. Note we use the control rate and not the
124 * actual rate for this rate. See mac80211 tx.c
125 * ieee80211_duration() for a brief description of
126 * what rate we should choose to TX ACKs. */
127 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
128 sc->vif, 10, rate));
129
130 ath5k_hw_reg_write(ah, tx_time, reg);
131
132 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
133 continue;
134
135 /*
136 * We're not distinguishing short preamble here,
137 * This is true, all we'll get is a longer value here
138 * which is not necessarilly bad. We could use
139 * export ieee80211_frame_duration() but that needs to be
140 * fixed first to be properly used by mac802111 drivers:
141 *
142 * - remove erp stuff and let the routine figure ofdm
143 * erp rates
144 * - remove passing argument ieee80211_local as
145 * drivers don't have access to it
146 * - move drivers using ieee80211_generic_frame_duration()
147 * to this
148 */
149 ath5k_hw_reg_write(ah, tx_time,
150 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
151 }
152}
153
154/*
155 * Reset chipset
156 */
157static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
158{
159 int ret;
160 u32 mask = val ? val : ~0U;
161
162 ATH5K_TRACE(ah->ah_sc);
163
164 /* Read-and-clear RX Descriptor Pointer*/
165 ath5k_hw_reg_read(ah, AR5K_RXDP);
166
167 /*
168 * Reset the device and wait until success
169 */
170 ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
171
172 /* Wait at least 128 PCI clocks */
173 udelay(15);
174
175 if (ah->ah_version == AR5K_AR5210) {
176 val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
177 | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
178 mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
179 | AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
180 } else {
181 val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
182 mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
183 }
184
185 ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
186
187 /*
188 * Reset configuration register (for hw byte-swap). Note that this
189 * is only set for big endian. We do the necessary magic in
190 * AR5K_INIT_CFG.
191 */
192 if ((val & AR5K_RESET_CTL_PCU) == 0)
193 ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
194
195 return ret;
196}
197
/*
 * Sleep control
 *
 * Put the chip into the power state given by @mode and mirror that
 * state in the power-save bit of the STA_ID1 register.
 *
 * @ah: the &struct ath5k_hw
 * @mode: target state (AR5K_PM_AUTO / NETWORK_SLEEP / FULL_SLEEP / AWAKE)
 * @set_chip: when false only the STA_ID1 bookkeeping is updated, the
 *	sleep control register is left untouched
 * @sleep_duration: value OR'ed into SLEEP_CTL for the sleep-capable modes
 *
 * Returns 0 on success, -EIO if the chip refuses to wake up and
 * -EINVAL for an unknown mode.
 */
int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
		bool set_chip, u16 sleep_duration)
{
	unsigned int i;
	u32 staid, data;

	ATH5K_TRACE(ah->ah_sc);
	staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);

	switch (mode) {
	case AR5K_PM_AUTO:
		staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
		/* fallthrough */
	case AR5K_PM_NETWORK_SLEEP:
		if (set_chip)
			ath5k_hw_reg_write(ah,
				AR5K_SLEEP_CTL_SLE_ALLOW |
				sleep_duration,
				AR5K_SLEEP_CTL);

		staid |= AR5K_STA_ID1_PWR_SV;
		break;

	case AR5K_PM_FULL_SLEEP:
		if (set_chip)
			ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
				AR5K_SLEEP_CTL);

		staid |= AR5K_STA_ID1_PWR_SV;
		break;

	case AR5K_PM_AWAKE:

		staid &= ~AR5K_STA_ID1_PWR_SV;

		if (!set_chip)
			goto commit;

		/* Preserve sleep duration */
		/* NOTE(review): presumably the upper bits flag a state in
		 * which the duration field is invalid and must be cleared
		 * entirely - undocumented, taken as-is. */
		data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
		if (data & 0xffc00000)
			data = 0;
		else
			data = data & 0xfffcffff;

		ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
		udelay(15);

		/* Poll up to 50 times (~10ms) for the chip to wake */
		for (i = 50; i > 0; i--) {
			/* Check if the chip did wake up */
			if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
					AR5K_PCICFG_SPWR_DN) == 0)
				break;

			/* Wait a bit and retry */
			udelay(200);
			ath5k_hw_reg_write(ah, data, AR5K_SLEEP_CTL);
		}

		/* Fail if the chip didn't wake up */
		if (i <= 0)
			return -EIO;

		break;

	default:
		return -EINVAL;
	}

commit:
	/* Remember the requested mode and commit the power-save bit */
	ah->ah_power_mode = mode;
	ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);

	return 0;
}
276
/*
 * Bring up MAC + PHY Chips
 *
 * Wake the device, derive the PHY mode / PLL / turbo settings from the
 * channel @flags, hard-reset the chip, wake it again and finally
 * program the PHY operating mode. Called from ath5k_hw_reset().
 * (NOTE(review): @initial is currently unused in this function.)
 *
 * Returns 0 on success or a negative errno if the chip cannot be woken
 * up or reset, or if @flags describe an invalid mode.
 */
int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
{
	struct pci_dev *pdev = ah->ah_sc->pdev;
	u32 turbo, mode, clock, bus_flags;
	int ret;

	turbo = 0;
	mode = 0;
	clock = 0;

	ATH5K_TRACE(ah->ah_sc);

	/* Wakeup the device */
	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
	if (ret) {
		ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
		return ret;
	}

	if (ah->ah_version != AR5K_AR5210) {
		/*
		 * Get channel mode flags
		 */

		if (ah->ah_radio >= AR5K_RF5112) {
			mode = AR5K_PHY_MODE_RAD_RF5112;
			clock = AR5K_PHY_PLL_RF5112;
		} else {
			mode = AR5K_PHY_MODE_RAD_RF5111;	/*Zero*/
			clock = AR5K_PHY_PLL_RF5111;		/*Zero*/
		}

		if (flags & CHANNEL_2GHZ) {
			mode |= AR5K_PHY_MODE_FREQ_2GHZ;
			clock |= AR5K_PHY_PLL_44MHZ;

			if (flags & CHANNEL_CCK) {
				mode |= AR5K_PHY_MODE_MOD_CCK;
			} else if (flags & CHANNEL_OFDM) {
				/* XXX Dynamic OFDM/CCK is not supported by the
				 * AR5211 so we set MOD_OFDM for plain g (no
				 * CCK headers) operation. We need to test
				 * this, 5211 might support ofdm-only g after
				 * all, there are also initial register values
				 * in the code for g mode (see initvals.c). */
				if (ah->ah_version == AR5K_AR5211)
					mode |= AR5K_PHY_MODE_MOD_OFDM;
				else
					mode |= AR5K_PHY_MODE_MOD_DYN;
			} else {
				ATH5K_ERR(ah->ah_sc,
					"invalid radio modulation mode\n");
				return -EINVAL;
			}
		} else if (flags & CHANNEL_5GHZ) {
			mode |= AR5K_PHY_MODE_FREQ_5GHZ;
			clock |= AR5K_PHY_PLL_40MHZ;

			if (flags & CHANNEL_OFDM)
				mode |= AR5K_PHY_MODE_MOD_OFDM;
			else {
				ATH5K_ERR(ah->ah_sc,
					"invalid radio modulation mode\n");
				return -EINVAL;
			}
		} else {
			ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
			return -EINVAL;
		}

		if (flags & CHANNEL_TURBO)
			turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT;
	} else { /* Reset the device */

		/* ...enable Atheros turbo mode if requested */
		if (flags & CHANNEL_TURBO)
			ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
					AR5K_PHY_TURBO);
	}

	/* resetting PCI on PCI-E cards results in the card hanging
	 * and always returning 0xffff... so we ignore that flag
	 * for PCI-E cards */
	bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;

	/* Reset chipset */
	if (ah->ah_version == AR5K_AR5210) {
		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
			AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
			AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
		mdelay(2);
	} else {
		ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
			AR5K_RESET_CTL_BASEBAND | bus_flags);
	}
	if (ret) {
		ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
		return -EIO;
	}

	/* ...wakeup again!*/
	ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
	if (ret) {
		ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
		return ret;
	}

	/* ...final warm reset */
	if (ath5k_hw_nic_reset(ah, 0)) {
		ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
		return -EIO;
	}

	if (ah->ah_version != AR5K_AR5210) {
		/* ...set the PHY operating mode */
		ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
		udelay(300);

		ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
		ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
	}

	return 0;
}
404
/*
 * Main reset function
 *
 * Full chip reset for the given channel/operating mode: saves a few
 * registers (seq number, default antenna, LED GPIOs), wakes and
 * hard-resets the chip via ath5k_hw_nic_wakeup(), loads initial
 * register values, programs RF/PHY/EEPROM-derived settings, restores
 * the saved registers, runs AGC/NF calibration and finally resets the
 * TX queues and interrupt mask. Returns 0 on success or a negative
 * errno (-EINVAL bad channel/radio, -EAGAIN calibration timeout, or
 * whatever a helper returned).
 */
int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
	struct ieee80211_channel *channel, bool change_channel)
{
	struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
	struct pci_dev *pdev = ah->ah_sc->pdev;
	u32 data, s_seq, s_ant, s_led[3], dma_size;
	unsigned int i, mode, freq, ee_mode, ant[2];
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	s_seq = 0;
	s_ant = 0;
	ee_mode = 0;
	freq = 0;
	mode = 0;

	/*
	 * Save some registers before a reset
	 */
	/*DCU/Antenna selection not available on 5210*/
	if (ah->ah_version != AR5K_AR5210) {
		if (change_channel) {
			/* Seq number for queue 0 -do this for all queues ? */
			s_seq = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_DFS_SEQNUM(0));
			/*Default antenna*/
			s_ant = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
		}
	}

	/*GPIOs*/
	s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & AR5K_PCICFG_LEDSTATE;
	s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
	s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);

	/* Refresh RF gain readings before losing them in the reset */
	if (change_channel && ah->ah_rf_banks != NULL)
		ath5k_hw_get_rf_gain(ah);


	/*Wakeup the device*/
	ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
	if (ret)
		return ret;

	/*
	 * Initialize operating mode
	 */
	ah->ah_op_mode = op_mode;

	/*
	 * 5111/5112 Settings
	 * 5210 only comes with RF5110
	 */
	if (ah->ah_version != AR5K_AR5210) {
		if (ah->ah_radio != AR5K_RF5111 &&
			ah->ah_radio != AR5K_RF5112 &&
			ah->ah_radio != AR5K_RF5413 &&
			ah->ah_radio != AR5K_RF2413 &&
			ah->ah_radio != AR5K_RF2425) {
			ATH5K_ERR(ah->ah_sc,
				"invalid phy radio: %u\n", ah->ah_radio);
			return -EINVAL;
		}

		/* Map the channel flags to driver mode, RF gain table
		 * and EEPROM mode index */
		switch (channel->hw_value & CHANNEL_MODES) {
		case CHANNEL_A:
			mode = AR5K_MODE_11A;
			freq = AR5K_INI_RFGAIN_5GHZ;
			ee_mode = AR5K_EEPROM_MODE_11A;
			break;
		case CHANNEL_G:
			mode = AR5K_MODE_11G;
			freq = AR5K_INI_RFGAIN_2GHZ;
			ee_mode = AR5K_EEPROM_MODE_11G;
			break;
		case CHANNEL_B:
			mode = AR5K_MODE_11B;
			freq = AR5K_INI_RFGAIN_2GHZ;
			ee_mode = AR5K_EEPROM_MODE_11B;
			break;
		case CHANNEL_T:
			mode = AR5K_MODE_11A_TURBO;
			freq = AR5K_INI_RFGAIN_5GHZ;
			ee_mode = AR5K_EEPROM_MODE_11A;
			break;
		/*Is this ok on 5211 too ?*/
		case CHANNEL_TG:
			mode = AR5K_MODE_11G_TURBO;
			freq = AR5K_INI_RFGAIN_2GHZ;
			ee_mode = AR5K_EEPROM_MODE_11G;
			break;
		case CHANNEL_XR:
			if (ah->ah_version == AR5K_AR5211) {
				ATH5K_ERR(ah->ah_sc,
					"XR mode not available on 5211");
				return -EINVAL;
			}
			mode = AR5K_MODE_XR;
			freq = AR5K_INI_RFGAIN_5GHZ;
			ee_mode = AR5K_EEPROM_MODE_11A;
			break;
		default:
			ATH5K_ERR(ah->ah_sc,
				"invalid channel: %d\n", channel->center_freq);
			return -EINVAL;
		}

		/* PHY access enable */
		ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));

	}

	ret = ath5k_hw_write_initvals(ah, mode, change_channel);
	if (ret)
		return ret;

	/*
	 * 5211/5212 Specific
	 */
	if (ah->ah_version != AR5K_AR5210) {
		/*
		 * Write initial RF gain settings
		 * This should work for both 5111/5112
		 */
		ret = ath5k_hw_rfgain(ah, freq);
		if (ret)
			return ret;

		mdelay(1);

		/*
		 * Write some more initial register settings
		 * (undocumented magic values per MAC revision)
		 */
		if (ah->ah_version == AR5K_AR5212) {
			ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);

			if (channel->hw_value == CHANNEL_G)
				if (ah->ah_mac_srev < AR5K_SREV_AR2413)
					ath5k_hw_reg_write(ah, 0x00f80d80,
						0x994c);
				else if (ah->ah_mac_srev < AR5K_SREV_AR5424)
					ath5k_hw_reg_write(ah, 0x00380140,
						0x994c);
				else if (ah->ah_mac_srev < AR5K_SREV_AR2425)
					ath5k_hw_reg_write(ah, 0x00fc0ec0,
						0x994c);
				else /* 2425 */
					ath5k_hw_reg_write(ah, 0x00fc0fc0,
						0x994c);
			else
				ath5k_hw_reg_write(ah, 0x00000000, 0x994c);

			/* Some bits are disabled here, we know nothing about
			 * register 0xa228 yet, most of the times this ends up
			 * with a value 0x9b5 -haven't seen any dump with
			 * a different value- */
			/* Got this from decompiling binary HAL */
			data = ath5k_hw_reg_read(ah, 0xa228);
			data &= 0xfffffdff;
			ath5k_hw_reg_write(ah, data, 0xa228);

			data = ath5k_hw_reg_read(ah, 0xa228);
			data &= 0xfffe03ff;
			ath5k_hw_reg_write(ah, data, 0xa228);
			data = 0;

			/* Just write 0x9b5 ? */
			/* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
			ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
			ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
			ath5k_hw_reg_write(ah, 0x0000000e, AR5K_PHY_SCAL);
		}

		/* Fix for first revision of the RF5112 RF chipset */
		if (ah->ah_radio >= AR5K_RF5112 &&
				ah->ah_radio_5ghz_revision <
				AR5K_SREV_RAD_5112A) {
			ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
					AR5K_PHY_CCKTXCTL);
			if (channel->hw_value & CHANNEL_5GHZ)
				data = 0xffb81020;
			else
				data = 0xffb80d20;
			ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
			data = 0;
		}

		/*
		 * Set TX power (FIXME)
		 */
		ret = ath5k_hw_txpower(ah, channel, AR5K_TUNE_DEFAULT_TXPOWER);
		if (ret)
			return ret;

		/* Write rate duration table only on AR5212 and if
		 * virtual interface has already been brought up
		 * XXX: rethink this after new mode changes to
		 * mac80211 are integrated */
		if (ah->ah_version == AR5K_AR5212 &&
			ah->ah_sc->vif != NULL)
			ath5k_hw_write_rate_duration(ah, mode);

		/*
		 * Write RF registers
		 */
		ret = ath5k_hw_rfregs(ah, channel, mode);
		if (ret)
			return ret;

		/*
		 * Configure additional registers
		 */

		/* Write OFDM timings on 5212*/
		if (ah->ah_version == AR5K_AR5212 &&
			channel->hw_value & CHANNEL_OFDM) {
			ret = ath5k_hw_write_ofdm_timings(ah, channel);
			if (ret)
				return ret;
		}

		/*Enable/disable 802.11b mode on 5111
		(enable 2111 frequency converter + CCK)*/
		if (ah->ah_radio == AR5K_RF5111) {
			if (mode == AR5K_MODE_11B)
				AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
					AR5K_TXCFG_B_MODE);
			else
				AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
					AR5K_TXCFG_B_MODE);
		}

		/*
		 * Set channel and calibrate the PHY
		 */
		ret = ath5k_hw_channel(ah, channel);
		if (ret)
			return ret;

		/* Set antenna mode */
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_ANT_CTL,
			ah->ah_antenna[ee_mode][0], 0xfffffc06);

		/*
		 * In case a fixed antenna was set as default
		 * write the same settings on both AR5K_PHY_ANT_SWITCH_TABLE
		 * registers.
		 */
		if (s_ant != 0) {
			if (s_ant == AR5K_ANT_FIXED_A) /* 1 - Main */
				ant[0] = ant[1] = AR5K_ANT_FIXED_A;
			else	/* 2 - Aux */
				ant[0] = ant[1] = AR5K_ANT_FIXED_B;
		} else {
			ant[0] = AR5K_ANT_FIXED_A;
			ant[1] = AR5K_ANT_FIXED_B;
		}

		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[0]],
			AR5K_PHY_ANT_SWITCH_TABLE_0);
		ath5k_hw_reg_write(ah, ah->ah_antenna[ee_mode][ant[1]],
			AR5K_PHY_ANT_SWITCH_TABLE_1);

		/* Commit values from EEPROM */
		if (ah->ah_radio == AR5K_RF5111)
			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
				AR5K_PHY_FRAME_CTL_TX_CLIP, ee->ee_tx_clip);

		ath5k_hw_reg_write(ah,
			AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
			AR5K_PHY_NFTHRES);

		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_SETTLING,
			(ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
			0xffffc07f);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
			(ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000,
			0xfffc0fff);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
			(ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
			((ee->ee_pga_desired_size[ee_mode] << 8) & 0xff00),
			0xffff0000);

		ath5k_hw_reg_write(ah,
			(ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
			(ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
			(ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
			(ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);

		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_RF_CTL3,
			ee->ee_tx_end2xlna_enable[ee_mode] << 8, 0xffff00ff);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_NF,
			(ee->ee_thr_62[ee_mode] << 12) & 0x7f000, 0xfff80fff);
		AR5K_REG_MASKED_BITS(ah, AR5K_PHY_OFDM_SELFCORR, 4, 0xffffff01);

		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
			AR5K_PHY_IQ_CORR_ENABLE |
			(ee->ee_i_cal[ee_mode] << AR5K_PHY_IQ_CORR_Q_I_COFF_S) |
			ee->ee_q_cal[ee_mode]);

		if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
			AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
				AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
				ee->ee_margin_tx_rx[ee_mode]);

	} else {
		mdelay(1);
		/* Disable phy and wait */
		ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
		mdelay(1);
	}

	/*
	 * Restore saved values
	 */
	/*DCU/Antenna selection not available on 5210*/
	if (ah->ah_version != AR5K_AR5210) {
		ath5k_hw_reg_write(ah, s_seq, AR5K_QUEUE_DFS_SEQNUM(0));
		ath5k_hw_reg_write(ah, s_ant, AR5K_DEFAULT_ANTENNA);
	}
	AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
	ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
	ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);

	/*
	 * Misc
	 */
	/* XXX: add ah->aid once mac80211 gives this to us */
	ath5k_hw_set_associd(ah, ah->ah_bssid, 0);

	ath5k_hw_set_opmode(ah);
	/*PISR/SISR Not available on 5210*/
	if (ah->ah_version != AR5K_AR5210) {
		/* Clear any pending interrupts and set RSSI/beacon-miss
		 * thresholds to the tuned defaults */
		ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
		/* If we later allow tuning for this, store into sc structure */
		data = AR5K_TUNE_RSSI_THRES |
			AR5K_TUNE_BMISS_THRES << AR5K_RSSI_THR_BMISS_S;
		ath5k_hw_reg_write(ah, data, AR5K_RSSI_THR);
	}

	/*
	 * Set Rx/Tx DMA Configuration
	 *
	 * Set maximum DMA size (512) except for PCI-E cards since
	 * it causes rx overruns and tx errors (tested on 5424 but since
	 * rx overruns also occur on 5416/5418 with madwifi we set 128
	 * for all PCI-E cards to be safe).
	 *
	 * In dumps this is 128 for all chips.
	 *
	 * XXX: need to check 5210 for this
	 * TODO: Check out tx trigger level, it's always 64 on dumps but I
	 * guess we can tweak it and see how it goes ;-)
	 */
	dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B;
	if (ah->ah_version != AR5K_AR5210) {
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
			AR5K_TXCFG_SDMAMR, dma_size);
		AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
			AR5K_RXCFG_SDMAMW, dma_size);
	}

	/*
	 * Enable the PHY and wait until completion
	 */
	ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);

	/*
	 * On 5211+ read activation -> rx delay
	 * and use it.
	 */
	if (ah->ah_version != AR5K_AR5210) {
		data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
			AR5K_PHY_RX_DELAY_M;
		data = (channel->hw_value & CHANNEL_CCK) ?
			((data << 2) / 22) : (data / 10);

		udelay(100 + (2 * data));
		data = 0;
	} else {
		mdelay(1);
	}

	/*
	 * Perform ADC test (?)
	 */
	data = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
	ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
	for (i = 0; i <= 20; i++) {
		if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
			break;
		udelay(200);
	}
	ath5k_hw_reg_write(ah, data, AR5K_PHY_TST1);
	data = 0;

	/*
	 * Start automatic gain calibration
	 *
	 * During AGC calibration RX path is re-routed to
	 * a signal detector so we don't receive anything.
	 *
	 * This method is used to calibrate some static offsets
	 * used together with on-the fly I/Q calibration (the
	 * one performed via ath5k_hw_phy_calibrate), that doesn't
	 * interrupt rx path.
	 *
	 * If we are in a noisy environment AGC calibration may time
	 * out.
	 */
	AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
				AR5K_PHY_AGCCTL_CAL);

	/* At the same time start I/Q calibration for QAM constellation
	 * -no need for CCK- */
	ah->ah_calibration = false;
	if (!(mode == AR5K_MODE_11B)) {
		ah->ah_calibration = true;
		AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
				AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
				AR5K_PHY_IQ_RUN);
	}

	/* Wait for gain calibration to finish (we check for I/Q calibration
	 * during ath5k_phy_calibrate) */
	if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
			AR5K_PHY_AGCCTL_CAL, 0, false)) {
		ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
			channel->center_freq);
		return -EAGAIN;
	}

	/*
	 * Start noise floor calibration
	 *
	 * If we run NF calibration before AGC, it always times out.
	 * Binary HAL starts NF and AGC calibration at the same time
	 * and only waits for AGC to finish. I believe that's wrong because
	 * during NF calibration, rx path is also routed to a detector, so if
	 * it doesn't finish we won't have RX.
	 *
	 * XXX: Find an interval that's OK for all cards...
	 */
	ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
	if (ret)
		return ret;

	/*
	 * Reset queues and start beacon timers at the end of the reset routine
	 */
	for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
		/*No QCU on 5210*/
		if (ah->ah_version != AR5K_AR5210)
			AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(i), i);

		ret = ath5k_hw_reset_tx_queue(ah, i);
		if (ret) {
			ATH5K_ERR(ah->ah_sc,
				"failed to reset TX queue #%d\n", i);
			return ret;
		}
	}

	/* Pre-enable interrupts on 5211/5212*/
	if (ah->ah_version != AR5K_AR5210)
		ath5k_hw_set_imr(ah, AR5K_INT_RX | AR5K_INT_TX |
			AR5K_INT_FATAL);

	/*
	 * Set RF kill flags if supported by the device (read from the EEPROM)
	 * Disable gpio_intr for now since it results in a system hang.
	 * TODO: Handle this in ath5k_intr
	 */
#if 0
	if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header)) {
		ath5k_hw_set_gpio_input(ah, 0);
		ah->ah_gpio[0] = ath5k_hw_get_gpio(ah, 0);
		if (ah->ah_gpio[0] == 0)
			ath5k_hw_set_gpio_intr(ah, 0, 1);
		else
			ath5k_hw_set_gpio_intr(ah, 0, 0);
	}
#endif

	/*
	 * Set the 32MHz reference clock on 5212 phy clock sleep register
	 *
	 * TODO: Find out how to switch to external 32Khz clock to save power
	 */
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
		ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
		ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
		ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
		ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
		ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);

		data = ath5k_hw_reg_read(ah, AR5K_USEC_5211) & 0xffffc07f ;
		data |= (ah->ah_phy_spending == AR5K_PHY_SPENDING_18) ?
			0x00000f80 : 0x00001380 ;
		ath5k_hw_reg_write(ah, data, AR5K_USEC_5211);
		data = 0;
	}

	/* More undocumented AR5212 magic from the binary HAL */
	if (ah->ah_version == AR5K_AR5212) {
		ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
		ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
		ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
		if (ah->ah_mac_srev >= AR5K_SREV_AR2413)
			ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
	}

	/*
	 * Disable beacons and reset the register
	 */
	AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE |
		AR5K_BEACON_RESET_TSF);

	return 0;
}
930
931#undef _ATH5K_RESET
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
index 9e19dcceb3a2..80a692430413 100644
--- a/drivers/net/wireless/ath9k/Kconfig
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -1,6 +1,9 @@
1config ATH9K 1config ATH9K
2 tristate "Atheros 802.11n wireless cards support" 2 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 3 depends on PCI && MAC80211 && WLAN_80211
4 select MAC80211_LEDS
5 select LEDS_CLASS
6 select NEW_LEDS
4 ---help--- 7 ---help---
5 This module adds support for wireless adapters based on 8 This module adds support for wireless adapters based on
6 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets. 9 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index d1b0fbae5a32..0e897c276858 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -144,6 +144,7 @@ struct ath_desc {
144#define ATH9K_TXDESC_EXT_AND_CTL 0x0080 144#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
145#define ATH9K_TXDESC_VMF 0x0100 145#define ATH9K_TXDESC_VMF 0x0100
146#define ATH9K_TXDESC_FRAG_IS_ON 0x0200 146#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
147#define ATH9K_TXDESC_CAB 0x0400
147 148
148#define ATH9K_RXDESC_INTREQ 0x0020 149#define ATH9K_RXDESC_INTREQ 0x0020
149 150
@@ -564,8 +565,6 @@ enum ath9k_cipher {
564#define CTL_5GHT40 8 565#define CTL_5GHT40 8
565 566
566#define AR_EEPROM_MAC(i) (0x1d+(i)) 567#define AR_EEPROM_MAC(i) (0x1d+(i))
567#define EEP_SCALE 100
568#define EEP_DELTA 10
569 568
570#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c 569#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c
571#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2 570#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2
@@ -606,9 +605,6 @@ struct ath9k_country_entry {
606#define REG_CLR_BIT(_a, _r, _f) \ 605#define REG_CLR_BIT(_a, _r, _f) \
607 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f) 606 REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f)
608 607
609#define ATH9K_COMP_BUF_MAX_SIZE 9216
610#define ATH9K_COMP_BUF_ALIGN_SIZE 512
611
612#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001 608#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
613 609
614#define INIT_AIFS 2 610#define INIT_AIFS 2
@@ -632,12 +628,6 @@ struct ath9k_country_entry {
632 (IEEE80211_WEP_IVLEN + \ 628 (IEEE80211_WEP_IVLEN + \
633 IEEE80211_WEP_KIDLEN + \ 629 IEEE80211_WEP_KIDLEN + \
634 IEEE80211_WEP_CRCLEN)) 630 IEEE80211_WEP_CRCLEN))
635#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \
636 (IEEE80211_WEP_IVLEN + \
637 IEEE80211_WEP_KIDLEN + \
638 IEEE80211_WEP_CRCLEN))
639
640#define MAX_REG_ADD_COUNT 129
641#define MAX_RATE_POWER 63 631#define MAX_RATE_POWER 63
642 632
643enum ath9k_power_mode { 633enum ath9k_power_mode {
@@ -707,13 +697,6 @@ enum phytype {
707}; 697};
708#define PHY_CCK PHY_DS 698#define PHY_CCK PHY_DS
709 699
710enum start_adhoc_option {
711 START_ADHOC_NO_11A,
712 START_ADHOC_PER_11D,
713 START_ADHOC_IN_11A,
714 START_ADHOC_IN_11B,
715};
716
717enum ath9k_tp_scale { 700enum ath9k_tp_scale {
718 ATH9K_TP_SCALE_MAX = 0, 701 ATH9K_TP_SCALE_MAX = 0,
719 ATH9K_TP_SCALE_50, 702 ATH9K_TP_SCALE_50,
@@ -769,14 +752,11 @@ struct ath9k_node_stats {
769 752
770#define ATH9K_RSSI_EP_MULTIPLIER (1<<7) 753#define ATH9K_RSSI_EP_MULTIPLIER (1<<7)
771 754
772enum ath9k_gpio_output_mux_type { 755#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
773 ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT, 756#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
774 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED, 757#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
775 ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED, 758#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
776 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED, 759#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
777 ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
778 ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES
779};
780 760
781enum { 761enum {
782 ATH9K_RESET_POWER_ON, 762 ATH9K_RESET_POWER_ON,
@@ -790,19 +770,20 @@ struct ath_hal {
790 u32 ah_magic; 770 u32 ah_magic;
791 u16 ah_devid; 771 u16 ah_devid;
792 u16 ah_subvendorid; 772 u16 ah_subvendorid;
793 struct ath_softc *ah_sc;
794 void __iomem *ah_sh;
795 u16 ah_countryCode;
796 u32 ah_macVersion; 773 u32 ah_macVersion;
797 u16 ah_macRev; 774 u16 ah_macRev;
798 u16 ah_phyRev; 775 u16 ah_phyRev;
799 u16 ah_analog5GhzRev; 776 u16 ah_analog5GhzRev;
800 u16 ah_analog2GhzRev; 777 u16 ah_analog2GhzRev;
801 u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE]; 778
802 u32 ah_flags; 779 void __iomem *ah_sh;
780 struct ath_softc *ah_sc;
803 enum ath9k_opmode ah_opmode; 781 enum ath9k_opmode ah_opmode;
804 struct ath9k_ops_config ah_config; 782 struct ath9k_ops_config ah_config;
805 struct ath9k_hw_capabilities ah_caps; 783 struct ath9k_hw_capabilities ah_caps;
784
785 u16 ah_countryCode;
786 u32 ah_flags;
806 int16_t ah_powerLimit; 787 int16_t ah_powerLimit;
807 u16 ah_maxPowerLevel; 788 u16 ah_maxPowerLevel;
808 u32 ah_tpScale; 789 u32 ah_tpScale;
@@ -812,15 +793,17 @@ struct ath_hal {
812 u16 ah_currentRD5G; 793 u16 ah_currentRD5G;
813 u16 ah_currentRD2G; 794 u16 ah_currentRD2G;
814 char ah_iso[4]; 795 char ah_iso[4];
815 enum start_adhoc_option ah_adHocMode; 796
816 bool ah_commonMode;
817 struct ath9k_channel ah_channels[150]; 797 struct ath9k_channel ah_channels[150];
818 u32 ah_nchan;
819 struct ath9k_channel *ah_curchan; 798 struct ath9k_channel *ah_curchan;
820 u16 ah_rfsilent; 799 u32 ah_nchan;
821 bool ah_rfkillEnabled; 800
822 bool ah_isPciExpress; 801 bool ah_isPciExpress;
823 u16 ah_txTrigLevel; 802 u16 ah_txTrigLevel;
803 u16 ah_rfsilent;
804 u32 ah_rfkill_gpio;
805 u32 ah_rfkill_polarity;
806
824#ifndef ATH_NF_PER_CHAN 807#ifndef ATH_NF_PER_CHAN
825 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; 808 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
826#endif 809#endif
@@ -853,7 +836,7 @@ bool ath9k_regd_init_channels(struct ath_hal *ah,
853u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags); 836u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
854enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, 837enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah,
855 enum ath9k_int ints); 838 enum ath9k_int ints);
856bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode, 839bool ath9k_hw_reset(struct ath_hal *ah,
857 struct ath9k_channel *chan, 840 struct ath9k_channel *chan,
858 enum ath9k_ht_macmode macmode, 841 enum ath9k_ht_macmode macmode,
859 u8 txchainmask, u8 rxchainmask, 842 u8 txchainmask, u8 rxchainmask,
@@ -1018,4 +1001,9 @@ void ath9k_hw_get_channel_centers(struct ath_hal *ah,
1018bool ath9k_get_channel_edges(struct ath_hal *ah, 1001bool ath9k_get_channel_edges(struct ath_hal *ah,
1019 u16 flags, u16 *low, 1002 u16 flags, u16 *low,
1020 u16 *high); 1003 u16 *high);
1004void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
1005 u32 ah_signal_type);
1006void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 value);
1007u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio);
1008void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio);
1021#endif 1009#endif
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 00a0eaa08866..eedb465d25d3 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -16,7 +16,6 @@
16 16
17 /* Implementation of beacon processing. */ 17 /* Implementation of beacon processing. */
18 18
19#include <asm/unaligned.h>
20#include "core.h" 19#include "core.h"
21 20
22/* 21/*
@@ -26,14 +25,13 @@
26 * the operating mode of the station (AP or AdHoc). Parameters are AIFS 25 * the operating mode of the station (AP or AdHoc). Parameters are AIFS
27 * settings and channel width min/max 26 * settings and channel width min/max
28*/ 27*/
29
30static int ath_beaconq_config(struct ath_softc *sc) 28static int ath_beaconq_config(struct ath_softc *sc)
31{ 29{
32 struct ath_hal *ah = sc->sc_ah; 30 struct ath_hal *ah = sc->sc_ah;
33 struct ath9k_tx_queue_info qi; 31 struct ath9k_tx_queue_info qi;
34 32
35 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi); 33 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
36 if (sc->sc_opmode == ATH9K_M_HOSTAP) { 34 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
37 /* Always burst out beacon and CAB traffic. */ 35 /* Always burst out beacon and CAB traffic. */
38 qi.tqi_aifs = 1; 36 qi.tqi_aifs = 1;
39 qi.tqi_cwmin = 0; 37 qi.tqi_cwmin = 0;
@@ -63,19 +61,18 @@ static int ath_beaconq_config(struct ath_softc *sc)
63 * up all required antenna switch parameters, rate codes, and channel flags. 61 * up all required antenna switch parameters, rate codes, and channel flags.
64 * Beacons are always sent out at the lowest rate, and are not retried. 62 * Beacons are always sent out at the lowest rate, and are not retried.
65*/ 63*/
66
67static void ath_beacon_setup(struct ath_softc *sc, 64static void ath_beacon_setup(struct ath_softc *sc,
68 struct ath_vap *avp, struct ath_buf *bf) 65 struct ath_vap *avp, struct ath_buf *bf)
69{ 66{
70 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 67 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
71 struct ath_hal *ah = sc->sc_ah; 68 struct ath_hal *ah = sc->sc_ah;
72 struct ath_desc *ds; 69 struct ath_desc *ds;
73 int flags, antenna; 70 struct ath9k_11n_rate_series series[4];
74 const struct ath9k_rate_table *rt; 71 const struct ath9k_rate_table *rt;
72 int flags, antenna;
75 u8 rix, rate; 73 u8 rix, rate;
76 int ctsrate = 0; 74 int ctsrate = 0;
77 int ctsduration = 0; 75 int ctsduration = 0;
78 struct ath9k_11n_rate_series series[4];
79 76
80 DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n", 77 DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n",
81 __func__, skb, skb->len); 78 __func__, skb, skb->len);
@@ -85,7 +82,7 @@ static void ath_beacon_setup(struct ath_softc *sc,
85 82
86 flags = ATH9K_TXDESC_NOACK; 83 flags = ATH9K_TXDESC_NOACK;
87 84
88 if (sc->sc_opmode == ATH9K_M_IBSS && 85 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS &&
89 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 86 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
90 ds->ds_link = bf->bf_daddr; /* self-linked */ 87 ds->ds_link = bf->bf_daddr; /* self-linked */
91 flags |= ATH9K_TXDESC_VEOL; 88 flags |= ATH9K_TXDESC_VEOL;
@@ -111,24 +108,25 @@ static void ath_beacon_setup(struct ath_softc *sc,
111 rix = 0; 108 rix = 0;
112 rt = sc->sc_currates; 109 rt = sc->sc_currates;
113 rate = rt->info[rix].rateCode; 110 rate = rt->info[rix].rateCode;
114 if (sc->sc_flags & ATH_PREAMBLE_SHORT) 111 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
115 rate |= rt->info[rix].shortPreamble; 112 rate |= rt->info[rix].shortPreamble;
116 113
117 ath9k_hw_set11n_txdesc(ah, ds 114 ath9k_hw_set11n_txdesc(ah, ds,
118 , skb->len + FCS_LEN /* frame length */ 115 skb->len + FCS_LEN, /* frame length */
119 , ATH9K_PKT_TYPE_BEACON /* Atheros packet type */ 116 ATH9K_PKT_TYPE_BEACON, /* Atheros packet type */
120 , avp->av_btxctl.txpower /* txpower XXX */ 117 avp->av_btxctl.txpower, /* txpower XXX */
121 , ATH9K_TXKEYIX_INVALID /* no encryption */ 118 ATH9K_TXKEYIX_INVALID, /* no encryption */
122 , ATH9K_KEY_TYPE_CLEAR /* no encryption */ 119 ATH9K_KEY_TYPE_CLEAR, /* no encryption */
123 , flags /* no ack, veol for beacons */ 120 flags /* no ack,
121 veol for beacons */
124 ); 122 );
125 123
126 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 124 /* NB: beacon's BufLen must be a multiple of 4 bytes */
127 ath9k_hw_filltxdesc(ah, ds 125 ath9k_hw_filltxdesc(ah, ds,
128 , roundup(skb->len, 4) /* buffer length */ 126 roundup(skb->len, 4), /* buffer length */
129 , true /* first segment */ 127 true, /* first segment */
130 , true /* last segment */ 128 true, /* last segment */
131 , ds /* first descriptor */ 129 ds /* first descriptor */
132 ); 130 );
133 131
134 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4); 132 memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
@@ -140,55 +138,6 @@ static void ath_beacon_setup(struct ath_softc *sc,
140 ctsrate, ctsduration, series, 4, 0); 138 ctsrate, ctsduration, series, 4, 0);
141} 139}
142 140
143/* Move everything from the vap's mcast queue to the hardware cab queue.
144 * Caller must hold mcasq lock and cabq lock
145 * XXX MORE_DATA bit?
146 */
147static void empty_mcastq_into_cabq(struct ath_hal *ah,
148 struct ath_txq *mcastq, struct ath_txq *cabq)
149{
150 struct ath_buf *bfmcast;
151
152 BUG_ON(list_empty(&mcastq->axq_q));
153
154 bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list);
155
156 /* link the descriptors */
157 if (!cabq->axq_link)
158 ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr);
159 else
160 *cabq->axq_link = bfmcast->bf_daddr;
161
162 /* append the private vap mcast list to the cabq */
163
164 cabq->axq_depth += mcastq->axq_depth;
165 cabq->axq_totalqueued += mcastq->axq_totalqueued;
166 cabq->axq_linkbuf = mcastq->axq_linkbuf;
167 cabq->axq_link = mcastq->axq_link;
168 list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q);
169 mcastq->axq_depth = 0;
170 mcastq->axq_totalqueued = 0;
171 mcastq->axq_linkbuf = NULL;
172 mcastq->axq_link = NULL;
173}
174
175/* This is only run at DTIM. We move everything from the vap's mcast queue
176 * to the hardware cab queue. Caller must hold the mcastq lock. */
177static void trigger_mcastq(struct ath_hal *ah,
178 struct ath_txq *mcastq, struct ath_txq *cabq)
179{
180 spin_lock_bh(&cabq->axq_lock);
181
182 if (!list_empty(&mcastq->axq_q))
183 empty_mcastq_into_cabq(ah, mcastq, cabq);
184
185 /* cabq is gated by beacon so it is safe to start here */
186 if (!list_empty(&cabq->axq_q))
187 ath9k_hw_txstart(ah, cabq->axq_qnum);
188
189 spin_unlock_bh(&cabq->axq_lock);
190}
191
192/* 141/*
193 * Generate beacon frame and queue cab data for a vap. 142 * Generate beacon frame and queue cab data for a vap.
194 * 143 *
@@ -199,39 +148,36 @@ static void trigger_mcastq(struct ath_hal *ah,
199*/ 148*/
200static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id) 149static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
201{ 150{
202 struct ath_hal *ah = sc->sc_ah;
203 struct ath_buf *bf; 151 struct ath_buf *bf;
204 struct ath_vap *avp; 152 struct ath_vap *avp;
205 struct sk_buff *skb; 153 struct sk_buff *skb;
206 int cabq_depth;
207 int mcastq_depth;
208 int is_beacon_dtim = 0;
209 unsigned int curlen;
210 struct ath_txq *cabq; 154 struct ath_txq *cabq;
211 struct ath_txq *mcastq;
212 struct ieee80211_tx_info *info; 155 struct ieee80211_tx_info *info;
156 int cabq_depth;
157
213 avp = sc->sc_vaps[if_id]; 158 avp = sc->sc_vaps[if_id];
159 ASSERT(avp);
214 160
215 mcastq = &avp->av_mcastq;
216 cabq = sc->sc_cabq; 161 cabq = sc->sc_cabq;
217 162
218 ASSERT(avp);
219
220 if (avp->av_bcbuf == NULL) { 163 if (avp->av_bcbuf == NULL) {
221 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n", 164 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n",
222 __func__, avp, avp->av_bcbuf); 165 __func__, avp, avp->av_bcbuf);
223 return NULL; 166 return NULL;
224 } 167 }
168
225 bf = avp->av_bcbuf; 169 bf = avp->av_bcbuf;
226 skb = (struct sk_buff *) bf->bf_mpdu; 170 skb = (struct sk_buff *)bf->bf_mpdu;
171 if (skb) {
172 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
173 skb_end_pointer(skb) - skb->head,
174 PCI_DMA_TODEVICE);
175 }
227 176
228 /* 177 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
229 * Update dynamic beacon contents. If this returns 178 bf->bf_mpdu = skb;
230 * non-zero then we need to remap the memory because 179 if (skb == NULL)
231 * the beacon frame changed size (probably because 180 return NULL;
232 * of the TIM bitmap).
233 */
234 curlen = skb->len;
235 181
236 info = IEEE80211_SKB_CB(skb); 182 info = IEEE80211_SKB_CB(skb);
237 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 183 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -239,29 +185,18 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
239 * TODO: make sure the seq# gets assigned properly (vs. other 185 * TODO: make sure the seq# gets assigned properly (vs. other
240 * TX frames) 186 * TX frames)
241 */ 187 */
242 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 188 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
243 sc->seq_no += 0x10; 189 sc->seq_no += 0x10;
244 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 190 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
245 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no); 191 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
246 } 192 }
247 193
248 /* XXX: spin_lock_bh should not be used here, but sparse bitches 194 bf->bf_buf_addr = bf->bf_dmacontext =
249 * otherwise. We should fix sparse :) */ 195 pci_map_single(sc->pdev, skb->data,
250 spin_lock_bh(&mcastq->axq_lock); 196 skb_end_pointer(skb) - skb->head,
251 mcastq_depth = avp->av_mcastq.axq_depth; 197 PCI_DMA_TODEVICE);
252 198
253 if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) == 199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
254 1) {
255 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
256 get_dma_mem_context(bf, bf_dmacontext));
257 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE,
258 get_dma_mem_context(bf, bf_dmacontext));
259 } else {
260 pci_dma_sync_single_for_cpu(sc->pdev,
261 bf->bf_buf_addr,
262 skb_tailroom(skb),
263 PCI_DMA_TODEVICE);
264 }
265 200
266 /* 201 /*
267 * if the CABQ traffic from previous DTIM is pending and the current 202 * if the CABQ traffic from previous DTIM is pending and the current
@@ -275,9 +210,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
275 cabq_depth = cabq->axq_depth; 210 cabq_depth = cabq->axq_depth;
276 spin_unlock_bh(&cabq->axq_lock); 211 spin_unlock_bh(&cabq->axq_lock);
277 212
278 is_beacon_dtim = avp->av_boff.bo_tim[4] & 1; 213 if (skb && cabq_depth) {
279
280 if (mcastq_depth && is_beacon_dtim && cabq_depth) {
281 /* 214 /*
282 * Unlock the cabq lock as ath_tx_draintxq acquires 215 * Unlock the cabq lock as ath_tx_draintxq acquires
283 * the lock again which is a common function and that 216 * the lock again which is a common function and that
@@ -297,10 +230,11 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
297 * Enable the CAB queue before the beacon queue to 230 * Enable the CAB queue before the beacon queue to
298 * insure cab frames are triggered by this beacon. 231 * insure cab frames are triggered by this beacon.
299 */ 232 */
300 if (is_beacon_dtim) 233 while (skb) {
301 trigger_mcastq(ah, mcastq, cabq); 234 ath_tx_cabq(sc, skb);
235 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
236 }
302 237
303 spin_unlock_bh(&mcastq->axq_lock);
304 return bf; 238 return bf;
305} 239}
306 240
@@ -308,7 +242,6 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
308 * Startup beacon transmission for adhoc mode when they are sent entirely 242 * Startup beacon transmission for adhoc mode when they are sent entirely
309 * by the hardware using the self-linked descriptor + veol trick. 243 * by the hardware using the self-linked descriptor + veol trick.
310*/ 244*/
311
312static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id) 245static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
313{ 246{
314 struct ath_hal *ah = sc->sc_ah; 247 struct ath_hal *ah = sc->sc_ah;
@@ -345,7 +278,6 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
345 * min/max, and enable aifs). The info structure does not need to be 278 * min/max, and enable aifs). The info structure does not need to be
346 * persistant. 279 * persistant.
347*/ 280*/
348
349int ath_beaconq_setup(struct ath_hal *ah) 281int ath_beaconq_setup(struct ath_hal *ah)
350{ 282{
351 struct ath9k_tx_queue_info qi; 283 struct ath9k_tx_queue_info qi;
@@ -366,29 +298,27 @@ int ath_beaconq_setup(struct ath_hal *ah)
366 * the ATH interface. This routine also calculates the beacon "slot" for 298 * the ATH interface. This routine also calculates the beacon "slot" for
367 * staggared beacons in the mBSSID case. 299 * staggared beacons in the mBSSID case.
368*/ 300*/
369
370int ath_beacon_alloc(struct ath_softc *sc, int if_id) 301int ath_beacon_alloc(struct ath_softc *sc, int if_id)
371{ 302{
372 struct ath_vap *avp; 303 struct ath_vap *avp;
373 struct ieee80211_hdr *wh; 304 struct ieee80211_hdr *hdr;
374 struct ath_buf *bf; 305 struct ath_buf *bf;
375 struct sk_buff *skb; 306 struct sk_buff *skb;
307 __le64 tstamp;
376 308
377 avp = sc->sc_vaps[if_id]; 309 avp = sc->sc_vaps[if_id];
378 ASSERT(avp); 310 ASSERT(avp);
379 311
380 /* Allocate a beacon descriptor if we haven't done so. */ 312 /* Allocate a beacon descriptor if we haven't done so. */
381 if (!avp->av_bcbuf) { 313 if (!avp->av_bcbuf) {
382 /* 314 /* Allocate beacon state for hostap/ibss. We know
383 * Allocate beacon state for hostap/ibss. We know 315 * a buffer is available. */
384 * a buffer is available.
385 */
386 316
387 avp->av_bcbuf = list_first_entry(&sc->sc_bbuf, 317 avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
388 struct ath_buf, list); 318 struct ath_buf, list);
389 list_del(&avp->av_bcbuf->list); 319 list_del(&avp->av_bcbuf->list);
390 320
391 if (sc->sc_opmode == ATH9K_M_HOSTAP || 321 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP ||
392 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 322 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
393 int slot; 323 int slot;
394 /* 324 /*
@@ -421,17 +351,16 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
421 bf = avp->av_bcbuf; 351 bf = avp->av_bcbuf;
422 if (bf->bf_mpdu != NULL) { 352 if (bf->bf_mpdu != NULL) {
423 skb = (struct sk_buff *)bf->bf_mpdu; 353 skb = (struct sk_buff *)bf->bf_mpdu;
424 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, 354 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
425 get_dma_mem_context(bf, bf_dmacontext)); 355 skb_end_pointer(skb) - skb->head,
356 PCI_DMA_TODEVICE);
426 dev_kfree_skb_any(skb); 357 dev_kfree_skb_any(skb);
427 bf->bf_mpdu = NULL; 358 bf->bf_mpdu = NULL;
428 } 359 }
429 360
430 /* 361 /*
431 * NB: the beacon data buffer must be 32-bit aligned; 362 * NB: the beacon data buffer must be 32-bit aligned.
432 * we assume the wbuf routines will return us something 363 * FIXME: Fill avp->av_btxctl.txpower and
433 * with this alignment (perhaps should assert).
434 * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and
435 * avp->av_btxctl.shortPreamble 364 * avp->av_btxctl.shortPreamble
436 */ 365 */
437 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data); 366 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data);
@@ -441,6 +370,9 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
441 return -ENOMEM; 370 return -ENOMEM;
442 } 371 }
443 372
373 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
374 sc->bc_tstamp = le64_to_cpu(tstamp);
375
444 /* 376 /*
445 * Calculate a TSF adjustment factor required for 377 * Calculate a TSF adjustment factor required for
446 * staggered beacons. Note that we assume the format 378 * staggered beacons. Note that we assume the format
@@ -452,9 +384,8 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
452 __le64 val; 384 __le64 val;
453 int intval; 385 int intval;
454 386
455 /* FIXME: Use default value for now: Sujith */ 387 intval = sc->hw->conf.beacon_int ?
456 388 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
457 intval = ATH_DEFAULT_BINTVAL;
458 389
459 /* 390 /*
460 * The beacon interval is in TU's; the TSF in usecs. 391 * The beacon interval is in TU's; the TSF in usecs.
@@ -475,12 +406,14 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
475 __func__, "stagger", 406 __func__, "stagger",
476 avp->av_bslot, intval, (unsigned long long)tsfadjust); 407 avp->av_bslot, intval, (unsigned long long)tsfadjust);
477 408
478 wh = (struct ieee80211_hdr *)skb->data; 409 hdr = (struct ieee80211_hdr *)skb->data;
479 memcpy(&wh[1], &val, sizeof(val)); 410 memcpy(&hdr[1], &val, sizeof(val));
480 } 411 }
481 412
482 bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE, 413 bf->bf_buf_addr = bf->bf_dmacontext =
483 get_dma_mem_context(bf, bf_dmacontext)); 414 pci_map_single(sc->pdev, skb->data,
415 skb_end_pointer(skb) - skb->head,
416 PCI_DMA_TODEVICE);
484 bf->bf_mpdu = skb; 417 bf->bf_mpdu = skb;
485 418
486 return 0; 419 return 0;
@@ -490,9 +423,8 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
490 * Reclaim beacon resources and return buffer to the pool. 423 * Reclaim beacon resources and return buffer to the pool.
491 * 424 *
492 * Checks the VAP to put the beacon frame buffer back to the ATH object 425 * Checks the VAP to put the beacon frame buffer back to the ATH object
493 * queue, and de-allocates any wbuf frames that were sent as CAB traffic. 426 * queue, and de-allocates any skbs that were sent as CAB traffic.
494*/ 427*/
495
496void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp) 428void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
497{ 429{
498 if (avp->av_bcbuf != NULL) { 430 if (avp->av_bcbuf != NULL) {
@@ -506,8 +438,9 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
506 bf = avp->av_bcbuf; 438 bf = avp->av_bcbuf;
507 if (bf->bf_mpdu != NULL) { 439 if (bf->bf_mpdu != NULL) {
508 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; 440 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
509 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, 441 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
510 get_dma_mem_context(bf, bf_dmacontext)); 442 skb_end_pointer(skb) - skb->head,
443 PCI_DMA_TODEVICE);
511 dev_kfree_skb_any(skb); 444 dev_kfree_skb_any(skb);
512 bf->bf_mpdu = NULL; 445 bf->bf_mpdu = NULL;
513 } 446 }
@@ -518,44 +451,14 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
518} 451}
519 452
520/* 453/*
521 * Reclaim beacon resources and return buffer to the pool.
522 *
523 * This function will free any wbuf frames that are still attached to the
524 * beacon buffers in the ATH object. Note that this does not de-allocate
525 * any wbuf objects that are in the transmit queue and have not yet returned
526 * to the ATH object.
527*/
528
529void ath_beacon_free(struct ath_softc *sc)
530{
531 struct ath_buf *bf;
532
533 list_for_each_entry(bf, &sc->sc_bbuf, list) {
534 if (bf->bf_mpdu != NULL) {
535 struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu;
536 ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE,
537 get_dma_mem_context(bf, bf_dmacontext));
538 dev_kfree_skb_any(skb);
539 bf->bf_mpdu = NULL;
540 }
541 }
542}
543
544/*
545 * Tasklet for Sending Beacons 454 * Tasklet for Sending Beacons
546 * 455 *
547 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame 456 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
548 * contents are done as needed and the slot time is also adjusted based on 457 * contents are done as needed and the slot time is also adjusted based on
549 * current state. 458 * current state.
550 *
551 * This tasklet is not scheduled, it's called in ISR context.
552*/ 459*/
553
554void ath9k_beacon_tasklet(unsigned long data) 460void ath9k_beacon_tasklet(unsigned long data)
555{ 461{
556#define TSF_TO_TU(_h,_l) \
557 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
558
559 struct ath_softc *sc = (struct ath_softc *)data; 462 struct ath_softc *sc = (struct ath_softc *)data;
560 struct ath_hal *ah = sc->sc_ah; 463 struct ath_hal *ah = sc->sc_ah;
561 struct ath_buf *bf = NULL; 464 struct ath_buf *bf = NULL;
@@ -568,7 +471,7 @@ void ath9k_beacon_tasklet(unsigned long data)
568 u32 tsftu; 471 u32 tsftu;
569 u16 intval; 472 u16 intval;
570 473
571 if (sc->sc_noreset) { 474 if (sc->sc_flags & SC_OP_NO_RESET) {
572 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah, 475 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
573 &rx_clear, 476 &rx_clear,
574 &rx_frame, 477 &rx_frame,
@@ -581,6 +484,8 @@ void ath9k_beacon_tasklet(unsigned long data)
581 * and wait for the next. Missed beacons indicate 484 * and wait for the next. Missed beacons indicate
582 * a problem and should not occur. If we miss too 485 * a problem and should not occur. If we miss too
583 * many consecutive beacons reset the device. 486 * many consecutive beacons reset the device.
487 *
488 * FIXME: Clean up this mess !!
584 */ 489 */
585 if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) { 490 if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
586 sc->sc_bmisscount++; 491 sc->sc_bmisscount++;
@@ -590,25 +495,22 @@ void ath9k_beacon_tasklet(unsigned long data)
590 * (in that layer). 495 * (in that layer).
591 */ 496 */
592 if (sc->sc_bmisscount < BSTUCK_THRESH) { 497 if (sc->sc_bmisscount < BSTUCK_THRESH) {
593 if (sc->sc_noreset) { 498 if (sc->sc_flags & SC_OP_NO_RESET) {
594 DPRINTF(sc, ATH_DBG_BEACON, 499 DPRINTF(sc, ATH_DBG_BEACON,
595 "%s: missed %u consecutive beacons\n", 500 "%s: missed %u consecutive beacons\n",
596 __func__, sc->sc_bmisscount); 501 __func__, sc->sc_bmisscount);
597 if (show_cycles) { 502 if (show_cycles) {
598 /* 503 /*
599 * Display cycle counter stats 504 * Display cycle counter stats from HW
600 * from HW to aide in debug of 505 * to aide in debug of stickiness.
601 * stickiness.
602 */ 506 */
603 DPRINTF(sc, 507 DPRINTF(sc, ATH_DBG_BEACON,
604 ATH_DBG_BEACON,
605 "%s: busy times: rx_clear=%d, " 508 "%s: busy times: rx_clear=%d, "
606 "rx_frame=%d, tx_frame=%d\n", 509 "rx_frame=%d, tx_frame=%d\n",
607 __func__, rx_clear, rx_frame, 510 __func__, rx_clear, rx_frame,
608 tx_frame); 511 tx_frame);
609 } else { 512 } else {
610 DPRINTF(sc, 513 DPRINTF(sc, ATH_DBG_BEACON,
611 ATH_DBG_BEACON,
612 "%s: unable to obtain " 514 "%s: unable to obtain "
613 "busy times\n", __func__); 515 "busy times\n", __func__);
614 } 516 }
@@ -618,10 +520,9 @@ void ath9k_beacon_tasklet(unsigned long data)
618 __func__, sc->sc_bmisscount); 520 __func__, sc->sc_bmisscount);
619 } 521 }
620 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) { 522 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
621 if (sc->sc_noreset) { 523 if (sc->sc_flags & SC_OP_NO_RESET) {
622 if (sc->sc_bmisscount == BSTUCK_THRESH) { 524 if (sc->sc_bmisscount == BSTUCK_THRESH) {
623 DPRINTF(sc, 525 DPRINTF(sc, ATH_DBG_BEACON,
624 ATH_DBG_BEACON,
625 "%s: beacon is officially " 526 "%s: beacon is officially "
626 "stuck\n", __func__); 527 "stuck\n", __func__);
627 ath9k_hw_dmaRegDump(ah); 528 ath9k_hw_dmaRegDump(ah);
@@ -633,13 +534,12 @@ void ath9k_beacon_tasklet(unsigned long data)
633 ath_bstuck_process(sc); 534 ath_bstuck_process(sc);
634 } 535 }
635 } 536 }
636
637 return; 537 return;
638 } 538 }
539
639 if (sc->sc_bmisscount != 0) { 540 if (sc->sc_bmisscount != 0) {
640 if (sc->sc_noreset) { 541 if (sc->sc_flags & SC_OP_NO_RESET) {
641 DPRINTF(sc, 542 DPRINTF(sc, ATH_DBG_BEACON,
642 ATH_DBG_BEACON,
643 "%s: resume beacon xmit after %u misses\n", 543 "%s: resume beacon xmit after %u misses\n",
644 __func__, sc->sc_bmisscount); 544 __func__, sc->sc_bmisscount);
645 } else { 545 } else {
@@ -656,17 +556,19 @@ void ath9k_beacon_tasklet(unsigned long data)
656 * on the tsf to safeguard against missing an swba. 556 * on the tsf to safeguard against missing an swba.
657 */ 557 */
658 558
659 /* FIXME: Use default value for now - Sujith */ 559 intval = sc->hw->conf.beacon_int ?
660 intval = ATH_DEFAULT_BINTVAL; 560 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
661 561
662 tsf = ath9k_hw_gettsf64(ah); 562 tsf = ath9k_hw_gettsf64(ah);
663 tsftu = TSF_TO_TU(tsf>>32, tsf); 563 tsftu = TSF_TO_TU(tsf>>32, tsf);
664 slot = ((tsftu % intval) * ATH_BCBUF) / intval; 564 slot = ((tsftu % intval) * ATH_BCBUF) / intval;
665 if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF]; 565 if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
566
666 DPRINTF(sc, ATH_DBG_BEACON, 567 DPRINTF(sc, ATH_DBG_BEACON,
667 "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n", 568 "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
668 __func__, slot, (unsigned long long) tsf, tsftu, 569 __func__, slot, (unsigned long long)tsf, tsftu,
669 intval, if_id); 570 intval, if_id);
571
670 bfaddr = 0; 572 bfaddr = 0;
671 if (if_id != ATH_IF_ID_ANY) { 573 if (if_id != ATH_IF_ID_ANY) {
672 bf = ath_beacon_generate(sc, if_id); 574 bf = ath_beacon_generate(sc, if_id);
@@ -717,22 +619,20 @@ void ath9k_beacon_tasklet(unsigned long data)
717 619
718 sc->ast_be_xmit += bc; /* XXX per-vap? */ 620 sc->ast_be_xmit += bc; /* XXX per-vap? */
719 } 621 }
720#undef TSF_TO_TU
721} 622}
722 623
723/* 624/*
724 * Tasklet for Beacon Stuck processing 625 * Tasklet for Beacon Stuck processing
725 * 626 *
726 * Processing for Beacon Stuck. 627 * Processing for Beacon Stuck.
727 * Basically calls the ath_internal_reset function to reset the chip. 628 * Basically resets the chip.
728*/ 629*/
729
730void ath_bstuck_process(struct ath_softc *sc) 630void ath_bstuck_process(struct ath_softc *sc)
731{ 631{
732 DPRINTF(sc, ATH_DBG_BEACON, 632 DPRINTF(sc, ATH_DBG_BEACON,
733 "%s: stuck beacon; resetting (bmiss count %u)\n", 633 "%s: stuck beacon; resetting (bmiss count %u)\n",
734 __func__, sc->sc_bmisscount); 634 __func__, sc->sc_bmisscount);
735 ath_internal_reset(sc); 635 ath_reset(sc, false);
736} 636}
737 637
738/* 638/*
@@ -750,40 +650,32 @@ void ath_bstuck_process(struct ath_softc *sc)
750 * interrupt when we stop seeing beacons from the AP 650 * interrupt when we stop seeing beacons from the AP
751 * we've associated with. 651 * we've associated with.
752 */ 652 */
753
754void ath_beacon_config(struct ath_softc *sc, int if_id) 653void ath_beacon_config(struct ath_softc *sc, int if_id)
755{ 654{
756#define TSF_TO_TU(_h,_l) \
757 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
758 struct ath_hal *ah = sc->sc_ah; 655 struct ath_hal *ah = sc->sc_ah;
759 u32 nexttbtt, intval;
760 struct ath_beacon_config conf; 656 struct ath_beacon_config conf;
761 enum ath9k_opmode av_opmode; 657 enum ath9k_opmode av_opmode;
658 u32 nexttbtt, intval;
762 659
763 if (if_id != ATH_IF_ID_ANY) 660 if (if_id != ATH_IF_ID_ANY)
764 av_opmode = sc->sc_vaps[if_id]->av_opmode; 661 av_opmode = sc->sc_vaps[if_id]->av_opmode;
765 else 662 else
766 av_opmode = sc->sc_opmode; 663 av_opmode = sc->sc_ah->ah_opmode;
767 664
768 memzero(&conf, sizeof(struct ath_beacon_config)); 665 memzero(&conf, sizeof(struct ath_beacon_config));
769 666
770 /* FIXME: Use default values for now - Sujith */ 667 conf.beacon_interval = sc->hw->conf.beacon_int ?
771 /* Query beacon configuration first */ 668 sc->hw->conf.beacon_int : ATH_DEFAULT_BINTVAL;
772 /*
773 * Protocol stack doesn't support dynamic beacon configuration,
774 * use default configurations.
775 */
776 conf.beacon_interval = ATH_DEFAULT_BINTVAL;
777 conf.listen_interval = 1; 669 conf.listen_interval = 1;
778 conf.dtim_period = conf.beacon_interval; 670 conf.dtim_period = conf.beacon_interval;
779 conf.dtim_count = 1; 671 conf.dtim_count = 1;
780 conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval; 672 conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
781 673
782 /* extract tstamp from last beacon and convert to TU */ 674 /* extract tstamp from last beacon and convert to TU */
783 nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4), 675 nexttbtt = TSF_TO_TU(sc->bc_tstamp >> 32, sc->bc_tstamp);
784 get_unaligned_le32(conf.u.last_tstamp)); 676
785 /* XXX conditionalize multi-bss support? */ 677 /* XXX conditionalize multi-bss support? */
786 if (sc->sc_opmode == ATH9K_M_HOSTAP) { 678 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
787 /* 679 /*
788 * For multi-bss ap support beacons are either staggered 680 * For multi-bss ap support beacons are either staggered
789 * evenly over N slots or burst together. For the former 681 * evenly over N slots or burst together. For the former
@@ -797,14 +689,16 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
797 intval = conf.beacon_interval & ATH9K_BEACON_PERIOD; 689 intval = conf.beacon_interval & ATH9K_BEACON_PERIOD;
798 } 690 }
799 691
800 if (nexttbtt == 0) /* e.g. for ap mode */ 692 if (nexttbtt == 0) /* e.g. for ap mode */
801 nexttbtt = intval; 693 nexttbtt = intval;
802 else if (intval) /* NB: can be 0 for monitor mode */ 694 else if (intval) /* NB: can be 0 for monitor mode */
803 nexttbtt = roundup(nexttbtt, intval); 695 nexttbtt = roundup(nexttbtt, intval);
696
804 DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 697 DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
805 __func__, nexttbtt, intval, conf.beacon_interval); 698 __func__, nexttbtt, intval, conf.beacon_interval);
699
806 /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */ 700 /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */
807 if (sc->sc_opmode == ATH9K_M_STA) { 701 if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
808 struct ath9k_beacon_state bs; 702 struct ath9k_beacon_state bs;
809 u64 tsf; 703 u64 tsf;
810 u32 tsftu; 704 u32 tsftu;
@@ -816,19 +710,19 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
816 * last beacon we received (which may be none). 710 * last beacon we received (which may be none).
817 */ 711 */
818 dtimperiod = conf.dtim_period; 712 dtimperiod = conf.dtim_period;
819 if (dtimperiod <= 0) /* NB: 0 if not known */ 713 if (dtimperiod <= 0) /* NB: 0 if not known */
820 dtimperiod = 1; 714 dtimperiod = 1;
821 dtimcount = conf.dtim_count; 715 dtimcount = conf.dtim_count;
822 if (dtimcount >= dtimperiod) /* NB: sanity check */ 716 if (dtimcount >= dtimperiod) /* NB: sanity check */
823 dtimcount = 0; /* XXX? */ 717 dtimcount = 0;
824 cfpperiod = 1; /* NB: no PCF support yet */ 718 cfpperiod = 1; /* NB: no PCF support yet */
825 cfpcount = 0; 719 cfpcount = 0;
826 720
827 sleepduration = conf.listen_interval * intval; 721 sleepduration = conf.listen_interval * intval;
828 if (sleepduration <= 0) 722 if (sleepduration <= 0)
829 sleepduration = intval; 723 sleepduration = intval;
830 724
831#define FUDGE 2 725#define FUDGE 2
832 /* 726 /*
833 * Pull nexttbtt forward to reflect the current 727 * Pull nexttbtt forward to reflect the current
834 * TSF and calculate dtim+cfp state for the result. 728 * TSF and calculate dtim+cfp state for the result.
@@ -852,6 +746,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
852 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; 746 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
853 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; 747 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
854 bs.bs_cfpmaxduration = 0; 748 bs.bs_cfpmaxduration = 0;
749
855 /* 750 /*
856 * Calculate the number of consecutive beacons to miss 751 * Calculate the number of consecutive beacons to miss
857 * before taking a BMISS interrupt. The configuration 752 * before taking a BMISS interrupt. The configuration
@@ -860,9 +755,8 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
860 * result to at most 15 beacons. 755 * result to at most 15 beacons.
861 */ 756 */
862 if (sleepduration > intval) { 757 if (sleepduration > intval) {
863 bs.bs_bmissthreshold = 758 bs.bs_bmissthreshold = conf.listen_interval *
864 conf.listen_interval * 759 ATH_DEFAULT_BMISS_LIMIT / 2;
865 ATH_DEFAULT_BMISS_LIMIT / 2;
866 } else { 760 } else {
867 bs.bs_bmissthreshold = 761 bs.bs_bmissthreshold =
868 DIV_ROUND_UP(conf.bmiss_timeout, intval); 762 DIV_ROUND_UP(conf.bmiss_timeout, intval);
@@ -882,8 +776,8 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
882 * XXX fixed at 100ms 776 * XXX fixed at 100ms
883 */ 777 */
884 778
885 bs.bs_sleepduration = 779 bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100),
886 roundup(IEEE80211_MS_TO_TU(100), sleepduration); 780 sleepduration);
887 if (bs.bs_sleepduration > bs.bs_dtimperiod) 781 if (bs.bs_sleepduration > bs.bs_dtimperiod)
888 bs.bs_sleepduration = bs.bs_dtimperiod; 782 bs.bs_sleepduration = bs.bs_dtimperiod;
889 783
@@ -899,19 +793,19 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
899 "cfp:period %u " 793 "cfp:period %u "
900 "maxdur %u " 794 "maxdur %u "
901 "next %u " 795 "next %u "
902 "timoffset %u\n" 796 "timoffset %u\n",
903 , __func__ 797 __func__,
904 , (unsigned long long)tsf, tsftu 798 (unsigned long long)tsf, tsftu,
905 , bs.bs_intval 799 bs.bs_intval,
906 , bs.bs_nexttbtt 800 bs.bs_nexttbtt,
907 , bs.bs_dtimperiod 801 bs.bs_dtimperiod,
908 , bs.bs_nextdtim 802 bs.bs_nextdtim,
909 , bs.bs_bmissthreshold 803 bs.bs_bmissthreshold,
910 , bs.bs_sleepduration 804 bs.bs_sleepduration,
911 , bs.bs_cfpperiod 805 bs.bs_cfpperiod,
912 , bs.bs_cfpmaxduration 806 bs.bs_cfpmaxduration,
913 , bs.bs_cfpnext 807 bs.bs_cfpnext,
914 , bs.bs_timoffset 808 bs.bs_timoffset
915 ); 809 );
916 810
917 ath9k_hw_set_interrupts(ah, 0); 811 ath9k_hw_set_interrupts(ah, 0);
@@ -924,12 +818,12 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
924 ath9k_hw_set_interrupts(ah, 0); 818 ath9k_hw_set_interrupts(ah, 0);
925 if (nexttbtt == intval) 819 if (nexttbtt == intval)
926 intval |= ATH9K_BEACON_RESET_TSF; 820 intval |= ATH9K_BEACON_RESET_TSF;
927 if (sc->sc_opmode == ATH9K_M_IBSS) { 821 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS) {
928 /* 822 /*
929 * Pull nexttbtt forward to reflect the current 823 * Pull nexttbtt forward to reflect the current
930 * TSF . 824 * TSF
931 */ 825 */
932#define FUDGE 2 826#define FUDGE 2
933 if (!(intval & ATH9K_BEACON_RESET_TSF)) { 827 if (!(intval & ATH9K_BEACON_RESET_TSF)) {
934 tsf = ath9k_hw_gettsf64(ah); 828 tsf = ath9k_hw_gettsf64(ah);
935 tsftu = TSF_TO_TU((u32)(tsf>>32), 829 tsftu = TSF_TO_TU((u32)(tsf>>32),
@@ -956,7 +850,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
956 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 850 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
957 sc->sc_imask |= ATH9K_INT_SWBA; 851 sc->sc_imask |= ATH9K_INT_SWBA;
958 ath_beaconq_config(sc); 852 ath_beaconq_config(sc);
959 } else if (sc->sc_opmode == ATH9K_M_HOSTAP) { 853 } else if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) {
960 /* 854 /*
961 * In AP mode we enable the beacon timers and 855 * In AP mode we enable the beacon timers and
962 * SWBA interrupts to prepare beacon frames. 856 * SWBA interrupts to prepare beacon frames.
@@ -972,11 +866,10 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
972 * When using a self-linked beacon descriptor in 866 * When using a self-linked beacon descriptor in
973 * ibss mode load it once here. 867 * ibss mode load it once here.
974 */ 868 */
975 if (sc->sc_opmode == ATH9K_M_IBSS && 869 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS &&
976 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 870 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
977 ath_beacon_start_adhoc(sc, 0); 871 ath_beacon_start_adhoc(sc, 0);
978 } 872 }
979#undef TSF_TO_TU
980} 873}
981 874
982/* Function to collect beacon rssi data and resync beacon if necessary */ 875/* Function to collect beacon rssi data and resync beacon if necessary */
@@ -988,5 +881,5 @@ void ath_beacon_sync(struct ath_softc *sc, int if_id)
988 * beacon frame we just received. 881 * beacon frame we just received.
989 */ 882 */
990 ath_beacon_config(sc, if_id); 883 ath_beacon_config(sc, if_id);
991 sc->sc_beacons = 1; 884 sc->sc_flags |= SC_OP_BEACONS;
992} 885}
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index 87e37bc39145..6c433a4d003e 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -21,9 +21,6 @@
21 21
22static int ath_outdoor; /* enable outdoor use */ 22static int ath_outdoor; /* enable outdoor use */
23 23
24static const u8 ath_bcast_mac[ETH_ALEN] =
25 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
26
27static u32 ath_chainmask_sel_up_rssi_thres = 24static u32 ath_chainmask_sel_up_rssi_thres =
28 ATH_CHAINMASK_SEL_UP_RSSI_THRES; 25 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
29static u32 ath_chainmask_sel_down_rssi_thres = 26static u32 ath_chainmask_sel_down_rssi_thres =
@@ -54,10 +51,8 @@ static void bus_read_cachesize(struct ath_softc *sc, int *csz)
54 * Set current operating mode 51 * Set current operating mode
55 * 52 *
56 * This function initializes and fills the rate table in the ATH object based 53 * This function initializes and fills the rate table in the ATH object based
57 * on the operating mode. The blink rates are also set up here, although 54 * on the operating mode.
58 * they have been superceeded by the ath_led module.
59*/ 55*/
60
61static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode) 56static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
62{ 57{
63 const struct ath9k_rate_table *rt; 58 const struct ath9k_rate_table *rt;
@@ -235,7 +230,7 @@ static int ath_setup_channels(struct ath_softc *sc)
235 * Determine mode from channel flags 230 * Determine mode from channel flags
236 * 231 *
237 * This routine will provide the enumerated WIRELESSS_MODE value based 232 * This routine will provide the enumerated WIRELESSS_MODE value based
238 * on the settings of the channel flags. If ho valid set of flags 233 * on the settings of the channel flags. If no valid set of flags
239 * exist, the lowest mode (11b) is selected. 234 * exist, the lowest mode (11b) is selected.
240*/ 235*/
241 236
@@ -260,7 +255,8 @@ static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
260 else if (chan->chanmode == CHANNEL_G_HT40MINUS) 255 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
261 return ATH9K_MODE_11NG_HT40MINUS; 256 return ATH9K_MODE_11NG_HT40MINUS;
262 257
263 /* NB: should not get here */ 258 WARN_ON(1); /* should not get here */
259
264 return ATH9K_MODE_11B; 260 return ATH9K_MODE_11B;
265} 261}
266 262
@@ -275,14 +271,12 @@ static int ath_stop(struct ath_softc *sc)
275{ 271{
276 struct ath_hal *ah = sc->sc_ah; 272 struct ath_hal *ah = sc->sc_ah;
277 273
278 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n", 274 DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
279 __func__, sc->sc_invalid); 275 __func__, sc->sc_flags & SC_OP_INVALID);
280 276
281 /* 277 /*
282 * Shutdown the hardware and driver: 278 * Shutdown the hardware and driver:
283 * stop output from above 279 * stop output from above
284 * reset 802.11 state machine
285 * (sends station deassoc/deauth frames)
286 * turn off timers 280 * turn off timers
287 * disable interrupts 281 * disable interrupts
288 * clear transmit machinery 282 * clear transmit machinery
@@ -294,8 +288,10 @@ static int ath_stop(struct ath_softc *sc)
294 * hardware is gone (invalid). 288 * hardware is gone (invalid).
295 */ 289 */
296 290
291 if (!(sc->sc_flags & SC_OP_INVALID))
292 ath9k_hw_set_interrupts(ah, 0);
297 ath_draintxq(sc, false); 293 ath_draintxq(sc, false);
298 if (!sc->sc_invalid) { 294 if (!(sc->sc_flags & SC_OP_INVALID)) {
299 ath_stoprecv(sc); 295 ath_stoprecv(sc);
300 ath9k_hw_phy_disable(ah); 296 ath9k_hw_phy_disable(ah);
301 } else 297 } else
@@ -305,56 +301,6 @@ static int ath_stop(struct ath_softc *sc)
305} 301}
306 302
307/* 303/*
308 * Start Scan
309 *
310 * This function is called when starting a channel scan. It will perform
311 * power save wakeup processing, set the filter for the scan, and get the
312 * chip ready to send broadcast packets out during the scan.
313*/
314
315void ath_scan_start(struct ath_softc *sc)
316{
317 struct ath_hal *ah = sc->sc_ah;
318 u32 rfilt;
319 u32 now = (u32) jiffies_to_msecs(get_timestamp());
320
321 sc->sc_scanning = 1;
322 rfilt = ath_calcrxfilter(sc);
323 ath9k_hw_setrxfilter(ah, rfilt);
324 ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
325
326 /* Restore previous power management state. */
327
328 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
329 now / 1000, now % 1000, __func__, rfilt);
330}
331
332/*
333 * Scan End
334 *
335 * This routine is called by the upper layer when the scan is completed. This
336 * will set the filters back to normal operating mode, set the BSSID to the
337 * correct value, and restore the power save state.
338*/
339
340void ath_scan_end(struct ath_softc *sc)
341{
342 struct ath_hal *ah = sc->sc_ah;
343 u32 rfilt;
344 u32 now = (u32) jiffies_to_msecs(get_timestamp());
345
346 sc->sc_scanning = 0;
347 /* Request for a full reset due to rx packet filter changes */
348 sc->sc_full_reset = 1;
349 rfilt = ath_calcrxfilter(sc);
350 ath9k_hw_setrxfilter(ah, rfilt);
351 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
352
353 DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
354 now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
355}
356
357/*
358 * Set the current channel 304 * Set the current channel
359 * 305 *
360 * Set/change channels. If the channel is really being changed, it's done 306 * Set/change channels. If the channel is really being changed, it's done
@@ -365,25 +311,23 @@ int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
365{ 311{
366 struct ath_hal *ah = sc->sc_ah; 312 struct ath_hal *ah = sc->sc_ah;
367 bool fastcc = true, stopped; 313 bool fastcc = true, stopped;
368 enum ath9k_ht_macmode ht_macmode;
369 314
370 if (sc->sc_invalid) /* if the device is invalid or removed */ 315 if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
371 return -EIO; 316 return -EIO;
372 317
373 DPRINTF(sc, ATH_DBG_CONFIG, 318 DPRINTF(sc, ATH_DBG_CONFIG,
374 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n", 319 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
375 __func__, 320 __func__,
376 ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel, 321 ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
377 sc->sc_curchan.channelFlags), 322 sc->sc_ah->ah_curchan->channelFlags),
378 sc->sc_curchan.channel, 323 sc->sc_ah->ah_curchan->channel,
379 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags), 324 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
380 hchan->channel, hchan->channelFlags); 325 hchan->channel, hchan->channelFlags);
381 326
382 ht_macmode = ath_cwm_macmode(sc); 327 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
383 328 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
384 if (hchan->channel != sc->sc_curchan.channel || 329 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
385 hchan->channelFlags != sc->sc_curchan.channelFlags || 330 (sc->sc_flags & SC_OP_FULL_RESET)) {
386 sc->sc_update_chainmask || sc->sc_full_reset) {
387 int status; 331 int status;
388 /* 332 /*
389 * This is only performed if the channel settings have 333 * This is only performed if the channel settings have
@@ -402,15 +346,16 @@ int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
402 * to flush data frames already in queue because of 346 * to flush data frames already in queue because of
403 * changing channel. */ 347 * changing channel. */
404 348
405 if (!stopped || sc->sc_full_reset) 349 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
406 fastcc = false; 350 fastcc = false;
407 351
408 spin_lock_bh(&sc->sc_resetlock); 352 spin_lock_bh(&sc->sc_resetlock);
409 if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan, 353 if (!ath9k_hw_reset(ah, hchan,
410 ht_macmode, sc->sc_tx_chainmask, 354 sc->sc_ht_info.tx_chan_width,
411 sc->sc_rx_chainmask, 355 sc->sc_tx_chainmask,
412 sc->sc_ht_extprotspacing, 356 sc->sc_rx_chainmask,
413 fastcc, &status)) { 357 sc->sc_ht_extprotspacing,
358 fastcc, &status)) {
414 DPRINTF(sc, ATH_DBG_FATAL, 359 DPRINTF(sc, ATH_DBG_FATAL,
415 "%s: unable to reset channel %u (%uMhz) " 360 "%s: unable to reset channel %u (%uMhz) "
416 "flags 0x%x hal status %u\n", __func__, 361 "flags 0x%x hal status %u\n", __func__,
@@ -422,9 +367,8 @@ int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
422 } 367 }
423 spin_unlock_bh(&sc->sc_resetlock); 368 spin_unlock_bh(&sc->sc_resetlock);
424 369
425 sc->sc_curchan = *hchan; 370 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
426 sc->sc_update_chainmask = 0; 371 sc->sc_flags &= ~SC_OP_FULL_RESET;
427 sc->sc_full_reset = 0;
428 372
429 /* Re-enable rx framework */ 373 /* Re-enable rx framework */
430 if (ath_startrecv(sc) != 0) { 374 if (ath_startrecv(sc) != 0) {
@@ -535,7 +479,7 @@ int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
535 479
536void ath_update_chainmask(struct ath_softc *sc, int is_ht) 480void ath_update_chainmask(struct ath_softc *sc, int is_ht)
537{ 481{
538 sc->sc_update_chainmask = 1; 482 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
539 if (is_ht) { 483 if (is_ht) {
540 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask; 484 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
541 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask; 485 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
@@ -552,62 +496,6 @@ void ath_update_chainmask(struct ath_softc *sc, int is_ht)
552/* VAP management */ 496/* VAP management */
553/******************/ 497/******************/
554 498
555/*
556 * VAP in Listen mode
557 *
558 * This routine brings the VAP out of the down state into a "listen" state
559 * where it waits for association requests. This is used in AP and AdHoc
560 * modes.
561*/
562
563int ath_vap_listen(struct ath_softc *sc, int if_id)
564{
565 struct ath_hal *ah = sc->sc_ah;
566 struct ath_vap *avp;
567 u32 rfilt = 0;
568 DECLARE_MAC_BUF(mac);
569
570 avp = sc->sc_vaps[if_id];
571 if (avp == NULL) {
572 DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
573 __func__, if_id);
574 return -EINVAL;
575 }
576
577#ifdef CONFIG_SLOW_ANT_DIV
578 ath_slow_ant_div_stop(&sc->sc_antdiv);
579#endif
580
581 /* update ratectrl about the new state */
582 ath_rate_newstate(sc, avp);
583
584 rfilt = ath_calcrxfilter(sc);
585 ath9k_hw_setrxfilter(ah, rfilt);
586
587 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
588 memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
589 ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
590 } else
591 sc->sc_curaid = 0;
592
593 DPRINTF(sc, ATH_DBG_CONFIG,
594 "%s: RX filter 0x%x bssid %s aid 0x%x\n",
595 __func__, rfilt, print_mac(mac,
596 sc->sc_curbssid), sc->sc_curaid);
597
598 /*
599 * XXXX
600 * Disable BMISS interrupt when we're not associated
601 */
602 ath9k_hw_set_interrupts(ah,
603 sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
604 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
605 /* need to reconfigure the beacons when it moves to RUN */
606 sc->sc_beacons = 0;
607
608 return 0;
609}
610
611int ath_vap_attach(struct ath_softc *sc, 499int ath_vap_attach(struct ath_softc *sc,
612 int if_id, 500 int if_id,
613 struct ieee80211_vif *if_data, 501 struct ieee80211_vif *if_data,
@@ -645,16 +533,14 @@ int ath_vap_attach(struct ath_softc *sc,
645 /* Set the VAP opmode */ 533 /* Set the VAP opmode */
646 avp->av_opmode = opmode; 534 avp->av_opmode = opmode;
647 avp->av_bslot = -1; 535 avp->av_bslot = -1;
648 INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
649 INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
650 spin_lock_init(&avp->av_mcastq.axq_lock);
651 536
652 ath9k_hw_set_tsfadjust(sc->sc_ah, 1); 537 if (opmode == ATH9K_M_HOSTAP)
538 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
653 539
654 sc->sc_vaps[if_id] = avp; 540 sc->sc_vaps[if_id] = avp;
655 sc->sc_nvaps++; 541 sc->sc_nvaps++;
656 /* Set the device opmode */ 542 /* Set the device opmode */
657 sc->sc_opmode = opmode; 543 sc->sc_ah->ah_opmode = opmode;
658 544
659 /* default VAP configuration */ 545 /* default VAP configuration */
660 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE; 546 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
@@ -687,9 +573,6 @@ int ath_vap_detach(struct ath_softc *sc, int if_id)
687 ath_stoprecv(sc); /* stop recv side */ 573 ath_stoprecv(sc); /* stop recv side */
688 ath_flushrecv(sc); /* flush recv queue */ 574 ath_flushrecv(sc); /* flush recv queue */
689 575
690 /* Reclaim any pending mcast bufs on the vap. */
691 ath_tx_draintxq(sc, &avp->av_mcastq, false);
692
693 kfree(avp); 576 kfree(avp);
694 sc->sc_vaps[if_id] = NULL; 577 sc->sc_vaps[if_id] = NULL;
695 sc->sc_nvaps--; 578 sc->sc_nvaps--;
@@ -726,9 +609,9 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
726 struct ath_hal *ah = sc->sc_ah; 609 struct ath_hal *ah = sc->sc_ah;
727 int status; 610 int status;
728 int error = 0; 611 int error = 0;
729 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
730 612
731 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode); 613 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
614 __func__, sc->sc_ah->ah_opmode);
732 615
733 /* 616 /*
734 * Stop anything previously setup. This is safe 617 * Stop anything previously setup. This is safe
@@ -750,16 +633,16 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
750 * be followed by initialization of the appropriate bits 633 * be followed by initialization of the appropriate bits
751 * and then setup of the interrupt mask. 634 * and then setup of the interrupt mask.
752 */ 635 */
753 sc->sc_curchan = *initial_chan;
754 636
755 spin_lock_bh(&sc->sc_resetlock); 637 spin_lock_bh(&sc->sc_resetlock);
756 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode, 638 if (!ath9k_hw_reset(ah, initial_chan,
757 sc->sc_tx_chainmask, sc->sc_rx_chainmask, 639 sc->sc_ht_info.tx_chan_width,
758 sc->sc_ht_extprotspacing, false, &status)) { 640 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
641 sc->sc_ht_extprotspacing, false, &status)) {
759 DPRINTF(sc, ATH_DBG_FATAL, 642 DPRINTF(sc, ATH_DBG_FATAL,
760 "%s: unable to reset hardware; hal status %u " 643 "%s: unable to reset hardware; hal status %u "
761 "(freq %u flags 0x%x)\n", __func__, status, 644 "(freq %u flags 0x%x)\n", __func__, status,
762 sc->sc_curchan.channel, sc->sc_curchan.channelFlags); 645 initial_chan->channel, initial_chan->channelFlags);
763 error = -EIO; 646 error = -EIO;
764 spin_unlock_bh(&sc->sc_resetlock); 647 spin_unlock_bh(&sc->sc_resetlock);
765 goto done; 648 goto done;
@@ -806,7 +689,8 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
806 * Note we only do this (at the moment) for station mode. 689 * Note we only do this (at the moment) for station mode.
807 */ 690 */
808 if (ath9k_hw_phycounters(ah) && 691 if (ath9k_hw_phycounters(ah) &&
809 ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS))) 692 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
693 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
810 sc->sc_imask |= ATH9K_INT_MIB; 694 sc->sc_imask |= ATH9K_INT_MIB;
811#endif 695#endif
812 /* 696 /*
@@ -816,7 +700,7 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
816 * enable the TIM interrupt when operating as station. 700 * enable the TIM interrupt when operating as station.
817 */ 701 */
818 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) && 702 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
819 (sc->sc_opmode == ATH9K_M_STA) && 703 (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
820 !sc->sc_config.swBeaconProcess) 704 !sc->sc_config.swBeaconProcess)
821 sc->sc_imask |= ATH9K_INT_TIM; 705 sc->sc_imask |= ATH9K_INT_TIM;
822 /* 706 /*
@@ -828,34 +712,34 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
828 712
829 /* XXX: we must make sure h/w is ready and clear invalid flag 713 /* XXX: we must make sure h/w is ready and clear invalid flag
830 * before turning on interrupt. */ 714 * before turning on interrupt. */
831 sc->sc_invalid = 0; 715 sc->sc_flags &= ~SC_OP_INVALID;
832done: 716done:
833 return error; 717 return error;
834} 718}
835 719
836/* 720int ath_reset(struct ath_softc *sc, bool retry_tx)
837 * Reset the hardware w/o losing operational state. This is
838 * basically a more efficient way of doing ath_stop, ath_init,
839 * followed by state transitions to the current 802.11
840 * operational state. Used to recover from errors rx overrun
841 * and to reset the hardware when rf gain settings must be reset.
842 */
843
844static int ath_reset_start(struct ath_softc *sc, u32 flag)
845{ 721{
846 struct ath_hal *ah = sc->sc_ah; 722 struct ath_hal *ah = sc->sc_ah;
723 int status;
724 int error = 0;
847 725
848 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */ 726 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
849 ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */ 727 ath_draintxq(sc, retry_tx); /* stop xmit */
850 ath_stoprecv(sc); /* stop recv side */ 728 ath_stoprecv(sc); /* stop recv */
851 ath_flushrecv(sc); /* flush recv queue */ 729 ath_flushrecv(sc); /* flush recv queue */
852 730
853 return 0; 731 /* Reset chip */
854} 732 spin_lock_bh(&sc->sc_resetlock);
855 733 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
856static int ath_reset_end(struct ath_softc *sc, u32 flag) 734 sc->sc_ht_info.tx_chan_width,
857{ 735 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
858 struct ath_hal *ah = sc->sc_ah; 736 sc->sc_ht_extprotspacing, false, &status)) {
737 DPRINTF(sc, ATH_DBG_FATAL,
738 "%s: unable to reset hardware; hal status %u\n",
739 __func__, status);
740 error = -EIO;
741 }
742 spin_unlock_bh(&sc->sc_resetlock);
859 743
860 if (ath_startrecv(sc) != 0) /* restart recv */ 744 if (ath_startrecv(sc) != 0) /* restart recv */
861 DPRINTF(sc, ATH_DBG_FATAL, 745 DPRINTF(sc, ATH_DBG_FATAL,
@@ -866,16 +750,17 @@ static int ath_reset_end(struct ath_softc *sc, u32 flag)
866 * that changes the channel so update any state that 750 * that changes the channel so update any state that
867 * might change as a result. 751 * might change as a result.
868 */ 752 */
869 ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan)); 753 ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));
870 754
871 ath_update_txpow(sc); /* update tx power state */ 755 ath_update_txpow(sc);
872 756
873 if (sc->sc_beacons) 757 if (sc->sc_flags & SC_OP_BEACONS)
874 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */ 758 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
759
875 ath9k_hw_set_interrupts(ah, sc->sc_imask); 760 ath9k_hw_set_interrupts(ah, sc->sc_imask);
876 761
877 /* Restart the txq */ 762 /* Restart the txq */
878 if (flag & RESET_RETRY_TXQ) { 763 if (retry_tx) {
879 int i; 764 int i;
880 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 765 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
881 if (ATH_TXQ_SETUP(sc, i)) { 766 if (ATH_TXQ_SETUP(sc, i)) {
@@ -885,28 +770,6 @@ static int ath_reset_end(struct ath_softc *sc, u32 flag)
885 } 770 }
886 } 771 }
887 } 772 }
888 return 0;
889}
890
891int ath_reset(struct ath_softc *sc)
892{
893 struct ath_hal *ah = sc->sc_ah;
894 int status;
895 int error = 0;
896 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
897
898 /* NB: indicate channel change so we do a full reset */
899 spin_lock_bh(&sc->sc_resetlock);
900 if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
901 ht_macmode,
902 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
903 sc->sc_ht_extprotspacing, false, &status)) {
904 DPRINTF(sc, ATH_DBG_FATAL,
905 "%s: unable to reset hardware; hal status %u\n",
906 __func__, status);
907 error = -EIO;
908 }
909 spin_unlock_bh(&sc->sc_resetlock);
910 773
911 return error; 774 return error;
912} 775}
@@ -916,7 +779,7 @@ int ath_suspend(struct ath_softc *sc)
916 struct ath_hal *ah = sc->sc_ah; 779 struct ath_hal *ah = sc->sc_ah;
917 780
918 /* No I/O if device has been surprise removed */ 781 /* No I/O if device has been surprise removed */
919 if (sc->sc_invalid) 782 if (sc->sc_flags & SC_OP_INVALID)
920 return -EIO; 783 return -EIO;
921 784
922 /* Shut off the interrupt before setting sc->sc_invalid to '1' */ 785 /* Shut off the interrupt before setting sc->sc_invalid to '1' */
@@ -924,7 +787,7 @@ int ath_suspend(struct ath_softc *sc)
924 787
925 /* XXX: we must make sure h/w will not generate any interrupt 788 /* XXX: we must make sure h/w will not generate any interrupt
926 * before setting the invalid flag. */ 789 * before setting the invalid flag. */
927 sc->sc_invalid = 1; 790 sc->sc_flags |= SC_OP_INVALID;
928 791
929 /* disable HAL and put h/w to sleep */ 792 /* disable HAL and put h/w to sleep */
930 ath9k_hw_disable(sc->sc_ah); 793 ath9k_hw_disable(sc->sc_ah);
@@ -945,7 +808,7 @@ irqreturn_t ath_isr(int irq, void *dev)
945 bool sched = false; 808 bool sched = false;
946 809
947 do { 810 do {
948 if (sc->sc_invalid) { 811 if (sc->sc_flags & SC_OP_INVALID) {
949 /* 812 /*
950 * The hardware is not ready/present, don't 813 * The hardware is not ready/present, don't
951 * touch anything. Note this can happen early 814 * touch anything. Note this can happen early
@@ -1055,7 +918,7 @@ static void ath9k_tasklet(unsigned long data)
1055 918
1056 if (status & ATH9K_INT_FATAL) { 919 if (status & ATH9K_INT_FATAL) {
1057 /* need a chip reset */ 920 /* need a chip reset */
1058 ath_internal_reset(sc); 921 ath_reset(sc, false);
1059 return; 922 return;
1060 } else { 923 } else {
1061 924
@@ -1098,10 +961,9 @@ int ath_init(u16 devid, struct ath_softc *sc)
1098 int status; 961 int status;
1099 int error = 0, i; 962 int error = 0, i;
1100 int csz = 0; 963 int csz = 0;
1101 u32 rd;
1102 964
1103 /* XXX: hardware will not be ready until ath_open() being called */ 965 /* XXX: hardware will not be ready until ath_open() being called */
1104 sc->sc_invalid = 1; 966 sc->sc_flags |= SC_OP_INVALID;
1105 967
1106 sc->sc_debug = DBG_DEFAULT; 968 sc->sc_debug = DBG_DEFAULT;
1107 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid); 969 DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
@@ -1131,9 +993,6 @@ int ath_init(u16 devid, struct ath_softc *sc)
1131 } 993 }
1132 sc->sc_ah = ah; 994 sc->sc_ah = ah;
1133 995
1134 /* Get the chipset-specific aggr limit. */
1135 sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
1136
1137 /* Get the hardware key cache size. */ 996 /* Get the hardware key cache size. */
1138 sc->sc_keymax = ah->ah_caps.keycache_size; 997 sc->sc_keymax = ah->ah_caps.keycache_size;
1139 if (sc->sc_keymax > ATH_KEYMAX) { 998 if (sc->sc_keymax > ATH_KEYMAX) {
@@ -1167,14 +1026,12 @@ int ath_init(u16 devid, struct ath_softc *sc)
1167 * is resposible for filtering this list based on settings 1026 * is resposible for filtering this list based on settings
1168 * like the phy mode. 1027 * like the phy mode.
1169 */ 1028 */
1170 rd = ah->ah_currentRD;
1171
1172 error = ath_setup_channels(sc); 1029 error = ath_setup_channels(sc);
1173 if (error) 1030 if (error)
1174 goto bad; 1031 goto bad;
1175 1032
1176 /* default to STA mode */ 1033 /* default to STA mode */
1177 sc->sc_opmode = ATH9K_M_MONITOR; 1034 sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;
1178 1035
1179 /* Setup rate tables */ 1036 /* Setup rate tables */
1180 1037
@@ -1245,7 +1102,7 @@ int ath_init(u16 devid, struct ath_softc *sc)
1245 1102
1246 sc->sc_rc = ath_rate_attach(ah); 1103 sc->sc_rc = ath_rate_attach(ah);
1247 if (sc->sc_rc == NULL) { 1104 if (sc->sc_rc == NULL) {
1248 error = EIO; 1105 error = -EIO;
1249 goto bad2; 1106 goto bad2;
1250 } 1107 }
1251 1108
@@ -1285,20 +1142,13 @@ int ath_init(u16 devid, struct ath_softc *sc)
1285 1142
1286 /* 11n Capabilities */ 1143 /* 11n Capabilities */
1287 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { 1144 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1288 sc->sc_txaggr = 1; 1145 sc->sc_flags |= SC_OP_TXAGGR;
1289 sc->sc_rxaggr = 1; 1146 sc->sc_flags |= SC_OP_RXAGGR;
1290 } 1147 }
1291 1148
1292 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask; 1149 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1293 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask; 1150 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1294 1151
1295 /* Configuration for rx chain detection */
1296 sc->sc_rxchaindetect_ref = 0;
1297 sc->sc_rxchaindetect_thresh5GHz = 35;
1298 sc->sc_rxchaindetect_thresh2GHz = 35;
1299 sc->sc_rxchaindetect_delta5GHz = 30;
1300 sc->sc_rxchaindetect_delta2GHz = 30;
1301
1302 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 1152 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1303 sc->sc_defant = ath9k_hw_getdefantenna(ah); 1153 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1304 1154
@@ -1344,7 +1194,7 @@ void ath_deinit(struct ath_softc *sc)
1344 tasklet_kill(&sc->intr_tq); 1194 tasklet_kill(&sc->intr_tq);
1345 tasklet_kill(&sc->bcon_tasklet); 1195 tasklet_kill(&sc->bcon_tasklet);
1346 ath_stop(sc); 1196 ath_stop(sc);
1347 if (!sc->sc_invalid) 1197 if (!(sc->sc_flags & SC_OP_INVALID))
1348 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 1198 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1349 ath_rate_detach(sc->sc_rc); 1199 ath_rate_detach(sc->sc_rc);
1350 /* cleanup tx queues */ 1200 /* cleanup tx queues */
@@ -1471,9 +1321,9 @@ void ath_newassoc(struct ath_softc *sc,
1471 /* if station reassociates, tear down the aggregation state. */ 1321 /* if station reassociates, tear down the aggregation state. */
1472 if (!isnew) { 1322 if (!isnew) {
1473 for (tidno = 0; tidno < WME_NUM_TID; tidno++) { 1323 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1474 if (sc->sc_txaggr) 1324 if (sc->sc_flags & SC_OP_TXAGGR)
1475 ath_tx_aggr_teardown(sc, an, tidno); 1325 ath_tx_aggr_teardown(sc, an, tidno);
1476 if (sc->sc_rxaggr) 1326 if (sc->sc_flags & SC_OP_RXAGGR)
1477 ath_rx_aggr_teardown(sc, an, tidno); 1327 ath_rx_aggr_teardown(sc, an, tidno);
1478 } 1328 }
1479 } 1329 }
@@ -1822,13 +1672,6 @@ void ath_descdma_cleanup(struct ath_softc *sc,
1822/* Utilities */ 1672/* Utilities */
1823/*************/ 1673/*************/
1824 1674
1825void ath_internal_reset(struct ath_softc *sc)
1826{
1827 ath_reset_start(sc, 0);
1828 ath_reset(sc);
1829 ath_reset_end(sc, 0);
1830}
1831
1832int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) 1675int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1833{ 1676{
1834 int qnum; 1677 int qnum;
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 2f84093331ee..872f0c5a0b0e 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -39,6 +39,8 @@
39#include <linux/scatterlist.h> 39#include <linux/scatterlist.h>
40#include <asm/page.h> 40#include <asm/page.h>
41#include <net/mac80211.h> 41#include <net/mac80211.h>
42#include <linux/leds.h>
43#include <linux/rfkill.h>
42 44
43#include "ath9k.h" 45#include "ath9k.h"
44#include "rc.h" 46#include "rc.h"
@@ -79,12 +81,12 @@ struct ath_node;
79 } \ 81 } \
80 } while (0) 82 } while (0)
81 83
84#define TSF_TO_TU(_h,_l) \
85 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
86
82/* XXX: remove */ 87/* XXX: remove */
83#define memzero(_buf, _len) memset(_buf, 0, _len) 88#define memzero(_buf, _len) memset(_buf, 0, _len)
84 89
85#define get_dma_mem_context(var, field) (&((var)->field))
86#define copy_dma_mem_context(dst, src) (*dst = *src)
87
88#define ATH9K_BH_STATUS_INTACT 0 90#define ATH9K_BH_STATUS_INTACT 0
89#define ATH9K_BH_STATUS_CHANGE 1 91#define ATH9K_BH_STATUS_CHANGE 1
90 92
@@ -95,6 +97,8 @@ static inline unsigned long get_timestamp(void)
95 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ); 97 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
96} 98}
97 99
100static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
101
98/*************/ 102/*************/
99/* Debugging */ 103/* Debugging */
100/*************/ 104/*************/
@@ -175,11 +179,6 @@ void ath_update_chainmask(struct ath_softc *sc, int is_ht);
175/* Descriptor Management */ 179/* Descriptor Management */
176/*************************/ 180/*************************/
177 181
178/* Number of descriptors per buffer. The only case where we see skbuff
179chains is due to FF aggregation in the driver. */
180#define ATH_TXDESC 1
181/* if there's more fragment for this MSDU */
182#define ATH_BF_MORE_MPDU 1
183#define ATH_TXBUF_RESET(_bf) do { \ 182#define ATH_TXBUF_RESET(_bf) do { \
184 (_bf)->bf_status = 0; \ 183 (_bf)->bf_status = 0; \
185 (_bf)->bf_lastbf = NULL; \ 184 (_bf)->bf_lastbf = NULL; \
@@ -189,28 +188,29 @@ chains is due to FF aggregation in the driver. */
189 sizeof(struct ath_buf_state)); \ 188 sizeof(struct ath_buf_state)); \
190 } while (0) 189 } while (0)
191 190
191enum buffer_type {
192 BUF_DATA = BIT(0),
193 BUF_AGGR = BIT(1),
194 BUF_AMPDU = BIT(2),
195 BUF_HT = BIT(3),
196 BUF_RETRY = BIT(4),
197 BUF_XRETRY = BIT(5),
198 BUF_SHORT_PREAMBLE = BIT(6),
199 BUF_BAR = BIT(7),
200 BUF_PSPOLL = BIT(8),
201 BUF_AGGR_BURST = BIT(9),
202 BUF_CALC_AIRTIME = BIT(10),
203};
204
192struct ath_buf_state { 205struct ath_buf_state {
193 int bfs_nframes; /* # frames in aggregate */ 206 int bfs_nframes; /* # frames in aggregate */
194 u16 bfs_al; /* length of aggregate */ 207 u16 bfs_al; /* length of aggregate */
195 u16 bfs_frmlen; /* length of frame */ 208 u16 bfs_frmlen; /* length of frame */
196 int bfs_seqno; /* sequence number */ 209 int bfs_seqno; /* sequence number */
197 int bfs_tidno; /* tid of this frame */ 210 int bfs_tidno; /* tid of this frame */
198 int bfs_retries; /* current retries */ 211 int bfs_retries; /* current retries */
199 struct ath_rc_series bfs_rcs[4]; /* rate series */ 212 struct ath_rc_series bfs_rcs[4]; /* rate series */
200 u8 bfs_isdata:1; /* is a data frame/aggregate */ 213 u32 bf_type; /* BUF_* (enum buffer_type) */
201 u8 bfs_isaggr:1; /* is an aggregate */
202 u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */
203 u8 bfs_ht:1; /* is an HT frame */
204 u8 bfs_isretried:1; /* is retried */
205 u8 bfs_isxretried:1; /* is excessive retried */
206 u8 bfs_shpreamble:1; /* is short preamble */
207 u8 bfs_isbar:1; /* is a BAR */
208 u8 bfs_ispspoll:1; /* is a PS-Poll */
209 u8 bfs_aggrburst:1; /* is a aggr burst */
210 u8 bfs_calcairtime:1; /* requests airtime be calculated
211 when set for tx frame */
212 int bfs_rifsburst_elem; /* RIFS burst/bar */
213 int bfs_nrifsubframes; /* # of elements in burst */
214 /* key type use to encrypt this frame */ 214 /* key type use to encrypt this frame */
215 enum ath9k_key_type bfs_keytype; 215 enum ath9k_key_type bfs_keytype;
216}; 216};
@@ -222,26 +222,22 @@ struct ath_buf_state {
222#define bf_seqno bf_state.bfs_seqno 222#define bf_seqno bf_state.bfs_seqno
223#define bf_tidno bf_state.bfs_tidno 223#define bf_tidno bf_state.bfs_tidno
224#define bf_rcs bf_state.bfs_rcs 224#define bf_rcs bf_state.bfs_rcs
225#define bf_isdata bf_state.bfs_isdata
226#define bf_isaggr bf_state.bfs_isaggr
227#define bf_isampdu bf_state.bfs_isampdu
228#define bf_ht bf_state.bfs_ht
229#define bf_isretried bf_state.bfs_isretried
230#define bf_isxretried bf_state.bfs_isxretried
231#define bf_shpreamble bf_state.bfs_shpreamble
232#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem
233#define bf_nrifsubframes bf_state.bfs_nrifsubframes
234#define bf_keytype bf_state.bfs_keytype 225#define bf_keytype bf_state.bfs_keytype
235#define bf_isbar bf_state.bfs_isbar 226#define bf_isdata(bf) (bf->bf_state.bf_type & BUF_DATA)
236#define bf_ispspoll bf_state.bfs_ispspoll 227#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
237#define bf_aggrburst bf_state.bfs_aggrburst 228#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
238#define bf_calcairtime bf_state.bfs_calcairtime 229#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT)
230#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
231#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
232#define bf_isshpreamble(bf) (bf->bf_state.bf_type & BUF_SHORT_PREAMBLE)
233#define bf_isbar(bf) (bf->bf_state.bf_type & BUF_BAR)
234#define bf_ispspoll(bf) (bf->bf_state.bf_type & BUF_PSPOLL)
235#define bf_isaggrburst(bf) (bf->bf_state.bf_type & BUF_AGGR_BURST)
239 236
240/* 237/*
241 * Abstraction of a contiguous buffer to transmit/receive. There is only 238 * Abstraction of a contiguous buffer to transmit/receive. There is only
242 * a single hw descriptor encapsulated here. 239 * a single hw descriptor encapsulated here.
243 */ 240 */
244
245struct ath_buf { 241struct ath_buf {
246 struct list_head list; 242 struct list_head list;
247 struct list_head *last; 243 struct list_head *last;
@@ -391,10 +387,10 @@ int ath_rx_input(struct ath_softc *sc,
391 struct sk_buff *skb, 387 struct sk_buff *skb,
392 struct ath_recv_status *rx_status, 388 struct ath_recv_status *rx_status,
393 enum ATH_RX_TYPE *status); 389 enum ATH_RX_TYPE *status);
394int ath__rx_indicate(struct ath_softc *sc, 390int _ath_rx_indicate(struct ath_softc *sc,
395 struct sk_buff *skb, 391 struct sk_buff *skb,
396 struct ath_recv_status *status, 392 struct ath_recv_status *status,
397 u16 keyix); 393 u16 keyix);
398int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb, 394int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
399 struct ath_recv_status *status); 395 struct ath_recv_status *status);
400 396
@@ -402,8 +398,7 @@ int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
402/* TX */ 398/* TX */
403/******/ 399/******/
404 400
405#define ATH_FRAG_PER_MSDU 1 401#define ATH_TXBUF 512
406#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU)
407/* max number of transmit attempts (tries) */ 402/* max number of transmit attempts (tries) */
408#define ATH_TXMAXTRY 13 403#define ATH_TXMAXTRY 13
409/* max number of 11n transmit attempts (tries) */ 404/* max number of 11n transmit attempts (tries) */
@@ -522,7 +517,6 @@ struct ath_tx_control {
522 u32 keyix; 517 u32 keyix;
523 int min_rate; 518 int min_rate;
524 int mcast_rate; 519 int mcast_rate;
525 u16 nextfraglen;
526 struct ath_softc *dev; 520 struct ath_softc *dev;
527 dma_addr_t dmacontext; 521 dma_addr_t dmacontext;
528}; 522};
@@ -557,10 +551,10 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
557int ath_tx_setup(struct ath_softc *sc, int haltype); 551int ath_tx_setup(struct ath_softc *sc, int haltype);
558void ath_draintxq(struct ath_softc *sc, bool retry_tx); 552void ath_draintxq(struct ath_softc *sc, bool retry_tx);
559void ath_tx_draintxq(struct ath_softc *sc, 553void ath_tx_draintxq(struct ath_softc *sc,
560 struct ath_txq *txq, bool retry_tx); 554 struct ath_txq *txq, bool retry_tx);
561void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); 555void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
562void ath_tx_node_cleanup(struct ath_softc *sc, 556void ath_tx_node_cleanup(struct ath_softc *sc,
563 struct ath_node *an, bool bh_flag); 557 struct ath_node *an, bool bh_flag);
564void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an); 558void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
565void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); 559void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
566int ath_tx_init(struct ath_softc *sc, int nbufs); 560int ath_tx_init(struct ath_softc *sc, int nbufs);
@@ -575,6 +569,7 @@ u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
575void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth); 569void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
576void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 570void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
577 struct ath_xmit_status *tx_status, struct ath_node *an); 571 struct ath_xmit_status *tx_status, struct ath_node *an);
572void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
578 573
579/**********************/ 574/**********************/
580/* Node / Aggregation */ 575/* Node / Aggregation */
@@ -585,7 +580,6 @@ void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
585/* indicates the node is 80211 power save */ 580/* indicates the node is 80211 power save */
586#define ATH_NODE_PWRSAVE 0x2 581#define ATH_NODE_PWRSAVE 0x2
587 582
588#define ADDBA_TIMEOUT 200 /* 200 milliseconds */
589#define ADDBA_EXCHANGE_ATTEMPTS 10 583#define ADDBA_EXCHANGE_ATTEMPTS 10
590#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */ 584#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
591#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ 585#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
@@ -705,9 +699,6 @@ struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
705#define ATH_BCBUF 4 /* number of beacon buffers */ 699#define ATH_BCBUF 4 /* number of beacon buffers */
706#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */ 700#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */
707#define ATH_DEFAULT_BMISS_LIMIT 10 701#define ATH_DEFAULT_BMISS_LIMIT 10
708#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */
709#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */
710#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */
711#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 702#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
712 703
713/* beacon configuration */ 704/* beacon configuration */
@@ -724,30 +715,16 @@ struct ath_beacon_config {
724 } u; /* last received beacon/probe response timestamp of this BSS. */ 715 } u; /* last received beacon/probe response timestamp of this BSS. */
725}; 716};
726 717
727/* offsets in a beacon frame for
728 * quick acess of beacon content by low-level driver */
729struct ath_beacon_offset {
730 u8 *bo_tim; /* start of atim/dtim */
731};
732
733void ath9k_beacon_tasklet(unsigned long data); 718void ath9k_beacon_tasklet(unsigned long data);
734void ath_beacon_config(struct ath_softc *sc, int if_id); 719void ath_beacon_config(struct ath_softc *sc, int if_id);
735int ath_beaconq_setup(struct ath_hal *ah); 720int ath_beaconq_setup(struct ath_hal *ah);
736int ath_beacon_alloc(struct ath_softc *sc, int if_id); 721int ath_beacon_alloc(struct ath_softc *sc, int if_id);
737void ath_bstuck_process(struct ath_softc *sc); 722void ath_bstuck_process(struct ath_softc *sc);
738void ath_beacon_tasklet(struct ath_softc *sc, int *needmark);
739void ath_beacon_free(struct ath_softc *sc);
740void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp); 723void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
741void ath_beacon_sync(struct ath_softc *sc, int if_id); 724void ath_beacon_sync(struct ath_softc *sc, int if_id);
742void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi);
743void ath_get_beaconconfig(struct ath_softc *sc, 725void ath_get_beaconconfig(struct ath_softc *sc,
744 int if_id, 726 int if_id,
745 struct ath_beacon_config *conf); 727 struct ath_beacon_config *conf);
746int ath_update_beacon(struct ath_softc *sc,
747 int if_id,
748 struct ath_beacon_offset *bo,
749 struct sk_buff *skb,
750 int mcast);
751/********/ 728/********/
752/* VAPs */ 729/* VAPs */
753/********/ 730/********/
@@ -774,10 +751,8 @@ struct ath_vap {
774 struct ieee80211_vif *av_if_data; 751 struct ieee80211_vif *av_if_data;
775 enum ath9k_opmode av_opmode; /* VAP operational mode */ 752 enum ath9k_opmode av_opmode; /* VAP operational mode */
776 struct ath_buf *av_bcbuf; /* beacon buffer */ 753 struct ath_buf *av_bcbuf; /* beacon buffer */
777 struct ath_beacon_offset av_boff; /* dynamic update state */
778 struct ath_tx_control av_btxctl; /* txctl information for beacon */ 754 struct ath_tx_control av_btxctl; /* txctl information for beacon */
779 int av_bslot; /* beacon slot index */ 755 int av_bslot; /* beacon slot index */
780 struct ath_txq av_mcastq; /* multicast transmit queue */
781 struct ath_vap_config av_config;/* vap configuration parameters*/ 756 struct ath_vap_config av_config;/* vap configuration parameters*/
782 struct ath_rate_node *rc_node; 757 struct ath_rate_node *rc_node;
783}; 758};
@@ -788,8 +763,7 @@ int ath_vap_attach(struct ath_softc *sc,
788 enum ath9k_opmode opmode); 763 enum ath9k_opmode opmode);
789int ath_vap_detach(struct ath_softc *sc, int if_id); 764int ath_vap_detach(struct ath_softc *sc, int if_id);
790int ath_vap_config(struct ath_softc *sc, 765int ath_vap_config(struct ath_softc *sc,
791 int if_id, struct ath_vap_config *if_config); 766 int if_id, struct ath_vap_config *if_config);
792int ath_vap_listen(struct ath_softc *sc, int if_id);
793 767
794/*********************/ 768/*********************/
795/* Antenna diversity */ 769/* Antenna diversity */
@@ -830,6 +804,36 @@ void ath_slow_ant_div(struct ath_antdiv *antdiv,
830void ath_setdefantenna(void *sc, u32 antenna); 804void ath_setdefantenna(void *sc, u32 antenna);
831 805
832/********************/ 806/********************/
807/* LED Control */
808/********************/
809
810#define ATH_LED_PIN 1
811
812enum ath_led_type {
813 ATH_LED_RADIO,
814 ATH_LED_ASSOC,
815 ATH_LED_TX,
816 ATH_LED_RX
817};
818
819struct ath_led {
820 struct ath_softc *sc;
821 struct led_classdev led_cdev;
822 enum ath_led_type led_type;
823 char name[32];
824 bool registered;
825};
826
827/* Rfkill */
828#define ATH_RFKILL_POLL_INTERVAL 2000 /* msecs */
829
830struct ath_rfkill {
831 struct rfkill *rfkill;
832 struct delayed_work rfkill_poll;
833 char rfkill_name[32];
834};
835
836/********************/
833/* Main driver core */ 837/* Main driver core */
834/********************/ 838/********************/
835 839
@@ -841,11 +845,7 @@ void ath_setdefantenna(void *sc, u32 antenna);
841#define ATH_DEFAULT_NOISE_FLOOR -95 845#define ATH_DEFAULT_NOISE_FLOOR -95
842#define ATH_REGCLASSIDS_MAX 10 846#define ATH_REGCLASSIDS_MAX 10
843#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 847#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
844#define ATH_PREAMBLE_SHORT (1<<0)
845#define ATH_PROTECT_ENABLE (1<<1)
846#define ATH_MAX_SW_RETRIES 10 848#define ATH_MAX_SW_RETRIES 10
847/* Num farmes difference in tx to flip default recv */
848#define ATH_ANTENNA_DIFF 2
849#define ATH_CHAN_MAX 255 849#define ATH_CHAN_MAX 255
850#define IEEE80211_WEP_NKID 4 /* number of key ids */ 850#define IEEE80211_WEP_NKID 4 /* number of key ids */
851#define IEEE80211_RATE_VAL 0x7f 851#define IEEE80211_RATE_VAL 0x7f
@@ -859,9 +859,7 @@ void ath_setdefantenna(void *sc, u32 antenna);
859 */ 859 */
860#define ATH_KEYMAX 128 /* max key cache size we handle */ 860#define ATH_KEYMAX 128 /* max key cache size we handle */
861 861
862#define RESET_RETRY_TXQ 0x00000001
863#define ATH_IF_ID_ANY 0xff 862#define ATH_IF_ID_ANY 0xff
864
865#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 863#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
866 864
867#define RSSI_LPF_THRESHOLD -20 865#define RSSI_LPF_THRESHOLD -20
@@ -907,60 +905,64 @@ struct ath_ht_info {
907 u8 ext_chan_offset; 905 u8 ext_chan_offset;
908}; 906};
909 907
908#define SC_OP_INVALID BIT(0)
909#define SC_OP_BEACONS BIT(1)
910#define SC_OP_RXAGGR BIT(2)
911#define SC_OP_TXAGGR BIT(3)
912#define SC_OP_CHAINMASK_UPDATE BIT(4)
913#define SC_OP_FULL_RESET BIT(5)
914#define SC_OP_NO_RESET BIT(6)
915#define SC_OP_PREAMBLE_SHORT BIT(7)
916#define SC_OP_PROTECT_ENABLE BIT(8)
917#define SC_OP_RXFLUSH BIT(9)
918#define SC_OP_LED_ASSOCIATED BIT(10)
919#define SC_OP_RFKILL_REGISTERED BIT(11)
920#define SC_OP_RFKILL_SW_BLOCKED BIT(12)
921#define SC_OP_RFKILL_HW_BLOCKED BIT(13)
922
910struct ath_softc { 923struct ath_softc {
911 struct ieee80211_hw *hw; 924 struct ieee80211_hw *hw;
912 struct pci_dev *pdev; 925 struct pci_dev *pdev;
913 void __iomem *mem;
914 struct tasklet_struct intr_tq; 926 struct tasklet_struct intr_tq;
915 struct tasklet_struct bcon_tasklet; 927 struct tasklet_struct bcon_tasklet;
916 struct ath_config sc_config; /* load-time parameters */ 928 struct ath_config sc_config;
917 int sc_debug;
918 struct ath_hal *sc_ah; 929 struct ath_hal *sc_ah;
919 struct ath_rate_softc *sc_rc; /* tx rate control support */ 930 struct ath_rate_softc *sc_rc;
931 void __iomem *mem;
932
933 u8 sc_curbssid[ETH_ALEN];
934 u8 sc_myaddr[ETH_ALEN];
935 u8 sc_bssidmask[ETH_ALEN];
936
937 int sc_debug;
920 u32 sc_intrstatus; 938 u32 sc_intrstatus;
921 enum ath9k_opmode sc_opmode; /* current operating mode */ 939 u32 sc_flags; /* SC_OP_* */
922 940 unsigned int rx_filter;
923 u8 sc_invalid; /* being detached */
924 u8 sc_beacons; /* beacons running */
925 u8 sc_scanning; /* scanning active */
926 u8 sc_txaggr; /* enable 11n tx aggregation */
927 u8 sc_rxaggr; /* enable 11n rx aggregation */
928 u8 sc_update_chainmask; /* change chain mask */
929 u8 sc_full_reset; /* force full reset */
930 enum wireless_mode sc_curmode; /* current phy mode */
931 u16 sc_curtxpow; 941 u16 sc_curtxpow;
932 u16 sc_curaid; 942 u16 sc_curaid;
933 u8 sc_curbssid[ETH_ALEN]; 943 u16 sc_cachelsz;
934 u8 sc_myaddr[ETH_ALEN]; 944 int sc_slotupdate; /* slot to next advance fsm */
945 int sc_slottime;
946 int sc_bslot[ATH_BCBUF];
947 u8 sc_tx_chainmask;
948 u8 sc_rx_chainmask;
949 enum ath9k_int sc_imask;
950 enum wireless_mode sc_curmode; /* current phy mode */
935 enum PROT_MODE sc_protmode; 951 enum PROT_MODE sc_protmode;
936 u8 sc_mcastantenna; 952
937 u8 sc_txantenna; /* data tx antenna (fixed or auto) */
938 u8 sc_nbcnvaps; /* # of vaps sending beacons */ 953 u8 sc_nbcnvaps; /* # of vaps sending beacons */
939 u16 sc_nvaps; /* # of active virtual ap's */ 954 u16 sc_nvaps; /* # of active virtual ap's */
940 struct ath_vap *sc_vaps[ATH_BCBUF]; 955 struct ath_vap *sc_vaps[ATH_BCBUF];
941 enum ath9k_int sc_imask; 956
942 u8 sc_bssidmask[ETH_ALEN]; 957 u8 sc_mcastantenna;
943 u8 sc_defant; /* current default antenna */ 958 u8 sc_defant; /* current default antenna */
944 u8 sc_rxotherant; /* rx's on non-default antenna */ 959 u8 sc_rxotherant; /* rx's on non-default antenna */
945 u16 sc_cachelsz; 960
946 int sc_slotupdate; /* slot to next advance fsm */
947 int sc_slottime;
948 u8 sc_noreset;
949 int sc_bslot[ATH_BCBUF];
950 struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */ 961 struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */
951 struct list_head node_list; 962 struct list_head node_list;
952 struct ath_ht_info sc_ht_info; 963 struct ath_ht_info sc_ht_info;
953 int16_t sc_noise_floor; /* signal noise floor in dBm */
954 enum ath9k_ht_extprotspacing sc_ht_extprotspacing; 964 enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
955 u8 sc_tx_chainmask; 965
956 u8 sc_rx_chainmask;
957 u8 sc_rxchaindetect_ref;
958 u8 sc_rxchaindetect_thresh5GHz;
959 u8 sc_rxchaindetect_thresh2GHz;
960 u8 sc_rxchaindetect_delta5GHz;
961 u8 sc_rxchaindetect_delta2GHz;
962 u32 sc_rtsaggrlimit; /* Chipset specific aggr limit */
963 u32 sc_flags;
964#ifdef CONFIG_SLOW_ANT_DIV 966#ifdef CONFIG_SLOW_ANT_DIV
965 struct ath_antdiv sc_antdiv; 967 struct ath_antdiv sc_antdiv;
966#endif 968#endif
@@ -980,8 +982,6 @@ struct ath_softc {
980 struct ath_descdma sc_rxdma; 982 struct ath_descdma sc_rxdma;
981 int sc_rxbufsize; /* rx size based on mtu */ 983 int sc_rxbufsize; /* rx size based on mtu */
982 u32 *sc_rxlink; /* link ptr in last RX desc */ 984 u32 *sc_rxlink; /* link ptr in last RX desc */
983 u32 sc_rxflush; /* rx flush in progress */
984 u64 sc_lastrx; /* tsf of last rx'd frame */
985 985
986 /* TX */ 986 /* TX */
987 struct list_head sc_txbuf; 987 struct list_head sc_txbuf;
@@ -990,7 +990,6 @@ struct ath_softc {
990 u32 sc_txqsetup; 990 u32 sc_txqsetup;
991 u32 sc_txintrperiod; /* tx interrupt batching */ 991 u32 sc_txintrperiod; /* tx interrupt batching */
992 int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */ 992 int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */
993 u32 sc_ant_tx[8]; /* recent tx frames/antenna */
994 u16 seq_no; /* TX sequence number */ 993 u16 seq_no; /* TX sequence number */
995 994
996 /* Beacon */ 995 /* Beacon */
@@ -1001,6 +1000,7 @@ struct ath_softc {
1001 u32 sc_bhalq; 1000 u32 sc_bhalq;
1002 u32 sc_bmisscount; 1001 u32 sc_bmisscount;
1003 u32 ast_be_xmit; /* beacons transmitted */ 1002 u32 ast_be_xmit; /* beacons transmitted */
1003 u64 bc_tstamp;
1004 1004
1005 /* Rate */ 1005 /* Rate */
1006 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX]; 1006 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
@@ -1015,7 +1015,6 @@ struct ath_softc {
1015 /* Channel, Band */ 1015 /* Channel, Band */
1016 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX]; 1016 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
1017 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 1017 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
1018 struct ath9k_channel sc_curchan;
1019 1018
1020 /* Locks */ 1019 /* Locks */
1021 spinlock_t sc_rxflushlock; 1020 spinlock_t sc_rxflushlock;
@@ -1023,6 +1022,15 @@ struct ath_softc {
1023 spinlock_t sc_txbuflock; 1022 spinlock_t sc_txbuflock;
1024 spinlock_t sc_resetlock; 1023 spinlock_t sc_resetlock;
1025 spinlock_t node_lock; 1024 spinlock_t node_lock;
1025
1026 /* LEDs */
1027 struct ath_led radio_led;
1028 struct ath_led assoc_led;
1029 struct ath_led tx_led;
1030 struct ath_led rx_led;
1031
1032 /* Rfkill */
1033 struct ath_rfkill rf_kill;
1026}; 1034};
1027 1035
1028int ath_init(u16 devid, struct ath_softc *sc); 1036int ath_init(u16 devid, struct ath_softc *sc);
@@ -1030,14 +1038,8 @@ void ath_deinit(struct ath_softc *sc);
1030int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan); 1038int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
1031int ath_suspend(struct ath_softc *sc); 1039int ath_suspend(struct ath_softc *sc);
1032irqreturn_t ath_isr(int irq, void *dev); 1040irqreturn_t ath_isr(int irq, void *dev);
1033int ath_reset(struct ath_softc *sc); 1041int ath_reset(struct ath_softc *sc, bool retry_tx);
1034void ath_scan_start(struct ath_softc *sc);
1035void ath_scan_end(struct ath_softc *sc);
1036int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan); 1042int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
1037void ath_setup_rate(struct ath_softc *sc,
1038 enum wireless_mode wMode,
1039 enum RATE_TYPE type,
1040 const struct ath9k_rate_table *rt);
1041 1043
1042/*********************/ 1044/*********************/
1043/* Utility Functions */ 1045/* Utility Functions */
@@ -1056,17 +1058,5 @@ int ath_cabq_update(struct ath_softc *);
1056void ath_get_currentCountry(struct ath_softc *sc, 1058void ath_get_currentCountry(struct ath_softc *sc,
1057 struct ath9k_country_entry *ctry); 1059 struct ath9k_country_entry *ctry);
1058u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp); 1060u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
1059void ath_internal_reset(struct ath_softc *sc);
1060u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc);
1061dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1062 struct sk_buff *skb,
1063 int direction,
1064 dma_addr_t *pa);
1065void ath_skb_unmap_single(struct ath_softc *sc,
1066 struct sk_buff *skb,
1067 int direction,
1068 dma_addr_t *pa);
1069void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
1070enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);
1071 1061
1072#endif /* CORE_H */ 1062#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 6dbfed0b4149..272c75816609 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -85,29 +85,6 @@ static const struct hal_percal_data adc_init_dc_cal = {
85 ath9k_hw_adc_dccal_calibrate 85 ath9k_hw_adc_dccal_calibrate
86}; 86};
87 87
88static const struct ath_hal ar5416hal = {
89 AR5416_MAGIC,
90 0,
91 0,
92 NULL,
93 NULL,
94 CTRY_DEFAULT,
95 0,
96 0,
97 0,
98 0,
99 0,
100 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 },
109};
110
111static struct ath9k_rate_table ar5416_11a_table = { 88static struct ath9k_rate_table ar5416_11a_table = {
112 8, 89 8,
113 {0}, 90 {0},
@@ -371,7 +348,7 @@ static void ath9k_hw_set_defaults(struct ath_hal *ah)
371 ah->ah_config.intr_mitigation = 0; 348 ah->ah_config.intr_mitigation = 0;
372} 349}
373 350
374static inline void ath9k_hw_override_ini(struct ath_hal *ah, 351static void ath9k_hw_override_ini(struct ath_hal *ah,
375 struct ath9k_channel *chan) 352 struct ath9k_channel *chan)
376{ 353{
377 if (!AR_SREV_5416_V20_OR_LATER(ah) 354 if (!AR_SREV_5416_V20_OR_LATER(ah)
@@ -381,8 +358,8 @@ static inline void ath9k_hw_override_ini(struct ath_hal *ah,
381 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); 358 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
382} 359}
383 360
384static inline void ath9k_hw_init_bb(struct ath_hal *ah, 361static void ath9k_hw_init_bb(struct ath_hal *ah,
385 struct ath9k_channel *chan) 362 struct ath9k_channel *chan)
386{ 363{
387 u32 synthDelay; 364 u32 synthDelay;
388 365
@@ -397,8 +374,8 @@ static inline void ath9k_hw_init_bb(struct ath_hal *ah,
397 udelay(synthDelay + BASE_ACTIVATE_DELAY); 374 udelay(synthDelay + BASE_ACTIVATE_DELAY);
398} 375}
399 376
400static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah, 377static void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
401 enum ath9k_opmode opmode) 378 enum ath9k_opmode opmode)
402{ 379{
403 struct ath_hal_5416 *ahp = AH5416(ah); 380 struct ath_hal_5416 *ahp = AH5416(ah);
404 381
@@ -428,7 +405,7 @@ static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
428 } 405 }
429} 406}
430 407
431static inline void ath9k_hw_init_qos(struct ath_hal *ah) 408static void ath9k_hw_init_qos(struct ath_hal *ah)
432{ 409{
433 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); 410 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
434 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); 411 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
@@ -523,7 +500,7 @@ static inline bool ath9k_hw_nvram_read(struct ath_hal *ah,
523 return ath9k_hw_eeprom_read(ah, off, data); 500 return ath9k_hw_eeprom_read(ah, off, data);
524} 501}
525 502
526static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah) 503static bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
527{ 504{
528 struct ath_hal_5416 *ahp = AH5416(ah); 505 struct ath_hal_5416 *ahp = AH5416(ah);
529 struct ar5416_eeprom *eep = &ahp->ah_eeprom; 506 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
@@ -790,7 +767,7 @@ ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
790 return true; 767 return true;
791} 768}
792 769
793static inline int ath9k_hw_check_eeprom(struct ath_hal *ah) 770static int ath9k_hw_check_eeprom(struct ath_hal *ah)
794{ 771{
795 u32 sum = 0, el; 772 u32 sum = 0, el;
796 u16 *eepdata; 773 u16 *eepdata;
@@ -1196,11 +1173,12 @@ static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1196 1173
1197 ah = &ahp->ah; 1174 ah = &ahp->ah;
1198 1175
1199 memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal));
1200
1201 ah->ah_sc = sc; 1176 ah->ah_sc = sc;
1202 ah->ah_sh = mem; 1177 ah->ah_sh = mem;
1203 1178
1179 ah->ah_magic = AR5416_MAGIC;
1180 ah->ah_countryCode = CTRY_DEFAULT;
1181
1204 ah->ah_devid = devid; 1182 ah->ah_devid = devid;
1205 ah->ah_subvendorid = 0; 1183 ah->ah_subvendorid = 0;
1206 1184
@@ -1294,7 +1272,7 @@ u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
1294 } 1272 }
1295} 1273}
1296 1274
1297static inline int ath9k_hw_get_radiorev(struct ath_hal *ah) 1275static int ath9k_hw_get_radiorev(struct ath_hal *ah)
1298{ 1276{
1299 u32 val; 1277 u32 val;
1300 int i; 1278 int i;
@@ -1307,7 +1285,7 @@ static inline int ath9k_hw_get_radiorev(struct ath_hal *ah)
1307 return ath9k_hw_reverse_bits(val, 8); 1285 return ath9k_hw_reverse_bits(val, 8);
1308} 1286}
1309 1287
1310static inline int ath9k_hw_init_macaddr(struct ath_hal *ah) 1288static int ath9k_hw_init_macaddr(struct ath_hal *ah)
1311{ 1289{
1312 u32 sum; 1290 u32 sum;
1313 int i; 1291 int i;
@@ -1389,7 +1367,7 @@ static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
1389 return spur_val; 1367 return spur_val;
1390} 1368}
1391 1369
1392static inline int ath9k_hw_rfattach(struct ath_hal *ah) 1370static int ath9k_hw_rfattach(struct ath_hal *ah)
1393{ 1371{
1394 bool rfStatus = false; 1372 bool rfStatus = false;
1395 int ecode = 0; 1373 int ecode = 0;
@@ -1434,8 +1412,8 @@ static int ath9k_hw_rf_claim(struct ath_hal *ah)
1434 return 0; 1412 return 0;
1435} 1413}
1436 1414
1437static inline void ath9k_hw_init_pll(struct ath_hal *ah, 1415static void ath9k_hw_init_pll(struct ath_hal *ah,
1438 struct ath9k_channel *chan) 1416 struct ath9k_channel *chan)
1439{ 1417{
1440 u32 pll; 1418 u32 pll;
1441 1419
@@ -1553,7 +1531,7 @@ static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1553 } 1531 }
1554} 1532}
1555 1533
1556static inline void 1534static void
1557ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan) 1535ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
1558{ 1536{
1559 u32 rfMode = 0; 1537 u32 rfMode = 0;
@@ -1623,7 +1601,7 @@ static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
1623 return true; 1601 return true;
1624} 1602}
1625 1603
1626static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah) 1604static bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1627{ 1605{
1628 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1606 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1629 AR_RTC_FORCE_WAKE_ON_INT); 1607 AR_RTC_FORCE_WAKE_ON_INT);
@@ -1664,7 +1642,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
1664 } 1642 }
1665} 1643}
1666 1644
1667static inline 1645static
1668struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah, 1646struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
1669 struct ath9k_channel *chan) 1647 struct ath9k_channel *chan)
1670{ 1648{
@@ -2098,7 +2076,7 @@ static void ath9k_hw_ani_attach(struct ath_hal *ah)
2098 ahp->ah_procPhyErr |= HAL_PROCESS_ANI; 2076 ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
2099} 2077}
2100 2078
2101static inline void ath9k_hw_ani_setup(struct ath_hal *ah) 2079static void ath9k_hw_ani_setup(struct ath_hal *ah)
2102{ 2080{
2103 struct ath_hal_5416 *ahp = AH5416(ah); 2081 struct ath_hal_5416 *ahp = AH5416(ah);
2104 int i; 2082 int i;
@@ -2548,6 +2526,11 @@ static void ath9k_ani_reset(struct ath_hal *ah)
2548 } 2526 }
2549} 2527}
2550 2528
2529/*
2530 * Process a MIB interrupt. We may potentially be invoked because
2531 * any of the MIB counters overflow/trigger so don't assume we're
2532 * here because a PHY error counter triggered.
2533 */
2551void ath9k_hw_procmibevent(struct ath_hal *ah, 2534void ath9k_hw_procmibevent(struct ath_hal *ah,
2552 const struct ath9k_node_stats *stats) 2535 const struct ath9k_node_stats *stats)
2553{ 2536{
@@ -2555,18 +2538,20 @@ void ath9k_hw_procmibevent(struct ath_hal *ah,
2555 u32 phyCnt1, phyCnt2; 2538 u32 phyCnt1, phyCnt2;
2556 2539
2557 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n"); 2540 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n");
2558 2541 /* Reset these counters regardless */
2559 REG_WRITE(ah, AR_FILT_OFDM, 0); 2542 REG_WRITE(ah, AR_FILT_OFDM, 0);
2560 REG_WRITE(ah, AR_FILT_CCK, 0); 2543 REG_WRITE(ah, AR_FILT_CCK, 0);
2561 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING)) 2544 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
2562 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR); 2545 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
2563 2546
2547 /* Clear the mib counters and save them in the stats */
2564 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); 2548 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2565 ahp->ah_stats.ast_nodestats = *stats; 2549 ahp->ah_stats.ast_nodestats = *stats;
2566 2550
2567 if (!DO_ANI(ah)) 2551 if (!DO_ANI(ah))
2568 return; 2552 return;
2569 2553
2554 /* NB: these are not reset-on-read */
2570 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); 2555 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
2571 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); 2556 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
2572 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) || 2557 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
@@ -2574,6 +2559,7 @@ void ath9k_hw_procmibevent(struct ath_hal *ah,
2574 struct ar5416AniState *aniState = ahp->ah_curani; 2559 struct ar5416AniState *aniState = ahp->ah_curani;
2575 u32 ofdmPhyErrCnt, cckPhyErrCnt; 2560 u32 ofdmPhyErrCnt, cckPhyErrCnt;
2576 2561
2562 /* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */
2577 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; 2563 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
2578 ahp->ah_stats.ast_ani_ofdmerrs += 2564 ahp->ah_stats.ast_ani_ofdmerrs +=
2579 ofdmPhyErrCnt - aniState->ofdmPhyErrCount; 2565 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
@@ -2584,11 +2570,17 @@ void ath9k_hw_procmibevent(struct ath_hal *ah,
2584 cckPhyErrCnt - aniState->cckPhyErrCount; 2570 cckPhyErrCnt - aniState->cckPhyErrCount;
2585 aniState->cckPhyErrCount = cckPhyErrCnt; 2571 aniState->cckPhyErrCount = cckPhyErrCnt;
2586 2572
2573 /*
2574 * NB: figure out which counter triggered. If both
2575 * trigger we'll only deal with one as the processing
2576 * clobbers the error counter so the trigger threshold
2577 * check will never be true.
2578 */
2587 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh) 2579 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
2588 ath9k_hw_ani_ofdm_err_trigger(ah); 2580 ath9k_hw_ani_ofdm_err_trigger(ah);
2589 if (aniState->cckPhyErrCount > aniState->cckTrigHigh) 2581 if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
2590 ath9k_hw_ani_cck_err_trigger(ah); 2582 ath9k_hw_ani_cck_err_trigger(ah);
2591 2583 /* NB: always restart to insure the h/w counters are reset */
2592 ath9k_ani_restart(ah); 2584 ath9k_ani_restart(ah);
2593 } 2585 }
2594} 2586}
@@ -2822,32 +2814,11 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
2822 } 2814 }
2823} 2815}
2824 2816
2825static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio, 2817void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2826 enum ath9k_gpio_output_mux_type 2818 u32 ah_signal_type)
2827 halSignalType)
2828{ 2819{
2829 u32 ah_signal_type;
2830 u32 gpio_shift; 2820 u32 gpio_shift;
2831 2821
2832 static u32 MuxSignalConversionTable[] = {
2833
2834 AR_GPIO_OUTPUT_MUX_AS_OUTPUT,
2835
2836 AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED,
2837
2838 AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED,
2839
2840 AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED,
2841
2842 AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED,
2843 };
2844
2845 if ((halSignalType >= 0)
2846 && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable)))
2847 ah_signal_type = MuxSignalConversionTable[halSignalType];
2848 else
2849 return false;
2850
2851 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); 2822 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2852 2823
2853 gpio_shift = 2 * gpio; 2824 gpio_shift = 2 * gpio;
@@ -2856,19 +2827,46 @@ static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2856 AR_GPIO_OE_OUT, 2827 AR_GPIO_OE_OUT,
2857 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), 2828 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
2858 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 2829 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2859
2860 return true;
2861} 2830}
2862 2831
2863static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, 2832void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val)
2864 u32 val)
2865{ 2833{
2866 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 2834 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
2867 AR_GPIO_BIT(gpio)); 2835 AR_GPIO_BIT(gpio));
2868 return true;
2869} 2836}
2870 2837
2871static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio) 2838/*
2839 * Configure GPIO Input lines
2840 */
2841void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio)
2842{
2843 u32 gpio_shift;
2844
2845 ASSERT(gpio < ah->ah_caps.num_gpio_pins);
2846
2847 gpio_shift = gpio << 1;
2848
2849 REG_RMW(ah,
2850 AR_GPIO_OE_OUT,
2851 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
2852 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2853}
2854
2855#ifdef CONFIG_RFKILL
2856static void ath9k_enable_rfkill(struct ath_hal *ah)
2857{
2858 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
2859 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
2860
2861 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
2862 AR_GPIO_INPUT_MUX2_RFSILENT);
2863
2864 ath9k_hw_cfg_gpio_input(ah, ah->ah_rfkill_gpio);
2865 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
2866}
2867#endif
2868
2869u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2872{ 2870{
2873 if (gpio >= ah->ah_caps.num_gpio_pins) 2871 if (gpio >= ah->ah_caps.num_gpio_pins)
2874 return 0xffffffff; 2872 return 0xffffffff;
@@ -2883,7 +2881,7 @@ static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2883 } 2881 }
2884} 2882}
2885 2883
2886static inline int ath9k_hw_post_attach(struct ath_hal *ah) 2884static int ath9k_hw_post_attach(struct ath_hal *ah)
2887{ 2885{
2888 int ecode; 2886 int ecode;
2889 2887
@@ -3081,17 +3079,17 @@ static bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
3081 3079
3082 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM; 3080 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
3083 3081
3082#ifdef CONFIG_RFKILL
3084 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT); 3083 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
3085 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) { 3084 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
3086 ahp->ah_gpioSelect = 3085 ah->ah_rfkill_gpio =
3087 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL); 3086 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
3088 ahp->ah_polarity = 3087 ah->ah_rfkill_polarity =
3089 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY); 3088 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
3090 3089
3091 ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true,
3092 NULL);
3093 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; 3090 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3094 } 3091 }
3092#endif
3095 3093
3096 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) || 3094 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
3097 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) || 3095 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
@@ -3595,7 +3593,7 @@ static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin,
3595 return true; 3593 return true;
3596} 3594}
3597 3595
3598static inline void 3596static void
3599ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah, 3597ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
3600 struct ath9k_channel *chan, 3598 struct ath9k_channel *chan,
3601 struct cal_data_per_freq *pRawDataSet, 3599 struct cal_data_per_freq *pRawDataSet,
@@ -3777,7 +3775,7 @@ ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah,
3777 return; 3775 return;
3778} 3776}
3779 3777
3780static inline bool 3778static bool
3781ath9k_hw_set_power_cal_table(struct ath_hal *ah, 3779ath9k_hw_set_power_cal_table(struct ath_hal *ah,
3782 struct ar5416_eeprom *pEepData, 3780 struct ar5416_eeprom *pEepData,
3783 struct ath9k_channel *chan, 3781 struct ath9k_channel *chan,
@@ -3980,7 +3978,7 @@ void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
3980 } 3978 }
3981} 3979}
3982 3980
3983static inline void 3981static void
3984ath9k_hw_get_legacy_target_powers(struct ath_hal *ah, 3982ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
3985 struct ath9k_channel *chan, 3983 struct ath9k_channel *chan,
3986 struct cal_target_power_leg *powInfo, 3984 struct cal_target_power_leg *powInfo,
@@ -4046,7 +4044,7 @@ ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
4046 } 4044 }
4047} 4045}
4048 4046
4049static inline void 4047static void
4050ath9k_hw_get_target_powers(struct ath_hal *ah, 4048ath9k_hw_get_target_powers(struct ath_hal *ah,
4051 struct ath9k_channel *chan, 4049 struct ath9k_channel *chan,
4052 struct cal_target_power_ht *powInfo, 4050 struct cal_target_power_ht *powInfo,
@@ -4113,7 +4111,7 @@ ath9k_hw_get_target_powers(struct ath_hal *ah,
4113 } 4111 }
4114} 4112}
4115 4113
4116static inline u16 4114static u16
4117ath9k_hw_get_max_edge_power(u16 freq, 4115ath9k_hw_get_max_edge_power(u16 freq,
4118 struct cal_ctl_edges *pRdEdgesPower, 4116 struct cal_ctl_edges *pRdEdgesPower,
4119 bool is2GHz) 4117 bool is2GHz)
@@ -4143,7 +4141,7 @@ ath9k_hw_get_max_edge_power(u16 freq,
4143 return twiceMaxEdgePower; 4141 return twiceMaxEdgePower;
4144} 4142}
4145 4143
4146static inline bool 4144static bool
4147ath9k_hw_set_power_per_rate_table(struct ath_hal *ah, 4145ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
4148 struct ar5416_eeprom *pEepData, 4146 struct ar5416_eeprom *pEepData,
4149 struct ath9k_channel *chan, 4147 struct ath9k_channel *chan,
@@ -5122,7 +5120,7 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
5122 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 5120 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
5123} 5121}
5124 5122
5125static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah) 5123static void ath9k_hw_init_chain_masks(struct ath_hal *ah)
5126{ 5124{
5127 struct ath_hal_5416 *ahp = AH5416(ah); 5125 struct ath_hal_5416 *ahp = AH5416(ah);
5128 int rx_chainmask, tx_chainmask; 5126 int rx_chainmask, tx_chainmask;
@@ -5326,7 +5324,7 @@ bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
5326 } 5324 }
5327} 5325}
5328 5326
5329static inline void ath9k_hw_init_user_settings(struct ath_hal *ah) 5327static void ath9k_hw_init_user_settings(struct ath_hal *ah)
5330{ 5328{
5331 struct ath_hal_5416 *ahp = AH5416(ah); 5329 struct ath_hal_5416 *ahp = AH5416(ah);
5332 5330
@@ -5345,7 +5343,7 @@ static inline void ath9k_hw_init_user_settings(struct ath_hal *ah)
5345 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout); 5343 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
5346} 5344}
5347 5345
5348static inline int 5346static int
5349ath9k_hw_process_ini(struct ath_hal *ah, 5347ath9k_hw_process_ini(struct ath_hal *ah,
5350 struct ath9k_channel *chan, 5348 struct ath9k_channel *chan,
5351 enum ath9k_ht_macmode macmode) 5349 enum ath9k_ht_macmode macmode)
@@ -5476,7 +5474,7 @@ ath9k_hw_process_ini(struct ath_hal *ah,
5476 return 0; 5474 return 0;
5477} 5475}
5478 5476
5479static inline void ath9k_hw_setup_calibration(struct ath_hal *ah, 5477static void ath9k_hw_setup_calibration(struct ath_hal *ah,
5480 struct hal_cal_list *currCal) 5478 struct hal_cal_list *currCal)
5481{ 5479{
5482 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0), 5480 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
@@ -5512,8 +5510,8 @@ static inline void ath9k_hw_setup_calibration(struct ath_hal *ah,
5512 AR_PHY_TIMING_CTRL4_DO_CAL); 5510 AR_PHY_TIMING_CTRL4_DO_CAL);
5513} 5511}
5514 5512
5515static inline void ath9k_hw_reset_calibration(struct ath_hal *ah, 5513static void ath9k_hw_reset_calibration(struct ath_hal *ah,
5516 struct hal_cal_list *currCal) 5514 struct hal_cal_list *currCal)
5517{ 5515{
5518 struct ath_hal_5416 *ahp = AH5416(ah); 5516 struct ath_hal_5416 *ahp = AH5416(ah);
5519 int i; 5517 int i;
@@ -5532,7 +5530,7 @@ static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
5532 ahp->ah_CalSamples = 0; 5530 ahp->ah_CalSamples = 0;
5533} 5531}
5534 5532
5535static inline void 5533static void
5536ath9k_hw_per_calibration(struct ath_hal *ah, 5534ath9k_hw_per_calibration(struct ath_hal *ah,
5537 struct ath9k_channel *ichan, 5535 struct ath9k_channel *ichan,
5538 u8 rxchainmask, 5536 u8 rxchainmask,
@@ -5622,7 +5620,7 @@ static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
5622 return true; 5620 return true;
5623} 5621}
5624 5622
5625static inline bool 5623static bool
5626ath9k_hw_channel_change(struct ath_hal *ah, 5624ath9k_hw_channel_change(struct ath_hal *ah,
5627 struct ath9k_channel *chan, 5625 struct ath9k_channel *chan,
5628 enum ath9k_ht_macmode macmode) 5626 enum ath9k_ht_macmode macmode)
@@ -5799,8 +5797,8 @@ static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
5799 return retval; 5797 return retval;
5800} 5798}
5801 5799
5802static inline bool ath9k_hw_init_cal(struct ath_hal *ah, 5800static bool ath9k_hw_init_cal(struct ath_hal *ah,
5803 struct ath9k_channel *chan) 5801 struct ath9k_channel *chan)
5804{ 5802{
5805 struct ath_hal_5416 *ahp = AH5416(ah); 5803 struct ath_hal_5416 *ahp = AH5416(ah);
5806 struct ath9k_channel *ichan = 5804 struct ath9k_channel *ichan =
@@ -5861,7 +5859,7 @@ static inline bool ath9k_hw_init_cal(struct ath_hal *ah,
5861} 5859}
5862 5860
5863 5861
5864bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode, 5862bool ath9k_hw_reset(struct ath_hal *ah,
5865 struct ath9k_channel *chan, 5863 struct ath9k_channel *chan,
5866 enum ath9k_ht_macmode macmode, 5864 enum ath9k_ht_macmode macmode,
5867 u8 txchainmask, u8 rxchainmask, 5865 u8 txchainmask, u8 rxchainmask,
@@ -5945,7 +5943,7 @@ bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
5945 else 5943 else
5946 ath9k_hw_set_gpio(ah, 9, 1); 5944 ath9k_hw_set_gpio(ah, 9, 1);
5947 } 5945 }
5948 ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT); 5946 ath9k_hw_cfg_output(ah, 9, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
5949 } 5947 }
5950 5948
5951 ecode = ath9k_hw_process_ini(ah, chan, macmode); 5949 ecode = ath9k_hw_process_ini(ah, chan, macmode);
@@ -5975,7 +5973,7 @@ bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
5975 | (ah->ah_config. 5973 | (ah->ah_config.
5976 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) 5974 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
5977 | ahp->ah_staId1Defaults); 5975 | ahp->ah_staId1Defaults);
5978 ath9k_hw_set_operating_mode(ah, opmode); 5976 ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
5979 5977
5980 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask)); 5978 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
5981 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4)); 5979 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
@@ -6005,13 +6003,15 @@ bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode,
6005 for (i = 0; i < ah->ah_caps.total_queues; i++) 6003 for (i = 0; i < ah->ah_caps.total_queues; i++)
6006 ath9k_hw_resettxqueue(ah, i); 6004 ath9k_hw_resettxqueue(ah, i);
6007 6005
6008 ath9k_hw_init_interrupt_masks(ah, opmode); 6006 ath9k_hw_init_interrupt_masks(ah, ah->ah_opmode);
6009 ath9k_hw_init_qos(ah); 6007 ath9k_hw_init_qos(ah);
6010 6008
6009#ifdef CONFIG_RFKILL
6010 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
6011 ath9k_enable_rfkill(ah);
6012#endif
6011 ath9k_hw_init_user_settings(ah); 6013 ath9k_hw_init_user_settings(ah);
6012 6014
6013 ah->ah_opmode = opmode;
6014
6015 REG_WRITE(ah, AR_STA_ID1, 6015 REG_WRITE(ah, AR_STA_ID1,
6016 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM); 6016 REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM);
6017 6017
@@ -6539,31 +6539,6 @@ ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask)
6539 return true; 6539 return true;
6540} 6540}
6541 6541
6542#ifdef CONFIG_ATH9K_RFKILL
6543static void ath9k_enable_rfkill(struct ath_hal *ah)
6544{
6545 struct ath_hal_5416 *ahp = AH5416(ah);
6546
6547 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
6548 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
6549
6550 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
6551 AR_GPIO_INPUT_MUX2_RFSILENT);
6552
6553 ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect);
6554 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
6555
6556 if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) {
6557
6558 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6559 !ahp->ah_gpioBit);
6560 } else {
6561 ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect,
6562 ahp->ah_gpioBit);
6563 }
6564}
6565#endif
6566
6567void 6542void
6568ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, 6543ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
6569 u16 assocId) 6544 u16 assocId)
@@ -7678,8 +7653,7 @@ bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
7678 REG_WRITE(ah, AR_DRETRY_LIMIT(q), 7653 REG_WRITE(ah, AR_DRETRY_LIMIT(q),
7679 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) 7654 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
7680 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) 7655 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
7681 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH) 7656 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
7682 );
7683 7657
7684 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); 7658 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
7685 REG_WRITE(ah, AR_DMISC(q), 7659 REG_WRITE(ah, AR_DMISC(q),
@@ -8324,15 +8298,7 @@ struct ath_hal *ath9k_hw_attach(u16 devid,
8324 *error = -ENXIO; 8298 *error = -ENXIO;
8325 break; 8299 break;
8326 } 8300 }
8327 if (ah != NULL) { 8301
8328 ah->ah_devid = ah->ah_devid;
8329 ah->ah_subvendorid = ah->ah_subvendorid;
8330 ah->ah_macVersion = ah->ah_macVersion;
8331 ah->ah_macRev = ah->ah_macRev;
8332 ah->ah_phyRev = ah->ah_phyRev;
8333 ah->ah_analog5GhzRev = ah->ah_analog5GhzRev;
8334 ah->ah_analog2GhzRev = ah->ah_analog2GhzRev;
8335 }
8336 return ah; 8302 return ah;
8337} 8303}
8338 8304
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index ae680f21ba7e..2113818ee934 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -314,14 +314,11 @@ struct ar5416_desc {
314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \ 314#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
315 MS(ads->ds_rxstatus0, AR_RxRate) : \ 315 MS(ads->ds_rxstatus0, AR_RxRate) : \
316 (ads->ds_rxstatus3 >> 2) & 0xFF) 316 (ads->ds_rxstatus3 >> 2) & 0xFF)
317#define RXSTATUS_DUPLICATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \
318 MS(ads->ds_rxstatus3, AR_Parallel40) : \
319 (ads->ds_rxstatus3 >> 10) & 0x1)
320 317
321#define set11nTries(_series, _index) \ 318#define set11nTries(_series, _index) \
322 (SM((_series)[_index].Tries, AR_XmitDataTries##_index)) 319 (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
323 320
324#define set11nRate(_series, _index) \ 321#define set11nRate(_series, _index) \
325 (SM((_series)[_index].Rate, AR_XmitRate##_index)) 322 (SM((_series)[_index].Rate, AR_XmitRate##_index))
326 323
327#define set11nPktDurRTSCTS(_series, _index) \ 324#define set11nPktDurRTSCTS(_series, _index) \
@@ -330,11 +327,11 @@ struct ar5416_desc {
330 AR_RTSCTSQual##_index : 0)) 327 AR_RTSCTSQual##_index : 0))
331 328
332#define set11nRateFlags(_series, _index) \ 329#define set11nRateFlags(_series, _index) \
333 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \ 330 (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \
334 AR_2040_##_index : 0) \ 331 AR_2040_##_index : 0) \
335 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ 332 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
336 AR_GI##_index : 0) \ 333 AR_GI##_index : 0) \
337 |SM((_series)[_index].ChSel, AR_ChainSel##_index)) 334 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
338 335
339#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100) 336#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100)
340 337
@@ -346,9 +343,6 @@ struct ar5416_desc {
346#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1) 343#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1)
347#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD 344#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD
348 345
349#define NUM_CORNER_FIX_BITS_2133 7
350#define CCK_OFDM_GAIN_DELTA 15
351
352struct ar5416AniState { 346struct ar5416AniState {
353 struct ath9k_channel c; 347 struct ath9k_channel c;
354 u8 noiseImmunityLevel; 348 u8 noiseImmunityLevel;
@@ -377,11 +371,8 @@ struct ar5416AniState {
377}; 371};
378 372
379#define HAL_PROCESS_ANI 0x00000001 373#define HAL_PROCESS_ANI 0x00000001
380#define HAL_RADAR_EN 0x80000000
381#define HAL_AR_EN 0x40000000
382
383#define DO_ANI(ah) \ 374#define DO_ANI(ah) \
384 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI)) 375 ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI))
385 376
386struct ar5416Stats { 377struct ar5416Stats {
387 u32 ast_ani_niup; 378 u32 ast_ani_niup;
@@ -425,7 +416,6 @@ struct ar5416Stats {
425#define AR5416_EEP_MINOR_VER_7 0x7 416#define AR5416_EEP_MINOR_VER_7 0x7
426#define AR5416_EEP_MINOR_VER_9 0x9 417#define AR5416_EEP_MINOR_VER_9 0x9
427 418
428#define AR5416_EEP_START_LOC 256
429#define AR5416_NUM_5G_CAL_PIERS 8 419#define AR5416_NUM_5G_CAL_PIERS 8
430#define AR5416_NUM_2G_CAL_PIERS 4 420#define AR5416_NUM_2G_CAL_PIERS 4
431#define AR5416_NUM_5G_20_TARGET_POWERS 8 421#define AR5416_NUM_5G_20_TARGET_POWERS 8
@@ -441,25 +431,10 @@ struct ar5416Stats {
441#define AR5416_EEPROM_MODAL_SPURS 5 431#define AR5416_EEPROM_MODAL_SPURS 5
442#define AR5416_MAX_RATE_POWER 63 432#define AR5416_MAX_RATE_POWER 63
443#define AR5416_NUM_PDADC_VALUES 128 433#define AR5416_NUM_PDADC_VALUES 128
444#define AR5416_NUM_RATES 16
445#define AR5416_BCHAN_UNUSED 0xFF 434#define AR5416_BCHAN_UNUSED 0xFF
446#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 435#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
447#define AR5416_EEPMISC_BIG_ENDIAN 0x01
448#define AR5416_MAX_CHAINS 3 436#define AR5416_MAX_CHAINS 3
449#define AR5416_ANT_16S 25
450
451#define AR5416_NUM_ANT_CHAIN_FIELDS 7
452#define AR5416_NUM_ANT_COMMON_FIELDS 4
453#define AR5416_SIZE_ANT_CHAIN_FIELD 3
454#define AR5416_SIZE_ANT_COMMON_FIELD 4
455#define AR5416_ANT_CHAIN_MASK 0x7
456#define AR5416_ANT_COMMON_MASK 0xf
457#define AR5416_CHAIN_0_IDX 0
458#define AR5416_CHAIN_1_IDX 1
459#define AR5416_CHAIN_2_IDX 2
460
461#define AR5416_PWR_TABLE_OFFSET -5 437#define AR5416_PWR_TABLE_OFFSET -5
462#define AR5416_LEGACY_CHAINMASK 1
463 438
464enum eeprom_param { 439enum eeprom_param {
465 EEP_NFTHRESH_5, 440 EEP_NFTHRESH_5,
@@ -633,7 +608,7 @@ struct ar5416IniArray {
633}; 608};
634 609
635#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \ 610#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
636 (iniarray)->ia_array = (u32 *)(array); \ 611 (iniarray)->ia_array = (u32 *)(array); \
637 (iniarray)->ia_rows = (rows); \ 612 (iniarray)->ia_rows = (rows); \
638 (iniarray)->ia_columns = (columns); \ 613 (iniarray)->ia_columns = (columns); \
639 } while (0) 614 } while (0)
@@ -641,16 +616,16 @@ struct ar5416IniArray {
641#define INI_RA(iniarray, row, column) \ 616#define INI_RA(iniarray, row, column) \
642 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)]) 617 (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)])
643 618
644#define INIT_CAL(_perCal) do { \ 619#define INIT_CAL(_perCal) do { \
645 (_perCal)->calState = CAL_WAITING; \ 620 (_perCal)->calState = CAL_WAITING; \
646 (_perCal)->calNext = NULL; \ 621 (_perCal)->calNext = NULL; \
647 } while (0) 622 } while (0)
648 623
649#define INSERT_CAL(_ahp, _perCal) \ 624#define INSERT_CAL(_ahp, _perCal) \
650 do { \ 625 do { \
651 if ((_ahp)->ah_cal_list_last == NULL) { \ 626 if ((_ahp)->ah_cal_list_last == NULL) { \
652 (_ahp)->ah_cal_list = \ 627 (_ahp)->ah_cal_list = \
653 (_ahp)->ah_cal_list_last = (_perCal); \ 628 (_ahp)->ah_cal_list_last = (_perCal); \
654 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \ 629 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
655 } else { \ 630 } else { \
656 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \ 631 ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \
@@ -696,25 +671,29 @@ struct hal_cal_list {
696struct ath_hal_5416 { 671struct ath_hal_5416 {
697 struct ath_hal ah; 672 struct ath_hal ah;
698 struct ar5416_eeprom ah_eeprom; 673 struct ar5416_eeprom ah_eeprom;
674 struct ar5416Stats ah_stats;
675 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
676 void __iomem *ah_cal_mem;
677
699 u8 ah_macaddr[ETH_ALEN]; 678 u8 ah_macaddr[ETH_ALEN];
700 u8 ah_bssid[ETH_ALEN]; 679 u8 ah_bssid[ETH_ALEN];
701 u8 ah_bssidmask[ETH_ALEN]; 680 u8 ah_bssidmask[ETH_ALEN];
702 u16 ah_assocId; 681 u16 ah_assocId;
682
703 int16_t ah_curchanRadIndex; 683 int16_t ah_curchanRadIndex;
704 u32 ah_maskReg; 684 u32 ah_maskReg;
705 struct ar5416Stats ah_stats;
706 u32 ah_txDescMask;
707 u32 ah_txOkInterruptMask; 685 u32 ah_txOkInterruptMask;
708 u32 ah_txErrInterruptMask; 686 u32 ah_txErrInterruptMask;
709 u32 ah_txDescInterruptMask; 687 u32 ah_txDescInterruptMask;
710 u32 ah_txEolInterruptMask; 688 u32 ah_txEolInterruptMask;
711 u32 ah_txUrnInterruptMask; 689 u32 ah_txUrnInterruptMask;
712 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
713 enum ath9k_power_mode ah_powerMode;
714 bool ah_chipFullSleep; 690 bool ah_chipFullSleep;
715 u32 ah_atimWindow; 691 u32 ah_atimWindow;
716 enum ath9k_ant_setting ah_diversityControl;
717 u16 ah_antennaSwitchSwap; 692 u16 ah_antennaSwitchSwap;
693 enum ath9k_power_mode ah_powerMode;
694 enum ath9k_ant_setting ah_diversityControl;
695
696 /* Calibration */
718 enum hal_cal_types ah_suppCals; 697 enum hal_cal_types ah_suppCals;
719 struct hal_cal_list ah_iqCalData; 698 struct hal_cal_list ah_iqCalData;
720 struct hal_cal_list ah_adcGainCalData; 699 struct hal_cal_list ah_adcGainCalData;
@@ -751,16 +730,16 @@ struct ath_hal_5416 {
751 int32_t sign[AR5416_MAX_CHAINS]; 730 int32_t sign[AR5416_MAX_CHAINS];
752 } ah_Meas3; 731 } ah_Meas3;
753 u16 ah_CalSamples; 732 u16 ah_CalSamples;
754 u32 ah_tx6PowerInHalfDbm; 733
755 u32 ah_staId1Defaults; 734 u32 ah_staId1Defaults;
756 u32 ah_miscMode; 735 u32 ah_miscMode;
757 bool ah_tpcEnabled;
758 u32 ah_beaconInterval;
759 enum { 736 enum {
760 AUTO_32KHZ, 737 AUTO_32KHZ,
761 USE_32KHZ, 738 USE_32KHZ,
762 DONT_USE_32KHZ, 739 DONT_USE_32KHZ,
763 } ah_enable32kHzClock; 740 } ah_enable32kHzClock;
741
742 /* RF */
764 u32 *ah_analogBank0Data; 743 u32 *ah_analogBank0Data;
765 u32 *ah_analogBank1Data; 744 u32 *ah_analogBank1Data;
766 u32 *ah_analogBank2Data; 745 u32 *ah_analogBank2Data;
@@ -770,8 +749,9 @@ struct ath_hal_5416 {
770 u32 *ah_analogBank7Data; 749 u32 *ah_analogBank7Data;
771 u32 *ah_addac5416_21; 750 u32 *ah_addac5416_21;
772 u32 *ah_bank6Temp; 751 u32 *ah_bank6Temp;
773 u32 ah_ofdmTxPower; 752
774 int16_t ah_txPowerIndexOffset; 753 int16_t ah_txPowerIndexOffset;
754 u32 ah_beaconInterval;
775 u32 ah_slottime; 755 u32 ah_slottime;
776 u32 ah_acktimeout; 756 u32 ah_acktimeout;
777 u32 ah_ctstimeout; 757 u32 ah_ctstimeout;
@@ -780,7 +760,8 @@ struct ath_hal_5416 {
780 u32 ah_gpioSelect; 760 u32 ah_gpioSelect;
781 u32 ah_polarity; 761 u32 ah_polarity;
782 u32 ah_gpioBit; 762 u32 ah_gpioBit;
783 bool ah_eepEnabled; 763
764 /* ANI */
784 u32 ah_procPhyErr; 765 u32 ah_procPhyErr;
785 bool ah_hasHwPhyCounters; 766 bool ah_hasHwPhyCounters;
786 u32 ah_aniPeriod; 767 u32 ah_aniPeriod;
@@ -790,18 +771,14 @@ struct ath_hal_5416 {
790 int ah_coarseHigh[5]; 771 int ah_coarseHigh[5];
791 int ah_coarseLow[5]; 772 int ah_coarseLow[5];
792 int ah_firpwr[5]; 773 int ah_firpwr[5];
793 u16 ah_ratesArray[16]; 774 enum ath9k_ani_cmd ah_ani_function;
775
794 u32 ah_intrTxqs; 776 u32 ah_intrTxqs;
795 bool ah_intrMitigation; 777 bool ah_intrMitigation;
796 u32 ah_cycleCount;
797 u32 ah_ctlBusy;
798 u32 ah_extBusy;
799 enum ath9k_ht_extprotspacing ah_extprotspacing; 778 enum ath9k_ht_extprotspacing ah_extprotspacing;
800 u8 ah_txchainmask; 779 u8 ah_txchainmask;
801 u8 ah_rxchainmask; 780 u8 ah_rxchainmask;
802 int ah_hwp; 781
803 void __iomem *ah_cal_mem;
804 enum ath9k_ani_cmd ah_ani_function;
805 struct ar5416IniArray ah_iniModes; 782 struct ar5416IniArray ah_iniModes;
806 struct ar5416IniArray ah_iniCommon; 783 struct ar5416IniArray ah_iniCommon;
807 struct ar5416IniArray ah_iniBank0; 784 struct ar5416IniArray ah_iniBank0;
@@ -820,10 +797,6 @@ struct ath_hal_5416 {
820 797
821#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 798#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
822 799
823#define IS_5416_EMU(ah) \
824 ((ah->ah_devid == AR5416_DEVID_EMU) || \
825 (ah->ah_devid == AR5416_DEVID_EMU_PCIE))
826
827#define ar5416RfDetach(ah) do { \ 800#define ar5416RfDetach(ah) do { \
828 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \ 801 if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \
829 AH5416(ah)->ah_rfHal.rfDetach(ah); \ 802 AH5416(ah)->ah_rfHal.rfDetach(ah); \
@@ -841,8 +814,8 @@ struct ath_hal_5416 {
841#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \ 814#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \
842 int r; \ 815 int r; \
843 for (r = 0; r < ((iniarray)->ia_rows); r++) { \ 816 for (r = 0; r < ((iniarray)->ia_rows); r++) { \
844 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \ 817 REG_WRITE(ah, INI_RA((iniarray), (r), 0), \
845 INI_RA((iniarray), r, (column))); \ 818 INI_RA((iniarray), r, (column))); \
846 DO_DELAY(regWr); \ 819 DO_DELAY(regWr); \
847 } \ 820 } \
848 } while (0) 821 } while (0)
@@ -852,30 +825,21 @@ struct ath_hal_5416 {
852#define COEF_SCALE_S 24 825#define COEF_SCALE_S 24
853#define HT40_CHANNEL_CENTER_SHIFT 10 826#define HT40_CHANNEL_CENTER_SHIFT 10
854 827
855#define ar5416CheckOpMode(_opmode) \
856 ((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) || \
857 (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR))
858
859#define AR5416_EEPROM_MAGIC_OFFSET 0x0 828#define AR5416_EEPROM_MAGIC_OFFSET 0x0
860 829
861#define AR5416_EEPROM_S 2 830#define AR5416_EEPROM_S 2
862#define AR5416_EEPROM_OFFSET 0x2000 831#define AR5416_EEPROM_OFFSET 0x2000
863#define AR5416_EEPROM_START_ADDR \ 832#define AR5416_EEPROM_START_ADDR \
864 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200 833 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
865#define AR5416_EEPROM_MAX 0xae0 834#define AR5416_EEPROM_MAX 0xae0
866#define ar5416_get_eep_ver(_ahp) \ 835#define ar5416_get_eep_ver(_ahp) \
867 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF) 836 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF)
868#define ar5416_get_eep_rev(_ahp) \ 837#define ar5416_get_eep_rev(_ahp) \
869 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF) 838 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF)
870#define ar5416_get_ntxchains(_txchainmask) \ 839#define ar5416_get_ntxchains(_txchainmask) \
871 (((_txchainmask >> 2) & 1) + \ 840 (((_txchainmask >> 2) & 1) + \
872 ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) 841 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
873 842
874#define IS_EEP_MINOR_V3(_ahp) \
875 (ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_3)
876
877#define FIXED_CCA_THRESHOLD 15
878
879#ifdef __BIG_ENDIAN 843#ifdef __BIG_ENDIAN
880#define AR5416_EEPROM_MAGIC 0x5aa5 844#define AR5416_EEPROM_MAGIC 0x5aa5
881#else 845#else
@@ -910,8 +874,6 @@ struct ath_hal_5416 {
910#define AR_GPIOD_MASK 0x00001FFF 874#define AR_GPIOD_MASK 0x00001FFF
911#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) 875#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
912 876
913#define MAX_ANALOG_START 319
914
915#define HAL_EP_RND(x, mul) \ 877#define HAL_EP_RND(x, mul) \
916 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 878 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
917#define BEACON_RSSI(ahp) \ 879#define BEACON_RSSI(ahp) \
@@ -923,8 +885,6 @@ struct ath_hal_5416 {
923#define AH_TIMEOUT 100000 885#define AH_TIMEOUT 100000
924#define AH_TIME_QUANTUM 10 886#define AH_TIME_QUANTUM 10
925 887
926#define IS(_c, _f) (((_c)->channelFlags & _f) || 0)
927
928#define AR_KEYTABLE_SIZE 128 888#define AR_KEYTABLE_SIZE 128
929#define POWER_UP_TIME 200000 889#define POWER_UP_TIME 200000
930 890
@@ -964,6 +924,6 @@ struct ath_hal_5416 {
964#define OFDM_SYMBOL_TIME_QUARTER 16 924#define OFDM_SYMBOL_TIME_QUARTER 16
965 925
966u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp, 926u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
967 enum eeprom_param param); 927 enum eeprom_param param);
968 928
969#endif 929#endif
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index acebdf1d20a8..2caba4403167 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -22,8 +22,6 @@
22#define ATH_PCI_VERSION "0.1" 22#define ATH_PCI_VERSION "0.1"
23 23
24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13 24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
25#define IEEE80211_ACTION_CAT_HT 7
26#define IEEE80211_ACTION_HT_TXCHWIDTH 0
27 25
28static char *dev_info = "ath9k"; 26static char *dev_info = "ath9k";
29 27
@@ -142,7 +140,7 @@ static int ath_key_config(struct ath_softc *sc,
142 struct ath9k_keyval hk; 140 struct ath9k_keyval hk;
143 const u8 *mac = NULL; 141 const u8 *mac = NULL;
144 int ret = 0; 142 int ret = 0;
145 enum ieee80211_if_types opmode; 143 enum nl80211_iftype opmode;
146 144
147 memset(&hk, 0, sizeof(hk)); 145 memset(&hk, 0, sizeof(hk));
148 146
@@ -181,14 +179,14 @@ static int ath_key_config(struct ath_softc *sc,
181 */ 179 */
182 if (is_broadcast_ether_addr(addr)) { 180 if (is_broadcast_ether_addr(addr)) {
183 switch (opmode) { 181 switch (opmode) {
184 case IEEE80211_IF_TYPE_STA: 182 case NL80211_IFTYPE_STATION:
185 /* default key: could be group WPA key 183 /* default key: could be group WPA key
186 * or could be static WEP key */ 184 * or could be static WEP key */
187 mac = NULL; 185 mac = NULL;
188 break; 186 break;
189 case IEEE80211_IF_TYPE_IBSS: 187 case NL80211_IFTYPE_ADHOC:
190 break; 188 break;
191 case IEEE80211_IF_TYPE_AP: 189 case NL80211_IFTYPE_AP:
192 break; 190 break;
193 default: 191 default:
194 ASSERT(0); 192 ASSERT(0);
@@ -211,30 +209,25 @@ static int ath_key_config(struct ath_softc *sc,
211 209
212static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key) 210static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
213{ 211{
214#define ATH_MAX_NUM_KEYS 4
215 int freeslot; 212 int freeslot;
216 213
217 freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0; 214 freeslot = (key->keyidx >= 4) ? 1 : 0;
218 ath_key_reset(sc, key->keyidx, freeslot); 215 ath_key_reset(sc, key->keyidx, freeslot);
219#undef ATH_MAX_NUM_KEYS
220} 216}
221 217
222static void setup_ht_cap(struct ieee80211_ht_info *ht_info) 218static void setup_ht_cap(struct ieee80211_ht_info *ht_info)
223{ 219{
224/* Until mac80211 includes these fields */ 220#define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
225 221#define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
226#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
227#define IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
228#define IEEE80211_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
229 222
230 ht_info->ht_supported = 1; 223 ht_info->ht_supported = 1;
231 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH 224 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH
232 |(u16)IEEE80211_HT_CAP_MIMO_PS 225 |(u16)IEEE80211_HT_CAP_SM_PS
233 |(u16)IEEE80211_HT_CAP_SGI_40 226 |(u16)IEEE80211_HT_CAP_SGI_40
234 |(u16)IEEE80211_HT_CAP_DSSSCCK40; 227 |(u16)IEEE80211_HT_CAP_DSSSCCK40;
235 228
236 ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536; 229 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
237 ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8; 230 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
238 /* setup supported mcs set */ 231 /* setup supported mcs set */
239 memset(ht_info->supp_mcs_set, 0, 16); 232 memset(ht_info->supp_mcs_set, 0, 16);
240 ht_info->supp_mcs_set[0] = 0xff; 233 ht_info->supp_mcs_set[0] = 0xff;
@@ -330,6 +323,693 @@ static u8 parse_mpdudensity(u8 mpdudensity)
330 } 323 }
331} 324}
332 325
326static void ath9k_ht_conf(struct ath_softc *sc,
327 struct ieee80211_bss_conf *bss_conf)
328{
329#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
330 struct ath_ht_info *ht_info = &sc->sc_ht_info;
331
332 if (bss_conf->assoc_ht) {
333 ht_info->ext_chan_offset =
334 bss_conf->ht_bss_conf->bss_cap &
335 IEEE80211_HT_IE_CHA_SEC_OFFSET;
336
337 if (!(bss_conf->ht_conf->cap &
338 IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
339 (bss_conf->ht_bss_conf->bss_cap &
340 IEEE80211_HT_IE_CHA_WIDTH))
341 ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
342 else
343 ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
344
345 ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
346 ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
347 bss_conf->ht_conf->ampdu_factor);
348 ht_info->mpdudensity =
349 parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
350
351 }
352
353#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
354}
355
356static void ath9k_bss_assoc_info(struct ath_softc *sc,
357 struct ieee80211_bss_conf *bss_conf)
358{
359 struct ieee80211_hw *hw = sc->hw;
360 struct ieee80211_channel *curchan = hw->conf.channel;
361 struct ath_vap *avp;
362 int pos;
363 DECLARE_MAC_BUF(mac);
364
365 if (bss_conf->assoc) {
366 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
367 __func__,
368 bss_conf->aid);
369
370 avp = sc->sc_vaps[0];
371 if (avp == NULL) {
372 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
373 __func__);
374 return;
375 }
376
377 /* New association, store aid */
378 if (avp->av_opmode == ATH9K_M_STA) {
379 sc->sc_curaid = bss_conf->aid;
380 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
381 sc->sc_curaid);
382 }
383
384 /* Configure the beacon */
385 ath_beacon_config(sc, 0);
386 sc->sc_flags |= SC_OP_BEACONS;
387
388 /* Reset rssi stats */
389 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
390 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
391 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
392 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
393
394 /* Update chainmask */
395 ath_update_chainmask(sc, bss_conf->assoc_ht);
396
397 DPRINTF(sc, ATH_DBG_CONFIG,
398 "%s: bssid %s aid 0x%x\n",
399 __func__,
400 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
401
402 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
403 __func__,
404 curchan->center_freq);
405
406 pos = ath_get_channel(sc, curchan);
407 if (pos == -1) {
408 DPRINTF(sc, ATH_DBG_FATAL,
409 "%s: Invalid channel\n", __func__);
410 return;
411 }
412
413 if (hw->conf.ht_conf.ht_supported)
414 sc->sc_ah->ah_channels[pos].chanmode =
415 ath_get_extchanmode(sc, curchan);
416 else
417 sc->sc_ah->ah_channels[pos].chanmode =
418 (curchan->band == IEEE80211_BAND_2GHZ) ?
419 CHANNEL_G : CHANNEL_A;
420
421 /* set h/w channel */
422 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
423 DPRINTF(sc, ATH_DBG_FATAL,
424 "%s: Unable to set channel\n",
425 __func__);
426
427 ath_rate_newstate(sc, avp);
428 /* Update ratectrl about the new state */
429 ath_rc_node_update(hw, avp->rc_node);
430 } else {
431 DPRINTF(sc, ATH_DBG_CONFIG,
432 "%s: Bss Info DISSOC\n", __func__);
433 sc->sc_curaid = 0;
434 }
435}
436
437void ath_get_beaconconfig(struct ath_softc *sc,
438 int if_id,
439 struct ath_beacon_config *conf)
440{
441 struct ieee80211_hw *hw = sc->hw;
442
443 /* fill in beacon config data */
444
445 conf->beacon_interval = hw->conf.beacon_int;
446 conf->listen_interval = 100;
447 conf->dtim_count = 1;
448 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
449}
450
451void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
452 struct ath_xmit_status *tx_status, struct ath_node *an)
453{
454 struct ieee80211_hw *hw = sc->hw;
455 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
456
457 DPRINTF(sc, ATH_DBG_XMIT,
458 "%s: TX complete: skb: %p\n", __func__, skb);
459
460 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
461 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
462 /* free driver's private data area of tx_info */
463 if (tx_info->driver_data[0] != NULL)
464 kfree(tx_info->driver_data[0]);
465 tx_info->driver_data[0] = NULL;
466 }
467
468 if (tx_status->flags & ATH_TX_BAR) {
469 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
470 tx_status->flags &= ~ATH_TX_BAR;
471 }
472
473 if (tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY)) {
474 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
475 /* Frame was not ACKed, but an ACK was expected */
476 tx_info->status.excessive_retries = 1;
477 }
478 } else {
479 /* Frame was ACKed */
480 tx_info->flags |= IEEE80211_TX_STAT_ACK;
481 }
482
483 tx_info->status.retry_count = tx_status->retries;
484
485 ieee80211_tx_status(hw, skb);
486 if (an)
487 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
488}
489
490int _ath_rx_indicate(struct ath_softc *sc,
491 struct sk_buff *skb,
492 struct ath_recv_status *status,
493 u16 keyix)
494{
495 struct ieee80211_hw *hw = sc->hw;
496 struct ath_node *an = NULL;
497 struct ieee80211_rx_status rx_status;
498 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
499 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
500 int padsize;
501 enum ATH_RX_TYPE st;
502
503 /* see if any padding is done by the hw and remove it */
504 if (hdrlen & 3) {
505 padsize = hdrlen % 4;
506 memmove(skb->data + padsize, skb->data, hdrlen);
507 skb_pull(skb, padsize);
508 }
509
510 /* Prepare rx status */
511 ath9k_rx_prepare(sc, skb, status, &rx_status);
512
513 if (!(keyix == ATH9K_RXKEYIX_INVALID) &&
514 !(status->flags & ATH_RX_DECRYPT_ERROR)) {
515 rx_status.flag |= RX_FLAG_DECRYPTED;
516 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
517 && !(status->flags & ATH_RX_DECRYPT_ERROR)
518 && skb->len >= hdrlen + 4) {
519 keyix = skb->data[hdrlen + 3] >> 6;
520
521 if (test_bit(keyix, sc->sc_keymap))
522 rx_status.flag |= RX_FLAG_DECRYPTED;
523 }
524
525 spin_lock_bh(&sc->node_lock);
526 an = ath_node_find(sc, hdr->addr2);
527 spin_unlock_bh(&sc->node_lock);
528
529 if (an) {
530 ath_rx_input(sc, an,
531 hw->conf.ht_conf.ht_supported,
532 skb, status, &st);
533 }
534 if (!an || (st != ATH_RX_CONSUMED))
535 __ieee80211_rx(hw, skb, &rx_status);
536
537 return 0;
538}
539
540int ath_rx_subframe(struct ath_node *an,
541 struct sk_buff *skb,
542 struct ath_recv_status *status)
543{
544 struct ath_softc *sc = an->an_sc;
545 struct ieee80211_hw *hw = sc->hw;
546 struct ieee80211_rx_status rx_status;
547
548 /* Prepare rx status */
549 ath9k_rx_prepare(sc, skb, status, &rx_status);
550 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
551 rx_status.flag |= RX_FLAG_DECRYPTED;
552
553 __ieee80211_rx(hw, skb, &rx_status);
554
555 return 0;
556}
557
558/********************************/
559/* LED functions */
560/********************************/
561
562static void ath_led_brightness(struct led_classdev *led_cdev,
563 enum led_brightness brightness)
564{
565 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
566 struct ath_softc *sc = led->sc;
567
568 switch (brightness) {
569 case LED_OFF:
570 if (led->led_type == ATH_LED_ASSOC ||
571 led->led_type == ATH_LED_RADIO)
572 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
573 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
574 (led->led_type == ATH_LED_RADIO) ? 1 :
575 !!(sc->sc_flags & SC_OP_LED_ASSOCIATED));
576 break;
577 case LED_FULL:
578 if (led->led_type == ATH_LED_ASSOC)
579 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
580 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
581 break;
582 default:
583 break;
584 }
585}
586
587static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
588 char *trigger)
589{
590 int ret;
591
592 led->sc = sc;
593 led->led_cdev.name = led->name;
594 led->led_cdev.default_trigger = trigger;
595 led->led_cdev.brightness_set = ath_led_brightness;
596
597 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
598 if (ret)
599 DPRINTF(sc, ATH_DBG_FATAL,
600 "Failed to register led:%s", led->name);
601 else
602 led->registered = 1;
603 return ret;
604}
605
606static void ath_unregister_led(struct ath_led *led)
607{
608 if (led->registered) {
609 led_classdev_unregister(&led->led_cdev);
610 led->registered = 0;
611 }
612}
613
614static void ath_deinit_leds(struct ath_softc *sc)
615{
616 ath_unregister_led(&sc->assoc_led);
617 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
618 ath_unregister_led(&sc->tx_led);
619 ath_unregister_led(&sc->rx_led);
620 ath_unregister_led(&sc->radio_led);
621 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
622}
623
624static void ath_init_leds(struct ath_softc *sc)
625{
626 char *trigger;
627 int ret;
628
629 /* Configure gpio 1 for output */
630 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
631 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
632 /* LED off, active low */
633 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
634
635 trigger = ieee80211_get_radio_led_name(sc->hw);
636 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
637 "ath9k-%s:radio", wiphy_name(sc->hw->wiphy));
638 ret = ath_register_led(sc, &sc->radio_led, trigger);
639 sc->radio_led.led_type = ATH_LED_RADIO;
640 if (ret)
641 goto fail;
642
643 trigger = ieee80211_get_assoc_led_name(sc->hw);
644 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
645 "ath9k-%s:assoc", wiphy_name(sc->hw->wiphy));
646 ret = ath_register_led(sc, &sc->assoc_led, trigger);
647 sc->assoc_led.led_type = ATH_LED_ASSOC;
648 if (ret)
649 goto fail;
650
651 trigger = ieee80211_get_tx_led_name(sc->hw);
652 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
653 "ath9k-%s:tx", wiphy_name(sc->hw->wiphy));
654 ret = ath_register_led(sc, &sc->tx_led, trigger);
655 sc->tx_led.led_type = ATH_LED_TX;
656 if (ret)
657 goto fail;
658
659 trigger = ieee80211_get_rx_led_name(sc->hw);
660 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
661 "ath9k-%s:rx", wiphy_name(sc->hw->wiphy));
662 ret = ath_register_led(sc, &sc->rx_led, trigger);
663 sc->rx_led.led_type = ATH_LED_RX;
664 if (ret)
665 goto fail;
666
667 return;
668
669fail:
670 ath_deinit_leds(sc);
671}
672
673#ifdef CONFIG_RFKILL
674/*******************/
675/* Rfkill */
676/*******************/
677
678static void ath_radio_enable(struct ath_softc *sc)
679{
680 struct ath_hal *ah = sc->sc_ah;
681 int status;
682
683 spin_lock_bh(&sc->sc_resetlock);
684 if (!ath9k_hw_reset(ah, ah->ah_curchan,
685 sc->sc_ht_info.tx_chan_width,
686 sc->sc_tx_chainmask,
687 sc->sc_rx_chainmask,
688 sc->sc_ht_extprotspacing,
689 false, &status)) {
690 DPRINTF(sc, ATH_DBG_FATAL,
691 "%s: unable to reset channel %u (%uMhz) "
692 "flags 0x%x hal status %u\n", __func__,
693 ath9k_hw_mhz2ieee(ah,
694 ah->ah_curchan->channel,
695 ah->ah_curchan->channelFlags),
696 ah->ah_curchan->channel,
697 ah->ah_curchan->channelFlags, status);
698 }
699 spin_unlock_bh(&sc->sc_resetlock);
700
701 ath_update_txpow(sc);
702 if (ath_startrecv(sc) != 0) {
703 DPRINTF(sc, ATH_DBG_FATAL,
704 "%s: unable to restart recv logic\n", __func__);
705 return;
706 }
707
708 if (sc->sc_flags & SC_OP_BEACONS)
709 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
710
711 /* Re-Enable interrupts */
712 ath9k_hw_set_interrupts(ah, sc->sc_imask);
713
714 /* Enable LED */
715 ath9k_hw_cfg_output(ah, ATH_LED_PIN,
716 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
717 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
718
719 ieee80211_wake_queues(sc->hw);
720}
721
722static void ath_radio_disable(struct ath_softc *sc)
723{
724 struct ath_hal *ah = sc->sc_ah;
725 int status;
726
727
728 ieee80211_stop_queues(sc->hw);
729
730 /* Disable LED */
731 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
732 ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);
733
734 /* Disable interrupts */
735 ath9k_hw_set_interrupts(ah, 0);
736
737 ath_draintxq(sc, false); /* clear pending tx frames */
738 ath_stoprecv(sc); /* turn off frame recv */
739 ath_flushrecv(sc); /* flush recv queue */
740
741 spin_lock_bh(&sc->sc_resetlock);
742 if (!ath9k_hw_reset(ah, ah->ah_curchan,
743 sc->sc_ht_info.tx_chan_width,
744 sc->sc_tx_chainmask,
745 sc->sc_rx_chainmask,
746 sc->sc_ht_extprotspacing,
747 false, &status)) {
748 DPRINTF(sc, ATH_DBG_FATAL,
749 "%s: unable to reset channel %u (%uMhz) "
750 "flags 0x%x hal status %u\n", __func__,
751 ath9k_hw_mhz2ieee(ah,
752 ah->ah_curchan->channel,
753 ah->ah_curchan->channelFlags),
754 ah->ah_curchan->channel,
755 ah->ah_curchan->channelFlags, status);
756 }
757 spin_unlock_bh(&sc->sc_resetlock);
758
759 ath9k_hw_phy_disable(ah);
760 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
761}
762
763static bool ath_is_rfkill_set(struct ath_softc *sc)
764{
765 struct ath_hal *ah = sc->sc_ah;
766
767 return ath9k_hw_gpio_get(ah, ah->ah_rfkill_gpio) ==
768 ah->ah_rfkill_polarity;
769}
770
771/* h/w rfkill poll function */
772static void ath_rfkill_poll(struct work_struct *work)
773{
774 struct ath_softc *sc = container_of(work, struct ath_softc,
775 rf_kill.rfkill_poll.work);
776 bool radio_on;
777
778 if (sc->sc_flags & SC_OP_INVALID)
779 return;
780
781 radio_on = !ath_is_rfkill_set(sc);
782
783 /*
784 * enable/disable radio only when there is a
785 * state change in RF switch
786 */
787 if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
788 enum rfkill_state state;
789
790 if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
791 state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
792 : RFKILL_STATE_HARD_BLOCKED;
793 } else if (radio_on) {
794 ath_radio_enable(sc);
795 state = RFKILL_STATE_UNBLOCKED;
796 } else {
797 ath_radio_disable(sc);
798 state = RFKILL_STATE_HARD_BLOCKED;
799 }
800
801 if (state == RFKILL_STATE_HARD_BLOCKED)
802 sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
803 else
804 sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;
805
806 rfkill_force_state(sc->rf_kill.rfkill, state);
807 }
808
809 queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
810 msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
811}
812
813/* s/w rfkill handler */
814static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
815{
816 struct ath_softc *sc = data;
817
818 switch (state) {
819 case RFKILL_STATE_SOFT_BLOCKED:
820 if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
821 SC_OP_RFKILL_SW_BLOCKED)))
822 ath_radio_disable(sc);
823 sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
824 return 0;
825 case RFKILL_STATE_UNBLOCKED:
826 if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
827 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
828 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
829 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
830 "radio as it is disabled by h/w \n");
831 return -EPERM;
832 }
833 ath_radio_enable(sc);
834 }
835 return 0;
836 default:
837 return -EINVAL;
838 }
839}
840
841/* Init s/w rfkill */
842static int ath_init_sw_rfkill(struct ath_softc *sc)
843{
844 sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
845 RFKILL_TYPE_WLAN);
846 if (!sc->rf_kill.rfkill) {
847 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
848 return -ENOMEM;
849 }
850
851 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
852 "ath9k-%s:rfkill", wiphy_name(sc->hw->wiphy));
853 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
854 sc->rf_kill.rfkill->data = sc;
855 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
856 sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
857 sc->rf_kill.rfkill->user_claim_unsupported = 1;
858
859 return 0;
860}
861
862/* Deinitialize rfkill */
863static void ath_deinit_rfkill(struct ath_softc *sc)
864{
865 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
866 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
867
868 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
869 rfkill_unregister(sc->rf_kill.rfkill);
870 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
871 sc->rf_kill.rfkill = NULL;
872 }
873}
874#endif /* CONFIG_RFKILL */
875
876static int ath_detach(struct ath_softc *sc)
877{
878 struct ieee80211_hw *hw = sc->hw;
879
880 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
881
882 /* Deinit LED control */
883 ath_deinit_leds(sc);
884
885#ifdef CONFIG_RFKILL
886 /* deinit rfkill */
887 ath_deinit_rfkill(sc);
888#endif
889
890 /* Unregister hw */
891
892 ieee80211_unregister_hw(hw);
893
894 /* unregister Rate control */
895 ath_rate_control_unregister();
896
897 /* tx/rx cleanup */
898
899 ath_rx_cleanup(sc);
900 ath_tx_cleanup(sc);
901
902 /* Deinit */
903
904 ath_deinit(sc);
905
906 return 0;
907}
908
909static int ath_attach(u16 devid,
910 struct ath_softc *sc)
911{
912 struct ieee80211_hw *hw = sc->hw;
913 int error = 0;
914
915 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);
916
917 error = ath_init(devid, sc);
918 if (error != 0)
919 return error;
920
921 /* Init nodes */
922
923 INIT_LIST_HEAD(&sc->node_list);
924 spin_lock_init(&sc->node_lock);
925
926 /* get mac address from hardware and set in mac80211 */
927
928 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);
929
930 /* setup channels and rates */
931
932 sc->sbands[IEEE80211_BAND_2GHZ].channels =
933 sc->channels[IEEE80211_BAND_2GHZ];
934 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
935 sc->rates[IEEE80211_BAND_2GHZ];
936 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
937
938 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
939 /* Setup HT capabilities for 2.4Ghz*/
940 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);
941
942 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
943 &sc->sbands[IEEE80211_BAND_2GHZ];
944
945 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
946 sc->sbands[IEEE80211_BAND_5GHZ].channels =
947 sc->channels[IEEE80211_BAND_5GHZ];
948 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
949 sc->rates[IEEE80211_BAND_5GHZ];
950 sc->sbands[IEEE80211_BAND_5GHZ].band =
951 IEEE80211_BAND_5GHZ;
952
953 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
954 /* Setup HT capabilities for 5Ghz*/
955 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);
956
957 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
958 &sc->sbands[IEEE80211_BAND_5GHZ];
959 }
960
961 /* FIXME: Have to figure out proper hw init values later */
962
963 hw->queues = 4;
964 hw->ampdu_queues = 1;
965
966 /* Register rate control */
967 hw->rate_control_algorithm = "ath9k_rate_control";
968 error = ath_rate_control_register();
969 if (error != 0) {
970 DPRINTF(sc, ATH_DBG_FATAL,
971 "%s: Unable to register rate control "
972 "algorithm:%d\n", __func__, error);
973 ath_rate_control_unregister();
974 goto bad;
975 }
976
977 error = ieee80211_register_hw(hw);
978 if (error != 0) {
979 ath_rate_control_unregister();
980 goto bad;
981 }
982
983 /* Initialize LED control */
984 ath_init_leds(sc);
985
986#ifdef CONFIG_RFKILL
987 /* Initialze h/w Rfkill */
988 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
989 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
990
991 /* Initialize s/w rfkill */
992 if (ath_init_sw_rfkill(sc))
993 goto detach;
994#endif
995
996 /* initialize tx/rx engine */
997
998 error = ath_tx_init(sc, ATH_TXBUF);
999 if (error != 0)
1000 goto detach;
1001
1002 error = ath_rx_init(sc, ATH_RXBUF);
1003 if (error != 0)
1004 goto detach;
1005
1006 return 0;
1007detach:
1008 ath_detach(sc);
1009bad:
1010 return error;
1011}
1012
333static int ath9k_start(struct ieee80211_hw *hw) 1013static int ath9k_start(struct ieee80211_hw *hw)
334{ 1014{
335 struct ath_softc *sc = hw->priv; 1015 struct ath_softc *sc = hw->priv;
@@ -358,6 +1038,33 @@ static int ath9k_start(struct ieee80211_hw *hw)
358 return error; 1038 return error;
359 } 1039 }
360 1040
1041#ifdef CONFIG_RFKILL
1042 /* Start rfkill polling */
1043 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1044 queue_delayed_work(sc->hw->workqueue,
1045 &sc->rf_kill.rfkill_poll, 0);
1046
1047 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1048 if (rfkill_register(sc->rf_kill.rfkill)) {
1049 DPRINTF(sc, ATH_DBG_FATAL,
1050 "Unable to register rfkill\n");
1051 rfkill_free(sc->rf_kill.rfkill);
1052
1053 /* Deinitialize the device */
1054 if (sc->pdev->irq)
1055 free_irq(sc->pdev->irq, sc);
1056 ath_detach(sc);
1057 pci_iounmap(sc->pdev, sc->mem);
1058 pci_release_region(sc->pdev, 0);
1059 pci_disable_device(sc->pdev);
1060 ieee80211_free_hw(hw);
1061 return -EIO;
1062 } else {
1063 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
1064 }
1065 }
1066#endif
1067
361 ieee80211_wake_queues(hw); 1068 ieee80211_wake_queues(hw);
362 return 0; 1069 return 0;
363} 1070}
@@ -419,6 +1126,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
419 "%s: Device is no longer present\n", __func__); 1126 "%s: Device is no longer present\n", __func__);
420 1127
421 ieee80211_stop_queues(hw); 1128 ieee80211_stop_queues(hw);
1129
1130#ifdef CONFIG_RFKILL
1131 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1132 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1133#endif
422} 1134}
423 1135
424static int ath9k_add_interface(struct ieee80211_hw *hw, 1136static int ath9k_add_interface(struct ieee80211_hw *hw,
@@ -433,16 +1145,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
433 return -ENOBUFS; 1145 return -ENOBUFS;
434 1146
435 switch (conf->type) { 1147 switch (conf->type) {
436 case IEEE80211_IF_TYPE_STA: 1148 case NL80211_IFTYPE_STATION:
437 ic_opmode = ATH9K_M_STA; 1149 ic_opmode = ATH9K_M_STA;
438 break; 1150 break;
439 case IEEE80211_IF_TYPE_IBSS: 1151 case NL80211_IFTYPE_ADHOC:
440 ic_opmode = ATH9K_M_IBSS; 1152 ic_opmode = ATH9K_M_IBSS;
441 break; 1153 break;
1154 case NL80211_IFTYPE_AP:
1155 ic_opmode = ATH9K_M_HOSTAP;
1156 break;
442 default: 1157 default:
443 DPRINTF(sc, ATH_DBG_FATAL, 1158 DPRINTF(sc, ATH_DBG_FATAL,
444 "%s: Only STA and IBSS are supported currently\n", 1159 "%s: Interface type %d not yet supported\n",
445 __func__); 1160 __func__, conf->type);
446 return -EOPNOTSUPP; 1161 return -EOPNOTSUPP;
447 } 1162 }
448 1163
@@ -485,7 +1200,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
485 ath_rate_newstate(sc, avp); 1200 ath_rate_newstate(sc, avp);
486 1201
487 /* Reclaim beacon resources */ 1202 /* Reclaim beacon resources */
488 if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) { 1203 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP ||
1204 sc->sc_ah->ah_opmode == ATH9K_M_IBSS) {
489 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 1205 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
490 ath_beacon_return(sc, avp); 1206 ath_beacon_return(sc, avp);
491 } 1207 }
@@ -493,7 +1209,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
493 /* Set interrupt mask */ 1209 /* Set interrupt mask */
494 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 1210 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
495 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL); 1211 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
496 sc->sc_beacons = 0; 1212 sc->sc_flags &= ~SC_OP_BEACONS;
497 1213
498 error = ath_vap_detach(sc, 0); 1214 error = ath_vap_detach(sc, 0);
499 if (error) 1215 if (error)
@@ -542,6 +1258,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
542 struct ieee80211_if_conf *conf) 1258 struct ieee80211_if_conf *conf)
543{ 1259{
544 struct ath_softc *sc = hw->priv; 1260 struct ath_softc *sc = hw->priv;
1261 struct ath_hal *ah = sc->sc_ah;
545 struct ath_vap *avp; 1262 struct ath_vap *avp;
546 u32 rfilt = 0; 1263 u32 rfilt = 0;
547 int error, i; 1264 int error, i;
@@ -554,18 +1271,25 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
554 return -EINVAL; 1271 return -EINVAL;
555 } 1272 }
556 1273
1274 /* TODO: Need to decide which hw opmode to use for multi-interface
1275 * cases */
1276 if (vif->type == NL80211_IFTYPE_AP &&
1277 ah->ah_opmode != ATH9K_M_HOSTAP) {
1278 ah->ah_opmode = ATH9K_M_HOSTAP;
1279 ath9k_hw_setopmode(ah);
1280 ath9k_hw_write_associd(ah, sc->sc_myaddr, 0);
1281 /* Request full reset to get hw opmode changed properly */
1282 sc->sc_flags |= SC_OP_FULL_RESET;
1283 }
1284
557 if ((conf->changed & IEEE80211_IFCC_BSSID) && 1285 if ((conf->changed & IEEE80211_IFCC_BSSID) &&
558 !is_zero_ether_addr(conf->bssid)) { 1286 !is_zero_ether_addr(conf->bssid)) {
559 switch (vif->type) { 1287 switch (vif->type) {
560 case IEEE80211_IF_TYPE_STA: 1288 case NL80211_IFTYPE_STATION:
561 case IEEE80211_IF_TYPE_IBSS: 1289 case NL80211_IFTYPE_ADHOC:
562 /* Update ratectrl about the new state */ 1290 /* Update ratectrl about the new state */
563 ath_rate_newstate(sc, avp); 1291 ath_rate_newstate(sc, avp);
564 1292
565 /* Set rx filter */
566 rfilt = ath_calcrxfilter(sc);
567 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
568
569 /* Set BSSID */ 1293 /* Set BSSID */
570 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN); 1294 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
571 sc->sc_curaid = 0; 1295 sc->sc_curaid = 0;
@@ -598,7 +1322,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
598 print_mac(mac, sc->sc_curbssid), sc->sc_curaid); 1322 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
599 1323
600 /* need to reconfigure the beacon */ 1324 /* need to reconfigure the beacon */
601 sc->sc_beacons = 0; 1325 sc->sc_flags &= ~SC_OP_BEACONS ;
602 1326
603 break; 1327 break;
604 default: 1328 default:
@@ -607,7 +1331,8 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
607 } 1331 }
608 1332
609 if ((conf->changed & IEEE80211_IFCC_BEACON) && 1333 if ((conf->changed & IEEE80211_IFCC_BEACON) &&
610 (vif->type == IEEE80211_IF_TYPE_IBSS)) { 1334 ((vif->type == NL80211_IFTYPE_ADHOC) ||
1335 (vif->type == NL80211_IFTYPE_AP))) {
611 /* 1336 /*
612 * Allocate and setup the beacon frame. 1337 * Allocate and setup the beacon frame.
613 * 1338 *
@@ -626,7 +1351,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
626 } 1351 }
627 1352
628 /* Check for WLAN_CAPABILITY_PRIVACY ? */ 1353 /* Check for WLAN_CAPABILITY_PRIVACY ? */
629 if ((avp->av_opmode != IEEE80211_IF_TYPE_STA)) { 1354 if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
630 for (i = 0; i < IEEE80211_WEP_NKID; i++) 1355 for (i = 0; i < IEEE80211_WEP_NKID; i++)
631 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i)) 1356 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
632 ath9k_hw_keysetmac(sc->sc_ah, 1357 ath9k_hw_keysetmac(sc->sc_ah,
@@ -635,7 +1360,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
635 } 1360 }
636 1361
637 /* Only legacy IBSS for now */ 1362 /* Only legacy IBSS for now */
638 if (vif->type == IEEE80211_IF_TYPE_IBSS) 1363 if (vif->type == NL80211_IFTYPE_ADHOC)
639 ath_update_chainmask(sc, 0); 1364 ath_update_chainmask(sc, 0);
640 1365
641 return 0; 1366 return 0;
@@ -649,8 +1374,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
649 FIF_BCN_PRBRESP_PROMISC | \ 1374 FIF_BCN_PRBRESP_PROMISC | \
650 FIF_FCSFAIL) 1375 FIF_FCSFAIL)
651 1376
652/* Accept unicast, bcast and mcast frames */ 1377/* FIXME: sc->sc_full_reset ? */
653
654static void ath9k_configure_filter(struct ieee80211_hw *hw, 1378static void ath9k_configure_filter(struct ieee80211_hw *hw,
655 unsigned int changed_flags, 1379 unsigned int changed_flags,
656 unsigned int *total_flags, 1380 unsigned int *total_flags,
@@ -658,22 +1382,28 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
658 struct dev_mc_list *mclist) 1382 struct dev_mc_list *mclist)
659{ 1383{
660 struct ath_softc *sc = hw->priv; 1384 struct ath_softc *sc = hw->priv;
1385 u32 rfilt;
661 1386
662 changed_flags &= SUPPORTED_FILTERS; 1387 changed_flags &= SUPPORTED_FILTERS;
663 *total_flags &= SUPPORTED_FILTERS; 1388 *total_flags &= SUPPORTED_FILTERS;
664 1389
1390 sc->rx_filter = *total_flags;
1391 rfilt = ath_calcrxfilter(sc);
1392 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
1393
665 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 1394 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
666 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 1395 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
667 ath_scan_start(sc); 1396 ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0);
668 else
669 ath_scan_end(sc);
670 } 1397 }
1398
1399 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set HW RX filter: 0x%x\n",
1400 __func__, sc->rx_filter);
671} 1401}
672 1402
673static void ath9k_sta_notify(struct ieee80211_hw *hw, 1403static void ath9k_sta_notify(struct ieee80211_hw *hw,
674 struct ieee80211_vif *vif, 1404 struct ieee80211_vif *vif,
675 enum sta_notify_cmd cmd, 1405 enum sta_notify_cmd cmd,
676 const u8 *addr) 1406 struct ieee80211_sta *sta)
677{ 1407{
678 struct ath_softc *sc = hw->priv; 1408 struct ath_softc *sc = hw->priv;
679 struct ath_node *an; 1409 struct ath_node *an;
@@ -681,19 +1411,18 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
681 DECLARE_MAC_BUF(mac); 1411 DECLARE_MAC_BUF(mac);
682 1412
683 spin_lock_irqsave(&sc->node_lock, flags); 1413 spin_lock_irqsave(&sc->node_lock, flags);
684 an = ath_node_find(sc, (u8 *) addr); 1414 an = ath_node_find(sc, sta->addr);
685 spin_unlock_irqrestore(&sc->node_lock, flags); 1415 spin_unlock_irqrestore(&sc->node_lock, flags);
686 1416
687 switch (cmd) { 1417 switch (cmd) {
688 case STA_NOTIFY_ADD: 1418 case STA_NOTIFY_ADD:
689 spin_lock_irqsave(&sc->node_lock, flags); 1419 spin_lock_irqsave(&sc->node_lock, flags);
690 if (!an) { 1420 if (!an) {
691 ath_node_attach(sc, (u8 *)addr, 0); 1421 ath_node_attach(sc, sta->addr, 0);
692 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n", 1422 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
693 __func__, 1423 __func__, print_mac(mac, sta->addr));
694 print_mac(mac, addr));
695 } else { 1424 } else {
696 ath_node_get(sc, (u8 *)addr); 1425 ath_node_get(sc, sta->addr);
697 } 1426 }
698 spin_unlock_irqrestore(&sc->node_lock, flags); 1427 spin_unlock_irqrestore(&sc->node_lock, flags);
699 break; 1428 break;
@@ -706,7 +1435,7 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
706 ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT); 1435 ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
707 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n", 1436 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
708 __func__, 1437 __func__,
709 print_mac(mac, addr)); 1438 print_mac(mac, sta->addr));
710 } 1439 }
711 break; 1440 break;
712 default: 1441 default:
@@ -784,117 +1513,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
784 return ret; 1513 return ret;
785} 1514}
786 1515
787static void ath9k_ht_conf(struct ath_softc *sc,
788 struct ieee80211_bss_conf *bss_conf)
789{
790#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
791 struct ath_ht_info *ht_info = &sc->sc_ht_info;
792
793 if (bss_conf->assoc_ht) {
794 ht_info->ext_chan_offset =
795 bss_conf->ht_bss_conf->bss_cap &
796 IEEE80211_HT_IE_CHA_SEC_OFFSET;
797
798 if (!(bss_conf->ht_conf->cap &
799 IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
800 (bss_conf->ht_bss_conf->bss_cap &
801 IEEE80211_HT_IE_CHA_WIDTH))
802 ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
803 else
804 ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
805
806 ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
807 ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
808 bss_conf->ht_conf->ampdu_factor);
809 ht_info->mpdudensity =
810 parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
811
812 }
813
814#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
815}
816
817static void ath9k_bss_assoc_info(struct ath_softc *sc,
818 struct ieee80211_bss_conf *bss_conf)
819{
820 struct ieee80211_hw *hw = sc->hw;
821 struct ieee80211_channel *curchan = hw->conf.channel;
822 struct ath_vap *avp;
823 int pos;
824 DECLARE_MAC_BUF(mac);
825
826 if (bss_conf->assoc) {
827 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n",
828 __func__,
829 bss_conf->aid);
830
831 avp = sc->sc_vaps[0];
832 if (avp == NULL) {
833 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
834 __func__);
835 return;
836 }
837
838 /* New association, store aid */
839 if (avp->av_opmode == ATH9K_M_STA) {
840 sc->sc_curaid = bss_conf->aid;
841 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
842 sc->sc_curaid);
843 }
844
845 /* Configure the beacon */
846 ath_beacon_config(sc, 0);
847 sc->sc_beacons = 1;
848
849 /* Reset rssi stats */
850 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
851 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
852 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
853 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
854
855 /* Update chainmask */
856 ath_update_chainmask(sc, bss_conf->assoc_ht);
857
858 DPRINTF(sc, ATH_DBG_CONFIG,
859 "%s: bssid %s aid 0x%x\n",
860 __func__,
861 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
862
863 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
864 __func__,
865 curchan->center_freq);
866
867 pos = ath_get_channel(sc, curchan);
868 if (pos == -1) {
869 DPRINTF(sc, ATH_DBG_FATAL,
870 "%s: Invalid channel\n", __func__);
871 return;
872 }
873
874 if (hw->conf.ht_conf.ht_supported)
875 sc->sc_ah->ah_channels[pos].chanmode =
876 ath_get_extchanmode(sc, curchan);
877 else
878 sc->sc_ah->ah_channels[pos].chanmode =
879 (curchan->band == IEEE80211_BAND_2GHZ) ?
880 CHANNEL_G : CHANNEL_A;
881
882 /* set h/w channel */
883 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
884 DPRINTF(sc, ATH_DBG_FATAL,
885 "%s: Unable to set channel\n",
886 __func__);
887
888 ath_rate_newstate(sc, avp);
889 /* Update ratectrl about the new state */
890 ath_rc_node_update(hw, avp->rc_node);
891 } else {
892 DPRINTF(sc, ATH_DBG_CONFIG,
893 "%s: Bss Info DISSOC\n", __func__);
894 sc->sc_curaid = 0;
895 }
896}
897
898static void ath9k_bss_info_changed(struct ieee80211_hw *hw, 1516static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
899 struct ieee80211_vif *vif, 1517 struct ieee80211_vif *vif,
900 struct ieee80211_bss_conf *bss_conf, 1518 struct ieee80211_bss_conf *bss_conf,
@@ -907,9 +1525,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
907 __func__, 1525 __func__,
908 bss_conf->use_short_preamble); 1526 bss_conf->use_short_preamble);
909 if (bss_conf->use_short_preamble) 1527 if (bss_conf->use_short_preamble)
910 sc->sc_flags |= ATH_PREAMBLE_SHORT; 1528 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
911 else 1529 else
912 sc->sc_flags &= ~ATH_PREAMBLE_SHORT; 1530 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
913 } 1531 }
914 1532
915 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 1533 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
@@ -918,9 +1536,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
918 bss_conf->use_cts_prot); 1536 bss_conf->use_cts_prot);
919 if (bss_conf->use_cts_prot && 1537 if (bss_conf->use_cts_prot &&
920 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 1538 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
921 sc->sc_flags |= ATH_PROTECT_ENABLE; 1539 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
922 else 1540 else
923 sc->sc_flags &= ~ATH_PROTECT_ENABLE; 1541 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
924 } 1542 }
925 1543
926 if (changed & BSS_CHANGED_HT) { 1544 if (changed & BSS_CHANGED_HT) {
@@ -959,45 +1577,44 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw)
959 1577
960static int ath9k_ampdu_action(struct ieee80211_hw *hw, 1578static int ath9k_ampdu_action(struct ieee80211_hw *hw,
961 enum ieee80211_ampdu_mlme_action action, 1579 enum ieee80211_ampdu_mlme_action action,
962 const u8 *addr, 1580 struct ieee80211_sta *sta,
963 u16 tid, 1581 u16 tid, u16 *ssn)
964 u16 *ssn)
965{ 1582{
966 struct ath_softc *sc = hw->priv; 1583 struct ath_softc *sc = hw->priv;
967 int ret = 0; 1584 int ret = 0;
968 1585
969 switch (action) { 1586 switch (action) {
970 case IEEE80211_AMPDU_RX_START: 1587 case IEEE80211_AMPDU_RX_START:
971 ret = ath_rx_aggr_start(sc, addr, tid, ssn); 1588 ret = ath_rx_aggr_start(sc, sta->addr, tid, ssn);
972 if (ret < 0) 1589 if (ret < 0)
973 DPRINTF(sc, ATH_DBG_FATAL, 1590 DPRINTF(sc, ATH_DBG_FATAL,
974 "%s: Unable to start RX aggregation\n", 1591 "%s: Unable to start RX aggregation\n",
975 __func__); 1592 __func__);
976 break; 1593 break;
977 case IEEE80211_AMPDU_RX_STOP: 1594 case IEEE80211_AMPDU_RX_STOP:
978 ret = ath_rx_aggr_stop(sc, addr, tid); 1595 ret = ath_rx_aggr_stop(sc, sta->addr, tid);
979 if (ret < 0) 1596 if (ret < 0)
980 DPRINTF(sc, ATH_DBG_FATAL, 1597 DPRINTF(sc, ATH_DBG_FATAL,
981 "%s: Unable to stop RX aggregation\n", 1598 "%s: Unable to stop RX aggregation\n",
982 __func__); 1599 __func__);
983 break; 1600 break;
984 case IEEE80211_AMPDU_TX_START: 1601 case IEEE80211_AMPDU_TX_START:
985 ret = ath_tx_aggr_start(sc, addr, tid, ssn); 1602 ret = ath_tx_aggr_start(sc, sta->addr, tid, ssn);
986 if (ret < 0) 1603 if (ret < 0)
987 DPRINTF(sc, ATH_DBG_FATAL, 1604 DPRINTF(sc, ATH_DBG_FATAL,
988 "%s: Unable to start TX aggregation\n", 1605 "%s: Unable to start TX aggregation\n",
989 __func__); 1606 __func__);
990 else 1607 else
991 ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid); 1608 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
992 break; 1609 break;
993 case IEEE80211_AMPDU_TX_STOP: 1610 case IEEE80211_AMPDU_TX_STOP:
994 ret = ath_tx_aggr_stop(sc, addr, tid); 1611 ret = ath_tx_aggr_stop(sc, sta->addr, tid);
995 if (ret < 0) 1612 if (ret < 0)
996 DPRINTF(sc, ATH_DBG_FATAL, 1613 DPRINTF(sc, ATH_DBG_FATAL,
997 "%s: Unable to stop TX aggregation\n", 1614 "%s: Unable to stop TX aggregation\n",
998 __func__); 1615 __func__);
999 1616
1000 ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid); 1617 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
1001 break; 1618 break;
1002 default: 1619 default:
1003 DPRINTF(sc, ATH_DBG_FATAL, 1620 DPRINTF(sc, ATH_DBG_FATAL,
@@ -1034,260 +1651,6 @@ static struct ieee80211_ops ath9k_ops = {
1034 .ampdu_action = ath9k_ampdu_action 1651 .ampdu_action = ath9k_ampdu_action
1035}; 1652};
1036 1653
1037void ath_get_beaconconfig(struct ath_softc *sc,
1038 int if_id,
1039 struct ath_beacon_config *conf)
1040{
1041 struct ieee80211_hw *hw = sc->hw;
1042
1043 /* fill in beacon config data */
1044
1045 conf->beacon_interval = hw->conf.beacon_int;
1046 conf->listen_interval = 100;
1047 conf->dtim_count = 1;
1048 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
1049}
1050
1051int ath_update_beacon(struct ath_softc *sc,
1052 int if_id,
1053 struct ath_beacon_offset *bo,
1054 struct sk_buff *skb,
1055 int mcast)
1056{
1057 return 0;
1058}
1059
1060void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1061 struct ath_xmit_status *tx_status, struct ath_node *an)
1062{
1063 struct ieee80211_hw *hw = sc->hw;
1064 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1065
1066 DPRINTF(sc, ATH_DBG_XMIT,
1067 "%s: TX complete: skb: %p\n", __func__, skb);
1068
1069 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
1070 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1071 /* free driver's private data area of tx_info */
1072 if (tx_info->driver_data[0] != NULL)
1073 kfree(tx_info->driver_data[0]);
1074 tx_info->driver_data[0] = NULL;
1075 }
1076
1077 if (tx_status->flags & ATH_TX_BAR) {
1078 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1079 tx_status->flags &= ~ATH_TX_BAR;
1080 }
1081
1082 if (tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY)) {
1083 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
1084 /* Frame was not ACKed, but an ACK was expected */
1085 tx_info->status.excessive_retries = 1;
1086 }
1087 } else {
1088 /* Frame was ACKed */
1089 tx_info->flags |= IEEE80211_TX_STAT_ACK;
1090 }
1091
1092 tx_info->status.retry_count = tx_status->retries;
1093
1094 ieee80211_tx_status(hw, skb);
1095 if (an)
1096 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
1097}
1098
1099int ath__rx_indicate(struct ath_softc *sc,
1100 struct sk_buff *skb,
1101 struct ath_recv_status *status,
1102 u16 keyix)
1103{
1104 struct ieee80211_hw *hw = sc->hw;
1105 struct ath_node *an = NULL;
1106 struct ieee80211_rx_status rx_status;
1107 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1108 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1109 int padsize;
1110 enum ATH_RX_TYPE st;
1111
1112 /* see if any padding is done by the hw and remove it */
1113 if (hdrlen & 3) {
1114 padsize = hdrlen % 4;
1115 memmove(skb->data + padsize, skb->data, hdrlen);
1116 skb_pull(skb, padsize);
1117 }
1118
1119 /* remove FCS before passing up to protocol stack */
1120 skb_trim(skb, (skb->len - FCS_LEN));
1121
1122 /* Prepare rx status */
1123 ath9k_rx_prepare(sc, skb, status, &rx_status);
1124
1125 if (!(keyix == ATH9K_RXKEYIX_INVALID) &&
1126 !(status->flags & ATH_RX_DECRYPT_ERROR)) {
1127 rx_status.flag |= RX_FLAG_DECRYPTED;
1128 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
1129 && !(status->flags & ATH_RX_DECRYPT_ERROR)
1130 && skb->len >= hdrlen + 4) {
1131 keyix = skb->data[hdrlen + 3] >> 6;
1132
1133 if (test_bit(keyix, sc->sc_keymap))
1134 rx_status.flag |= RX_FLAG_DECRYPTED;
1135 }
1136
1137 spin_lock_bh(&sc->node_lock);
1138 an = ath_node_find(sc, hdr->addr2);
1139 spin_unlock_bh(&sc->node_lock);
1140
1141 if (an) {
1142 ath_rx_input(sc, an,
1143 hw->conf.ht_conf.ht_supported,
1144 skb, status, &st);
1145 }
1146 if (!an || (st != ATH_RX_CONSUMED))
1147 __ieee80211_rx(hw, skb, &rx_status);
1148
1149 return 0;
1150}
1151
1152int ath_rx_subframe(struct ath_node *an,
1153 struct sk_buff *skb,
1154 struct ath_recv_status *status)
1155{
1156 struct ath_softc *sc = an->an_sc;
1157 struct ieee80211_hw *hw = sc->hw;
1158 struct ieee80211_rx_status rx_status;
1159
1160 /* Prepare rx status */
1161 ath9k_rx_prepare(sc, skb, status, &rx_status);
1162 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
1163 rx_status.flag |= RX_FLAG_DECRYPTED;
1164
1165 __ieee80211_rx(hw, skb, &rx_status);
1166
1167 return 0;
1168}
1169
1170enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc)
1171{
1172 return sc->sc_ht_info.tx_chan_width;
1173}
1174
1175static int ath_detach(struct ath_softc *sc)
1176{
1177 struct ieee80211_hw *hw = sc->hw;
1178
1179 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__);
1180
1181 /* Unregister hw */
1182
1183 ieee80211_unregister_hw(hw);
1184
1185 /* unregister Rate control */
1186 ath_rate_control_unregister();
1187
1188 /* tx/rx cleanup */
1189
1190 ath_rx_cleanup(sc);
1191 ath_tx_cleanup(sc);
1192
1193 /* Deinit */
1194
1195 ath_deinit(sc);
1196
1197 return 0;
1198}
1199
1200static int ath_attach(u16 devid,
1201 struct ath_softc *sc)
1202{
1203 struct ieee80211_hw *hw = sc->hw;
1204 int error = 0;
1205
1206 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__);
1207
1208 error = ath_init(devid, sc);
1209 if (error != 0)
1210 return error;
1211
1212 /* Init nodes */
1213
1214 INIT_LIST_HEAD(&sc->node_list);
1215 spin_lock_init(&sc->node_lock);
1216
1217 /* get mac address from hardware and set in mac80211 */
1218
1219 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);
1220
1221 /* setup channels and rates */
1222
1223 sc->sbands[IEEE80211_BAND_2GHZ].channels =
1224 sc->channels[IEEE80211_BAND_2GHZ];
1225 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1226 sc->rates[IEEE80211_BAND_2GHZ];
1227 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1228
1229 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1230 /* Setup HT capabilities for 2.4Ghz*/
1231 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);
1232
1233 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1234 &sc->sbands[IEEE80211_BAND_2GHZ];
1235
1236 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
1237 sc->sbands[IEEE80211_BAND_5GHZ].channels =
1238 sc->channels[IEEE80211_BAND_5GHZ];
1239 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1240 sc->rates[IEEE80211_BAND_5GHZ];
1241 sc->sbands[IEEE80211_BAND_5GHZ].band =
1242 IEEE80211_BAND_5GHZ;
1243
1244 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1245 /* Setup HT capabilities for 5Ghz*/
1246 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info);
1247
1248 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1249 &sc->sbands[IEEE80211_BAND_5GHZ];
1250 }
1251
1252 /* FIXME: Have to figure out proper hw init values later */
1253
1254 hw->queues = 4;
1255 hw->ampdu_queues = 1;
1256
1257 /* Register rate control */
1258 hw->rate_control_algorithm = "ath9k_rate_control";
1259 error = ath_rate_control_register();
1260 if (error != 0) {
1261 DPRINTF(sc, ATH_DBG_FATAL,
1262 "%s: Unable to register rate control "
1263 "algorithm:%d\n", __func__, error);
1264 ath_rate_control_unregister();
1265 goto bad;
1266 }
1267
1268 error = ieee80211_register_hw(hw);
1269 if (error != 0) {
1270 ath_rate_control_unregister();
1271 goto bad;
1272 }
1273
1274 /* initialize tx/rx engine */
1275
1276 error = ath_tx_init(sc, ATH_TXBUF);
1277 if (error != 0)
1278 goto bad1;
1279
1280 error = ath_rx_init(sc, ATH_RXBUF);
1281 if (error != 0)
1282 goto bad1;
1283
1284 return 0;
1285bad1:
1286 ath_detach(sc);
1287bad:
1288 return error;
1289}
1290
1291static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1654static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1292{ 1655{
1293 void __iomem *mem; 1656 void __iomem *mem;
@@ -1361,9 +1724,16 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1361 goto bad2; 1724 goto bad2;
1362 } 1725 }
1363 1726
1364 hw->flags = IEEE80211_HW_SIGNAL_DBM | 1727 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1728 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1729 IEEE80211_HW_SIGNAL_DBM |
1365 IEEE80211_HW_NOISE_DBM; 1730 IEEE80211_HW_NOISE_DBM;
1366 1731
1732 hw->wiphy->interface_modes =
1733 BIT(NL80211_IFTYPE_AP) |
1734 BIT(NL80211_IFTYPE_STATION) |
1735 BIT(NL80211_IFTYPE_ADHOC);
1736
1367 SET_IEEE80211_DEV(hw, &pdev->dev); 1737 SET_IEEE80211_DEV(hw, &pdev->dev);
1368 pci_set_drvdata(pdev, hw); 1738 pci_set_drvdata(pdev, hw);
1369 1739
@@ -1417,7 +1787,7 @@ static void ath_pci_remove(struct pci_dev *pdev)
1417 ath9k_hw_set_interrupts(sc->sc_ah, 0); 1787 ath9k_hw_set_interrupts(sc->sc_ah, 0);
1418 /* clear the ISR */ 1788 /* clear the ISR */
1419 ath9k_hw_getisr(sc->sc_ah, &status); 1789 ath9k_hw_getisr(sc->sc_ah, &status);
1420 sc->sc_invalid = 1; 1790 sc->sc_flags |= SC_OP_INVALID;
1421 free_irq(pdev->irq, sc); 1791 free_irq(pdev->irq, sc);
1422 } 1792 }
1423 ath_detach(sc); 1793 ath_detach(sc);
@@ -1432,6 +1802,16 @@ static void ath_pci_remove(struct pci_dev *pdev)
1432 1802
1433static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) 1803static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1434{ 1804{
1805 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1806 struct ath_softc *sc = hw->priv;
1807
1808 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1809
1810#ifdef CONFIG_RFKILL
1811 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1812 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1813#endif
1814
1435 pci_save_state(pdev); 1815 pci_save_state(pdev);
1436 pci_disable_device(pdev); 1816 pci_disable_device(pdev);
1437 pci_set_power_state(pdev, 3); 1817 pci_set_power_state(pdev, 3);
@@ -1441,6 +1821,8 @@ static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1441 1821
1442static int ath_pci_resume(struct pci_dev *pdev) 1822static int ath_pci_resume(struct pci_dev *pdev)
1443{ 1823{
1824 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1825 struct ath_softc *sc = hw->priv;
1444 u32 val; 1826 u32 val;
1445 int err; 1827 int err;
1446 1828
@@ -1457,6 +1839,21 @@ static int ath_pci_resume(struct pci_dev *pdev)
1457 if ((val & 0x0000ff00) != 0) 1839 if ((val & 0x0000ff00) != 0)
1458 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 1840 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1459 1841
1842 /* Enable LED */
1843 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
1844 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1845 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1846
1847#ifdef CONFIG_RFKILL
1848 /*
1849 * check the h/w rfkill state on resume
1850 * and start the rfkill poll timer
1851 */
1852 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1853 queue_delayed_work(sc->hw->workqueue,
1854 &sc->rf_kill.rfkill_poll, 0);
1855#endif
1856
1460 return 0; 1857 return 0;
1461} 1858}
1462 1859
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 0cd399a5344a..14702344448b 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -18,19 +18,19 @@
18#define PHY_H 18#define PHY_H
19 19
20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah, 20bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah,
21 struct ath9k_channel 21 struct ath9k_channel
22 *chan); 22 *chan);
23bool ath9k_hw_set_channel(struct ath_hal *ah, 23bool ath9k_hw_set_channel(struct ath_hal *ah,
24 struct ath9k_channel *chan); 24 struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, 25void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex,
26 u32 freqIndex, int regWrites); 26 u32 freqIndex, int regWrites);
27bool ath9k_hw_set_rf_regs(struct ath_hal *ah, 27bool ath9k_hw_set_rf_regs(struct ath_hal *ah,
28 struct ath9k_channel *chan, 28 struct ath9k_channel *chan,
29 u16 modesIndex); 29 u16 modesIndex);
30void ath9k_hw_decrease_chain_power(struct ath_hal *ah, 30void ath9k_hw_decrease_chain_power(struct ath_hal *ah,
31 struct ath9k_channel *chan); 31 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hal *ah, 32bool ath9k_hw_init_rf(struct ath_hal *ah,
33 int *status); 33 int *status);
34 34
35#define AR_PHY_BASE 0x9800 35#define AR_PHY_BASE 0x9800
36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) 36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 73c460ad355f..cca2fc5b0765 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -20,6 +20,7 @@
20 */ 20 */
21 21
22#include "core.h" 22#include "core.h"
23/* FIXME: remove this include! */
23#include "../net/mac80211/rate.h" 24#include "../net/mac80211/rate.h"
24 25
25static u32 tx_triglevel_max; 26static u32 tx_triglevel_max;
@@ -653,8 +654,8 @@ ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv,
653 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv); 654 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
654 for (i = 0; i < rate_table->rate_cnt; i++) { 655 for (i = 0; i < rate_table->rate_cnt; i++) {
655 valid = (ath_rc_priv->single_stream ? 656 valid = (ath_rc_priv->single_stream ?
656 rate_table->info[i].valid_single_stream : 657 rate_table->info[i].valid_single_stream :
657 rate_table->info[i].valid); 658 rate_table->info[i].valid);
658 if (valid == TRUE) { 659 if (valid == TRUE) {
659 u32 phy = rate_table->info[i].phy; 660 u32 phy = rate_table->info[i].phy;
660 u8 valid_rate_count = 0; 661 u8 valid_rate_count = 0;
@@ -740,14 +741,14 @@ ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv,
740 for (j = 0; j < rate_table->rate_cnt; j++) { 741 for (j = 0; j < rate_table->rate_cnt; j++) {
741 u32 phy = rate_table->info[j].phy; 742 u32 phy = rate_table->info[j].phy;
742 u32 valid = (ath_rc_priv->single_stream ? 743 u32 valid = (ath_rc_priv->single_stream ?
743 rate_table->info[j].valid_single_stream : 744 rate_table->info[j].valid_single_stream :
744 rate_table->info[j].valid); 745 rate_table->info[j].valid);
745 746
746 if (((((struct ath_rateset *) 747 if (((((struct ath_rateset *)
747 mcs_set)->rs_rates[i] & 0x7F) != 748 mcs_set)->rs_rates[i] & 0x7F) !=
748 (rate_table->info[j].dot11rate & 0x7F)) || 749 (rate_table->info[j].dot11rate & 0x7F)) ||
749 !WLAN_RC_PHY_HT(phy) || 750 !WLAN_RC_PHY_HT(phy) ||
750 !WLAN_RC_PHY_HT_VALID(valid, capflag)) 751 !WLAN_RC_PHY_HT_VALID(valid, capflag))
751 continue; 752 continue;
752 753
753 if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) 754 if (!ath_rc_valid_phyrate(phy, capflag, FALSE))
@@ -847,9 +848,9 @@ void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
847 /* For half and quarter rate channles use different 848 /* For half and quarter rate channles use different
848 * rate tables 849 * rate tables
849 */ 850 */
850 if (sc->sc_curchan.channelFlags & CHANNEL_HALF) 851 if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_HALF)
851 ar5416_sethalf_ratetable(asc); 852 ar5416_sethalf_ratetable(asc);
852 else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER) 853 else if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_QUARTER)
853 ar5416_setquarter_ratetable(asc); 854 ar5416_setquarter_ratetable(asc);
854 else /* full rate */ 855 else /* full rate */
855 ar5416_setfull_ratetable(asc); 856 ar5416_setfull_ratetable(asc);
@@ -866,10 +867,10 @@ void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
866} 867}
867 868
868static u8 ath_rc_ratefind_ht(struct ath_softc *sc, 869static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
869 struct ath_rate_node *ath_rc_priv, 870 struct ath_rate_node *ath_rc_priv,
870 const struct ath_rate_table *rate_table, 871 const struct ath_rate_table *rate_table,
871 int probe_allowed, int *is_probing, 872 int probe_allowed, int *is_probing,
872 int is_retry) 873 int is_retry)
873{ 874{
874 u32 dt, best_thruput, this_thruput, now_msec; 875 u32 dt, best_thruput, this_thruput, now_msec;
875 u8 rate, next_rate, best_rate, maxindex, minindex; 876 u8 rate, next_rate, best_rate, maxindex, minindex;
@@ -997,8 +998,8 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
997 rate = rate_ctrl->rate_table_size - 1; 998 rate = rate_ctrl->rate_table_size - 1;
998 999
999 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) || 1000 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
1000 (rate_table->info[rate].valid_single_stream && 1001 (rate_table->info[rate].valid_single_stream &&
1001 ath_rc_priv->single_stream)); 1002 ath_rc_priv->single_stream));
1002 1003
1003 return rate; 1004 return rate;
1004} 1005}
@@ -1023,10 +1024,10 @@ static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table ,
1023} 1024}
1024 1025
1025static u8 ath_rc_rate_getidx(struct ath_softc *sc, 1026static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1026 struct ath_rate_node *ath_rc_priv, 1027 struct ath_rate_node *ath_rc_priv,
1027 const struct ath_rate_table *rate_table, 1028 const struct ath_rate_table *rate_table,
1028 u8 rix, u16 stepdown, 1029 u8 rix, u16 stepdown,
1029 u16 min_rate) 1030 u16 min_rate)
1030{ 1031{
1031 u32 j; 1032 u32 j;
1032 u8 nextindex; 1033 u8 nextindex;
@@ -1066,8 +1067,8 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1066 rate_table = 1067 rate_table =
1067 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode]; 1068 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1068 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table, 1069 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
1069 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0, 1070 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
1070 is_probe, is_retry); 1071 is_probe, is_retry);
1071 nrix = rix; 1072 nrix = rix;
1072 1073
1073 if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) { 1074 if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) {
@@ -1099,13 +1100,13 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1099 try_num = ((i + 1) == num_rates) ? 1100 try_num = ((i + 1) == num_rates) ?
1100 num_tries - (try_per_rate * i) : try_per_rate ; 1101 num_tries - (try_per_rate * i) : try_per_rate ;
1101 min_rate = (((i + 1) == num_rates) && 1102 min_rate = (((i + 1) == num_rates) &&
1102 (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0; 1103 (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
1103 1104
1104 nrix = ath_rc_rate_getidx(sc, ath_rc_priv, 1105 nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
1105 rate_table, nrix, 1, min_rate); 1106 rate_table, nrix, 1, min_rate);
1106 /* All other rates in the series have RTS enabled */ 1107 /* All other rates in the series have RTS enabled */
1107 ath_rc_rate_set_series(rate_table, 1108 ath_rc_rate_set_series(rate_table,
1108 &series[i], try_num, nrix, TRUE); 1109 &series[i], try_num, nrix, TRUE);
1109 } 1110 }
1110 1111
1111 /* 1112 /*
@@ -1124,13 +1125,13 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1124 * above conditions. 1125 * above conditions.
1125 */ 1126 */
1126 if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) || 1127 if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) ||
1127 (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) || 1128 (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) ||
1128 (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) { 1129 (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) {
1129 u8 dot11rate = rate_table->info[rix].dot11rate; 1130 u8 dot11rate = rate_table->info[rix].dot11rate;
1130 u8 phy = rate_table->info[rix].phy; 1131 u8 phy = rate_table->info[rix].phy;
1131 if (i == 4 && 1132 if (i == 4 &&
1132 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) || 1133 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
1133 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) { 1134 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
1134 series[3].rix = series[2].rix; 1135 series[3].rix = series[2].rix;
1135 series[3].flags = series[2].flags; 1136 series[3].flags = series[2].flags;
1136 series[3].max_4ms_framelen = series[2].max_4ms_framelen; 1137 series[3].max_4ms_framelen = series[2].max_4ms_framelen;
@@ -1141,18 +1142,19 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1141/* 1142/*
1142 * Return the Tx rate series. 1143 * Return the Tx rate series.
1143 */ 1144 */
1144void ath_rate_findrate(struct ath_softc *sc, 1145static void ath_rate_findrate(struct ath_softc *sc,
1145 struct ath_rate_node *ath_rc_priv, 1146 struct ath_rate_node *ath_rc_priv,
1146 int num_tries, 1147 int num_tries,
1147 int num_rates, 1148 int num_rates,
1148 unsigned int rcflag, 1149 unsigned int rcflag,
1149 struct ath_rc_series series[], 1150 struct ath_rc_series series[],
1150 int *is_probe, 1151 int *is_probe,
1151 int is_retry) 1152 int is_retry)
1152{ 1153{
1153 struct ath_vap *avp = ath_rc_priv->avp; 1154 struct ath_vap *avp = ath_rc_priv->avp;
1154 1155
1155 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 1156 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1157
1156 if (!num_rates || !num_tries) 1158 if (!num_rates || !num_tries)
1157 return; 1159 return;
1158 1160
@@ -1174,9 +1176,8 @@ void ath_rate_findrate(struct ath_softc *sc,
1174 unsigned int mcs; 1176 unsigned int mcs;
1175 u8 series_rix = 0; 1177 u8 series_rix = 0;
1176 1178
1177 series[idx].tries = 1179 series[idx].tries = IEEE80211_RATE_IDX_ENTRY(
1178 IEEE80211_RATE_IDX_ENTRY( 1180 avp->av_config.av_fixed_retryset, idx);
1179 avp->av_config.av_fixed_retryset, idx);
1180 1181
1181 mcs = IEEE80211_RATE_IDX_ENTRY( 1182 mcs = IEEE80211_RATE_IDX_ENTRY(
1182 avp->av_config.av_fixed_rateset, idx); 1183 avp->av_config.av_fixed_rateset, idx);
@@ -1228,7 +1229,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1228 u32 now_msec = jiffies_to_msecs(jiffies); 1229 u32 now_msec = jiffies_to_msecs(jiffies);
1229 int state_change = FALSE, rate, count; 1230 int state_change = FALSE, rate, count;
1230 u8 last_per; 1231 u8 last_per;
1231 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1232 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1232 struct ath_rate_table *rate_table = 1233 struct ath_rate_table *rate_table =
1233 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode]; 1234 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1234 1235
@@ -1272,14 +1273,14 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1272 } else { 1273 } else {
1273 /* xretries == 2 */ 1274 /* xretries == 2 */
1274 count = sizeof(nretry_to_per_lookup) / 1275 count = sizeof(nretry_to_per_lookup) /
1275 sizeof(nretry_to_per_lookup[0]); 1276 sizeof(nretry_to_per_lookup[0]);
1276 if (retries >= count) 1277 if (retries >= count)
1277 retries = count - 1; 1278 retries = count - 1;
1278 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ 1279 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1279 rate_ctrl->state[tx_rate].per = 1280 rate_ctrl->state[tx_rate].per =
1280 (u8)(rate_ctrl->state[tx_rate].per - 1281 (u8)(rate_ctrl->state[tx_rate].per -
1281 (rate_ctrl->state[tx_rate].per >> 3) + 1282 (rate_ctrl->state[tx_rate].per >> 3) +
1282 ((100) >> 3)); 1283 ((100) >> 3));
1283 } 1284 }
1284 1285
1285 /* xretries == 1 or 2 */ 1286 /* xretries == 1 or 2 */
@@ -1295,8 +1296,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1295 if (retries >= count) 1296 if (retries >= count)
1296 retries = count - 1; 1297 retries = count - 1;
1297 if (info_priv->n_bad_frames) { 1298 if (info_priv->n_bad_frames) {
1298 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ 1299 /* new_PER = 7/8*old_PER + 1/8*(currentPER)
1299 /*
1300 * Assuming that n_frames is not 0. The current PER 1300 * Assuming that n_frames is not 0. The current PER
1301 * from the retries is 100 * retries / (retries+1), 1301 * from the retries is 100 * retries / (retries+1),
1302 * since the first retries attempts failed, and the 1302 * since the first retries attempts failed, and the
@@ -1386,7 +1386,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1386 * rssi_ack values. 1386 * rssi_ack values.
1387 */ 1387 */
1388 if (tx_rate == rate_ctrl->rate_max_phy && 1388 if (tx_rate == rate_ctrl->rate_max_phy &&
1389 rate_ctrl->hw_maxretry_pktcnt < 255) { 1389 rate_ctrl->hw_maxretry_pktcnt < 255) {
1390 rate_ctrl->hw_maxretry_pktcnt++; 1390 rate_ctrl->hw_maxretry_pktcnt++;
1391 } 1391 }
1392 1392
@@ -1418,7 +1418,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1418 /* Now reduce the current 1418 /* Now reduce the current
1419 * rssi threshold. */ 1419 * rssi threshold. */
1420 if ((rssi_ackAvg < rssi_thres + 2) && 1420 if ((rssi_ackAvg < rssi_thres + 2) &&
1421 (rssi_thres > rssi_ack_vmin)) { 1421 (rssi_thres > rssi_ack_vmin)) {
1422 rate_ctrl->state[tx_rate]. 1422 rate_ctrl->state[tx_rate].
1423 rssi_thres--; 1423 rssi_thres--;
1424 } 1424 }
@@ -1436,10 +1436,10 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1436 * a while (except if we are probing). 1436 * a while (except if we are probing).
1437 */ 1437 */
1438 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 && 1438 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 &&
1439 rate_table->info[tx_rate].ratekbps <= 1439 rate_table->info[tx_rate].ratekbps <=
1440 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) { 1440 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) {
1441 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl, 1441 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl,
1442 (u8) tx_rate, &rate_ctrl->rate_max_phy); 1442 (u8) tx_rate, &rate_ctrl->rate_max_phy);
1443 1443
1444 /* Don't probe for a little while. */ 1444 /* Don't probe for a little while. */
1445 rate_ctrl->probe_time = now_msec; 1445 rate_ctrl->probe_time = now_msec;
@@ -1460,43 +1460,43 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1460 break; 1460 break;
1461 1461
1462 if (rate_ctrl->state[rate].rssi_thres + 1462 if (rate_ctrl->state[rate].rssi_thres +
1463 rate_table->info[rate].rssi_ack_deltamin > 1463 rate_table->info[rate].rssi_ack_deltamin >
1464 rate_ctrl->state[rate+1].rssi_thres) { 1464 rate_ctrl->state[rate+1].rssi_thres) {
1465 rate_ctrl->state[rate+1].rssi_thres = 1465 rate_ctrl->state[rate+1].rssi_thres =
1466 rate_ctrl->state[rate]. 1466 rate_ctrl->state[rate].
1467 rssi_thres + 1467 rssi_thres +
1468 rate_table->info[rate]. 1468 rate_table->info[rate].
1469 rssi_ack_deltamin; 1469 rssi_ack_deltamin;
1470 } 1470 }
1471 } 1471 }
1472 1472
1473 /* Make sure the rates below this have lower rssi thresholds. */ 1473 /* Make sure the rates below this have lower rssi thresholds. */
1474 for (rate = tx_rate - 1; rate >= 0; rate--) { 1474 for (rate = tx_rate - 1; rate >= 0; rate--) {
1475 if (rate_table->info[rate].phy != 1475 if (rate_table->info[rate].phy !=
1476 rate_table->info[tx_rate].phy) 1476 rate_table->info[tx_rate].phy)
1477 break; 1477 break;
1478 1478
1479 if (rate_ctrl->state[rate].rssi_thres + 1479 if (rate_ctrl->state[rate].rssi_thres +
1480 rate_table->info[rate].rssi_ack_deltamin > 1480 rate_table->info[rate].rssi_ack_deltamin >
1481 rate_ctrl->state[rate+1].rssi_thres) { 1481 rate_ctrl->state[rate+1].rssi_thres) {
1482 if (rate_ctrl->state[rate+1].rssi_thres < 1482 if (rate_ctrl->state[rate+1].rssi_thres <
1483 rate_table->info[rate]. 1483 rate_table->info[rate].
1484 rssi_ack_deltamin) 1484 rssi_ack_deltamin)
1485 rate_ctrl->state[rate].rssi_thres = 0; 1485 rate_ctrl->state[rate].rssi_thres = 0;
1486 else { 1486 else {
1487 rate_ctrl->state[rate].rssi_thres = 1487 rate_ctrl->state[rate].rssi_thres =
1488 rate_ctrl->state[rate+1]. 1488 rate_ctrl->state[rate+1].
1489 rssi_thres - 1489 rssi_thres -
1490 rate_table->info[rate]. 1490 rate_table->info[rate].
1491 rssi_ack_deltamin; 1491 rssi_ack_deltamin;
1492 } 1492 }
1493 1493
1494 if (rate_ctrl->state[rate].rssi_thres < 1494 if (rate_ctrl->state[rate].rssi_thres <
1495 rate_table->info[rate]. 1495 rate_table->info[rate].
1496 rssi_ack_validmin) { 1496 rssi_ack_validmin) {
1497 rate_ctrl->state[rate].rssi_thres = 1497 rate_ctrl->state[rate].rssi_thres =
1498 rate_table->info[rate]. 1498 rate_table->info[rate].
1499 rssi_ack_validmin; 1499 rssi_ack_validmin;
1500 } 1500 }
1501 } 1501 }
1502 } 1502 }
@@ -1507,11 +1507,11 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1507 if (rate_ctrl->state[tx_rate].per < last_per) { 1507 if (rate_ctrl->state[tx_rate].per < last_per) {
1508 for (rate = tx_rate - 1; rate >= 0; rate--) { 1508 for (rate = tx_rate - 1; rate >= 0; rate--) {
1509 if (rate_table->info[rate].phy != 1509 if (rate_table->info[rate].phy !=
1510 rate_table->info[tx_rate].phy) 1510 rate_table->info[tx_rate].phy)
1511 break; 1511 break;
1512 1512
1513 if (rate_ctrl->state[rate].per > 1513 if (rate_ctrl->state[rate].per >
1514 rate_ctrl->state[rate+1].per) { 1514 rate_ctrl->state[rate+1].per) {
1515 rate_ctrl->state[rate].per = 1515 rate_ctrl->state[rate].per =
1516 rate_ctrl->state[rate+1].per; 1516 rate_ctrl->state[rate+1].per;
1517 } 1517 }
@@ -1528,11 +1528,11 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1528 /* Every so often, we reduce the thresholds and 1528 /* Every so often, we reduce the thresholds and
1529 * PER (different for CCK and OFDM). */ 1529 * PER (different for CCK and OFDM). */
1530 if (now_msec - rate_ctrl->rssi_down_time >= 1530 if (now_msec - rate_ctrl->rssi_down_time >=
1531 rate_table->rssi_reduce_interval) { 1531 rate_table->rssi_reduce_interval) {
1532 1532
1533 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { 1533 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1534 if (rate_ctrl->state[rate].rssi_thres > 1534 if (rate_ctrl->state[rate].rssi_thres >
1535 rate_table->info[rate].rssi_ack_validmin) 1535 rate_table->info[rate].rssi_ack_validmin)
1536 rate_ctrl->state[rate].rssi_thres -= 1; 1536 rate_ctrl->state[rate].rssi_thres -= 1;
1537 } 1537 }
1538 rate_ctrl->rssi_down_time = now_msec; 1538 rate_ctrl->rssi_down_time = now_msec;
@@ -1541,7 +1541,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1541 /* Every so often, we reduce the thresholds 1541 /* Every so often, we reduce the thresholds
1542 * and PER (different for CCK and OFDM). */ 1542 * and PER (different for CCK and OFDM). */
1543 if (now_msec - rate_ctrl->per_down_time >= 1543 if (now_msec - rate_ctrl->per_down_time >=
1544 rate_table->rssi_reduce_interval) { 1544 rate_table->rssi_reduce_interval) {
1545 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { 1545 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) {
1546 rate_ctrl->state[rate].per = 1546 rate_ctrl->state[rate].per =
1547 7 * rate_ctrl->state[rate].per / 8; 1547 7 * rate_ctrl->state[rate].per / 8;
@@ -1560,7 +1560,7 @@ static void ath_rc_update(struct ath_softc *sc,
1560 struct ath_tx_info_priv *info_priv, int final_ts_idx, 1560 struct ath_tx_info_priv *info_priv, int final_ts_idx,
1561 int xretries, int long_retry) 1561 int xretries, int long_retry)
1562{ 1562{
1563 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1563 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1564 struct ath_rate_table *rate_table; 1564 struct ath_rate_table *rate_table;
1565 struct ath_tx_ratectrl *rate_ctrl; 1565 struct ath_tx_ratectrl *rate_ctrl;
1566 struct ath_rc_series rcs[4]; 1566 struct ath_rc_series rcs[4];
@@ -1637,7 +1637,6 @@ static void ath_rc_update(struct ath_softc *sc,
1637 xretries, long_retry); 1637 xretries, long_retry);
1638} 1638}
1639 1639
1640
1641/* 1640/*
1642 * Process a tx descriptor for a completed transmit (success or failure). 1641 * Process a tx descriptor for a completed transmit (success or failure).
1643 */ 1642 */
@@ -1651,13 +1650,13 @@ static void ath_rate_tx_complete(struct ath_softc *sc,
1651 struct ath_vap *avp; 1650 struct ath_vap *avp;
1652 1651
1653 avp = rc_priv->avp; 1652 avp = rc_priv->avp;
1654 if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) 1653 if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) ||
1655 || info_priv->tx.ts_status & ATH9K_TXERR_FILT) 1654 (info_priv->tx.ts_status & ATH9K_TXERR_FILT))
1656 return; 1655 return;
1657 1656
1658 if (info_priv->tx.ts_rssi > 0) { 1657 if (info_priv->tx.ts_rssi > 0) {
1659 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi, 1658 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
1660 info_priv->tx.ts_rssi); 1659 info_priv->tx.ts_rssi);
1661 } 1660 }
1662 1661
1663 /* 1662 /*
@@ -1682,7 +1681,6 @@ static void ath_rate_tx_complete(struct ath_softc *sc,
1682 info_priv->tx.ts_longretry); 1681 info_priv->tx.ts_longretry);
1683} 1682}
1684 1683
1685
1686/* 1684/*
1687 * Update the SIB's rate control information 1685 * Update the SIB's rate control information
1688 * 1686 *
@@ -1701,8 +1699,8 @@ static void ath_rc_sib_update(struct ath_softc *sc,
1701 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1699 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1702 struct ath_rateset *rateset = negotiated_rates; 1700 struct ath_rateset *rateset = negotiated_rates;
1703 u8 *ht_mcs = (u8 *)negotiated_htrates; 1701 u8 *ht_mcs = (u8 *)negotiated_htrates;
1704 struct ath_tx_ratectrl *rate_ctrl = (struct ath_tx_ratectrl *) 1702 struct ath_tx_ratectrl *rate_ctrl =
1705 (ath_rc_priv); 1703 (struct ath_tx_ratectrl *)ath_rc_priv;
1706 u8 i, j, k, hi = 0, hthi = 0; 1704 u8 i, j, k, hi = 0, hthi = 0;
1707 1705
1708 rate_table = (struct ath_rate_table *) 1706 rate_table = (struct ath_rate_table *)
@@ -1815,19 +1813,18 @@ static void ath_rc_sib_init(struct ath_rate_node *ath_rc_priv)
1815} 1813}
1816 1814
1817 1815
1818static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta) 1816static void ath_setup_rates(struct ath_softc *sc,
1817 struct ieee80211_supported_band *sband,
1818 struct ieee80211_sta *sta,
1819 struct ath_rate_node *rc_priv)
1819 1820
1820{ 1821{
1821 struct ieee80211_supported_band *sband;
1822 struct ieee80211_hw *hw = local_to_hw(local);
1823 struct ath_softc *sc = hw->priv;
1824 struct ath_rate_node *rc_priv = sta->rate_ctrl_priv;
1825 int i, j = 0; 1822 int i, j = 0;
1826 1823
1827 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 1824 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1828 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1825
1829 for (i = 0; i < sband->n_bitrates; i++) { 1826 for (i = 0; i < sband->n_bitrates; i++) {
1830 if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) { 1827 if (sta->supp_rates[sband->band] & BIT(i)) {
1831 rc_priv->neg_rates.rs_rates[j] 1828 rc_priv->neg_rates.rs_rates[j]
1832 = (sband->bitrates[i].bitrate * 2) / 10; 1829 = (sband->bitrates[i].bitrate * 2) / 10;
1833 j++; 1830 j++;
@@ -1854,19 +1851,17 @@ void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv)
1854} 1851}
1855 1852
1856/* Rate Control callbacks */ 1853/* Rate Control callbacks */
1857static void ath_tx_status(void *priv, struct net_device *dev, 1854static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1855 struct ieee80211_sta *sta, void *priv_sta,
1858 struct sk_buff *skb) 1856 struct sk_buff *skb)
1859{ 1857{
1860 struct ath_softc *sc = priv; 1858 struct ath_softc *sc = priv;
1861 struct ath_tx_info_priv *tx_info_priv; 1859 struct ath_tx_info_priv *tx_info_priv;
1862 struct ath_node *an; 1860 struct ath_node *an;
1863 struct sta_info *sta;
1864 struct ieee80211_local *local;
1865 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1861 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1866 struct ieee80211_hdr *hdr; 1862 struct ieee80211_hdr *hdr;
1867 __le16 fc; 1863 __le16 fc;
1868 1864
1869 local = hw_to_local(sc->hw);
1870 hdr = (struct ieee80211_hdr *)skb->data; 1865 hdr = (struct ieee80211_hdr *)skb->data;
1871 fc = hdr->frame_control; 1866 fc = hdr->frame_control;
1872 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; 1867 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
@@ -1875,8 +1870,7 @@ static void ath_tx_status(void *priv, struct net_device *dev,
1875 an = ath_node_find(sc, hdr->addr1); 1870 an = ath_node_find(sc, hdr->addr1);
1876 spin_unlock_bh(&sc->node_lock); 1871 spin_unlock_bh(&sc->node_lock);
1877 1872
1878 sta = sta_info_get(local, hdr->addr1); 1873 if (!an || !priv_sta || !ieee80211_is_data(fc)) {
1879 if (!an || !sta || !ieee80211_is_data(fc)) {
1880 if (tx_info->driver_data[0] != NULL) { 1874 if (tx_info->driver_data[0] != NULL) {
1881 kfree(tx_info->driver_data[0]); 1875 kfree(tx_info->driver_data[0]);
1882 tx_info->driver_data[0] = NULL; 1876 tx_info->driver_data[0] = NULL;
@@ -1884,37 +1878,40 @@ static void ath_tx_status(void *priv, struct net_device *dev,
1884 return; 1878 return;
1885 } 1879 }
1886 if (tx_info->driver_data[0] != NULL) { 1880 if (tx_info->driver_data[0] != NULL) {
1887 ath_rate_tx_complete(sc, an, sta->rate_ctrl_priv, tx_info_priv); 1881 ath_rate_tx_complete(sc, an, priv_sta, tx_info_priv);
1888 kfree(tx_info->driver_data[0]); 1882 kfree(tx_info->driver_data[0]);
1889 tx_info->driver_data[0] = NULL; 1883 tx_info->driver_data[0] = NULL;
1890 } 1884 }
1891} 1885}
1892 1886
1893static void ath_tx_aggr_resp(struct ath_softc *sc, 1887static void ath_tx_aggr_resp(struct ath_softc *sc,
1894 struct sta_info *sta, 1888 struct ieee80211_supported_band *sband,
1889 struct ieee80211_sta *sta,
1895 struct ath_node *an, 1890 struct ath_node *an,
1896 u8 tidno) 1891 u8 tidno)
1897{ 1892{
1898 struct ieee80211_hw *hw = sc->hw;
1899 struct ieee80211_local *local;
1900 struct ath_atx_tid *txtid; 1893 struct ath_atx_tid *txtid;
1901 struct ieee80211_supported_band *sband;
1902 u16 buffersize = 0; 1894 u16 buffersize = 0;
1903 int state; 1895 int state;
1904 DECLARE_MAC_BUF(mac); 1896 struct sta_info *si;
1905 1897
1906 if (!sc->sc_txaggr) 1898 if (!(sc->sc_flags & SC_OP_TXAGGR))
1907 return; 1899 return;
1908 1900
1909 txtid = ATH_AN_2_TID(an, tidno); 1901 txtid = ATH_AN_2_TID(an, tidno);
1910 if (!txtid->paused) 1902 if (!txtid->paused)
1911 return; 1903 return;
1912 1904
1913 local = hw_to_local(sc->hw); 1905 /*
1914 sband = hw->wiphy->bands[hw->conf.channel->band]; 1906 * XXX: This is entirely busted, we aren't supposed to
1907 * access the sta from here because it's internal
1908 * to mac80211, and looking at the state without
1909 * locking is wrong too.
1910 */
1911 si = container_of(sta, struct sta_info, sta);
1915 buffersize = IEEE80211_MIN_AMPDU_BUF << 1912 buffersize = IEEE80211_MIN_AMPDU_BUF <<
1916 sband->ht_info.ampdu_factor; /* FIXME */ 1913 sband->ht_info.ampdu_factor; /* FIXME */
1917 state = sta->ampdu_mlme.tid_state_tx[tidno]; 1914 state = si->ampdu_mlme.tid_state_tx[tidno];
1918 1915
1919 if (state & HT_ADDBA_RECEIVED_MSK) { 1916 if (state & HT_ADDBA_RECEIVED_MSK) {
1920 txtid->addba_exchangecomplete = 1; 1917 txtid->addba_exchangecomplete = 1;
@@ -1930,21 +1927,18 @@ static void ath_tx_aggr_resp(struct ath_softc *sc,
1930 } 1927 }
1931} 1928}
1932 1929
1933static void ath_get_rate(void *priv, struct net_device *dev, 1930static void ath_get_rate(void *priv, struct ieee80211_supported_band *sband,
1934 struct ieee80211_supported_band *sband, 1931 struct ieee80211_sta *sta, void *priv_sta,
1935 struct sk_buff *skb, 1932 struct sk_buff *skb, struct rate_selection *sel)
1936 struct rate_selection *sel)
1937{ 1933{
1938 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1934 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1939 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1935 struct ath_softc *sc = priv;
1940 struct sta_info *sta;
1941 struct ath_softc *sc = (struct ath_softc *)priv;
1942 struct ieee80211_hw *hw = sc->hw; 1936 struct ieee80211_hw *hw = sc->hw;
1943 struct ath_tx_info_priv *tx_info_priv; 1937 struct ath_tx_info_priv *tx_info_priv;
1944 struct ath_rate_node *ath_rc_priv; 1938 struct ath_rate_node *ath_rc_priv = priv_sta;
1945 struct ath_node *an; 1939 struct ath_node *an;
1946 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1940 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1947 int is_probe, chk, ret; 1941 int is_probe = FALSE, chk, ret;
1948 s8 lowest_idx; 1942 s8 lowest_idx;
1949 __le16 fc = hdr->frame_control; 1943 __le16 fc = hdr->frame_control;
1950 u8 *qc, tid; 1944 u8 *qc, tid;
@@ -1957,18 +1951,15 @@ static void ath_get_rate(void *priv, struct net_device *dev,
1957 ASSERT(tx_info->driver_data[0] != NULL); 1951 ASSERT(tx_info->driver_data[0] != NULL);
1958 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; 1952 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1959 1953
1960 sta = sta_info_get(local, hdr->addr1); 1954 lowest_idx = rate_lowest_index(sband, sta);
1961 lowest_idx = rate_lowest_index(local, sband, sta);
1962 tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10; 1955 tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
1963 /* lowest rate for management and multicast/broadcast frames */ 1956 /* lowest rate for management and multicast/broadcast frames */
1964 if (!ieee80211_is_data(fc) || 1957 if (!ieee80211_is_data(fc) ||
1965 is_multicast_ether_addr(hdr->addr1) || !sta) { 1958 is_multicast_ether_addr(hdr->addr1) || !sta) {
1966 sel->rate_idx = lowest_idx; 1959 sel->rate_idx = lowest_idx;
1967 return; 1960 return;
1968 } 1961 }
1969 1962
1970 ath_rc_priv = sta->rate_ctrl_priv;
1971
1972 /* Find tx rate for unicast frames */ 1963 /* Find tx rate for unicast frames */
1973 ath_rate_findrate(sc, ath_rc_priv, 1964 ath_rate_findrate(sc, ath_rc_priv,
1974 ATH_11N_TXMAXTRY, 4, 1965 ATH_11N_TXMAXTRY, 4,
@@ -1977,8 +1968,7 @@ static void ath_get_rate(void *priv, struct net_device *dev,
1977 &is_probe, 1968 &is_probe,
1978 false); 1969 false);
1979 if (is_probe) 1970 if (is_probe)
1980 sel->probe_idx = ((struct ath_tx_ratectrl *) 1971 sel->probe_idx = ath_rc_priv->tx_ratectrl.probe_rate;
1981 sta->rate_ctrl_priv)->probe_rate;
1982 1972
1983 /* Ratecontrol sometimes returns invalid rate index */ 1973 /* Ratecontrol sometimes returns invalid rate index */
1984 if (tx_info_priv->rcs[0].rix != 0xff) 1974 if (tx_info_priv->rcs[0].rix != 0xff)
@@ -2022,38 +2012,31 @@ static void ath_get_rate(void *priv, struct net_device *dev,
2022 __func__, 2012 __func__,
2023 print_mac(mac, hdr->addr1)); 2013 print_mac(mac, hdr->addr1));
2024 } else if (chk == AGGR_EXCHANGE_PROGRESS) 2014 } else if (chk == AGGR_EXCHANGE_PROGRESS)
2025 ath_tx_aggr_resp(sc, sta, an, tid); 2015 ath_tx_aggr_resp(sc, sband, sta, an, tid);
2026 } 2016 }
2027 } 2017 }
2028} 2018}
2029 2019
2030static void ath_rate_init(void *priv, void *priv_sta, 2020static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
2031 struct ieee80211_local *local, 2021 struct ieee80211_sta *sta, void *priv_sta)
2032 struct sta_info *sta)
2033{ 2022{
2034 struct ieee80211_supported_band *sband; 2023 struct ath_softc *sc = priv;
2035 struct ieee80211_hw *hw = local_to_hw(local); 2024 struct ath_rate_node *ath_rc_priv = priv_sta;
2036 struct ieee80211_conf *conf = &local->hw.conf;
2037 struct ath_softc *sc = hw->priv;
2038 int i, j = 0; 2025 int i, j = 0;
2039 2026
2040 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__); 2027 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2041 2028
2042 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2029 ath_setup_rates(sc, sband, sta, ath_rc_priv);
2043 sta->txrate_idx = rate_lowest_index(local, sband, sta); 2030 if (sc->hw->conf.flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2044
2045 ath_setup_rates(local, sta);
2046 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
2047 for (i = 0; i < MCS_SET_SIZE; i++) { 2031 for (i = 0; i < MCS_SET_SIZE; i++) {
2048 if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8))) 2032 if (sc->hw->conf.ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
2049 ((struct ath_rate_node *) 2033 ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
2050 priv_sta)->neg_ht_rates.rs_rates[j++] = i;
2051 if (j == ATH_RATE_MAX) 2034 if (j == ATH_RATE_MAX)
2052 break; 2035 break;
2053 } 2036 }
2054 ((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j; 2037 ath_rc_priv->neg_ht_rates.rs_nrates = j;
2055 } 2038 }
2056 ath_rc_node_update(hw, priv_sta); 2039 ath_rc_node_update(sc->hw, priv_sta);
2057} 2040}
2058 2041
2059static void ath_rate_clear(void *priv) 2042static void ath_rate_clear(void *priv)
@@ -2061,13 +2044,12 @@ static void ath_rate_clear(void *priv)
2061 return; 2044 return;
2062} 2045}
2063 2046
2064static void *ath_rate_alloc(struct ieee80211_local *local) 2047static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2065{ 2048{
2066 struct ieee80211_hw *hw = local_to_hw(local);
2067 struct ath_softc *sc = hw->priv; 2049 struct ath_softc *sc = hw->priv;
2068 2050
2069 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 2051 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2070 return local->hw.priv; 2052 return hw->priv;
2071} 2053}
2072 2054
2073static void ath_rate_free(void *priv) 2055static void ath_rate_free(void *priv)
@@ -2075,24 +2057,28 @@ static void ath_rate_free(void *priv)
2075 return; 2057 return;
2076} 2058}
2077 2059
2078static void *ath_rate_alloc_sta(void *priv, gfp_t gfp) 2060static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
2079{ 2061{
2080 struct ath_softc *sc = priv; 2062 struct ath_softc *sc = priv;
2081 struct ath_vap *avp = sc->sc_vaps[0]; 2063 struct ath_vap *avp = sc->sc_vaps[0];
2082 struct ath_rate_node *rate_priv; 2064 struct ath_rate_node *rate_priv;
2083 2065
2084 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); 2066 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2067
2085 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp); 2068 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp);
2086 if (!rate_priv) { 2069 if (!rate_priv) {
2087 DPRINTF(sc, ATH_DBG_FATAL, "%s:Unable to allocate" 2070 DPRINTF(sc, ATH_DBG_FATAL,
2088 "private rate control structure", __func__); 2071 "%s: Unable to allocate private rc structure\n",
2072 __func__);
2089 return NULL; 2073 return NULL;
2090 } 2074 }
2091 ath_rc_sib_init(rate_priv); 2075 ath_rc_sib_init(rate_priv);
2076
2092 return rate_priv; 2077 return rate_priv;
2093} 2078}
2094 2079
2095static void ath_rate_free_sta(void *priv, void *priv_sta) 2080static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
2081 void *priv_sta)
2096{ 2082{
2097 struct ath_rate_node *rate_priv = priv_sta; 2083 struct ath_rate_node *rate_priv = priv_sta;
2098 struct ath_softc *sc = priv; 2084 struct ath_softc *sc = priv;
@@ -2111,7 +2097,7 @@ static struct rate_control_ops ath_rate_ops = {
2111 .alloc = ath_rate_alloc, 2097 .alloc = ath_rate_alloc,
2112 .free = ath_rate_free, 2098 .free = ath_rate_free,
2113 .alloc_sta = ath_rate_alloc_sta, 2099 .alloc_sta = ath_rate_alloc_sta,
2114 .free_sta = ath_rate_free_sta 2100 .free_sta = ath_rate_free_sta,
2115}; 2101};
2116 2102
2117int ath_rate_control_register(void) 2103int ath_rate_control_register(void)
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index 71aef9c75232..b95b41508b98 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -71,9 +71,6 @@ enum ieee80211_fixed_rate_mode {
71 */ 71 */
72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8))) 72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
73 73
74#define SHORT_PRE 1
75#define LONG_PRE 0
76
77#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS 74#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS
78#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS 75#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS
79#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI 76#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI
@@ -102,18 +99,18 @@ enum {
102 WLAN_RC_PHY_MAX 99 WLAN_RC_PHY_MAX
103}; 100};
104 101
105#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \ 102#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
106 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 103 || (_phy == WLAN_RC_PHY_HT_40_DS) \
107 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 104 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
108 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 105 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
109#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ 106#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
110 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 107 || (_phy == WLAN_RC_PHY_HT_40_DS) \
111 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 108 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
112 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 109 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
113#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ 110#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
114 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ 111 || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
115 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ 112 || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
116 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) 113 || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
117 114
118#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS) 115#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
119 116
@@ -135,56 +132,59 @@ enum {
135#define WLAN_RC_SGI_FLAG (0x04) 132#define WLAN_RC_SGI_FLAG (0x04)
136#define WLAN_RC_HT_FLAG (0x08) 133#define WLAN_RC_HT_FLAG (0x08)
137 134
138/* Index into the rate table */
139#define INIT_RATE_MAX_20 23
140#define INIT_RATE_MAX_40 40
141
142#define RATE_TABLE_SIZE 64 135#define RATE_TABLE_SIZE 64
143 136
144/* XXX: Convert to kdoc */ 137/**
138 * struct ath_rate_table - Rate Control table
139 * @valid: valid for use in rate control
140 * @valid_single_stream: valid for use in rate control for
141 * single stream operation
142 * @phy: CCK/OFDM
143 * @ratekbps: rate in Kbits per second
144 * @user_ratekbps: user rate in Kbits per second
145 * @ratecode: rate that goes into HW descriptors
146 * @short_preamble: Mask for enabling short preamble in ratecode for CCK
147 * @dot11rate: value that goes into supported
148 * rates info element of MLME
149 * @ctrl_rate: Index of next lower basic rate, used for duration computation
150 * @max_4ms_framelen: maximum frame length(bytes) for tx duration
151 * @probe_interval: interval for rate control to probe for other rates
152 * @rssi_reduce_interval: interval for rate control to reduce rssi
153 * @initial_ratemax: initial ratemax value used in ath_rc_sib_update()
154 */
145struct ath_rate_table { 155struct ath_rate_table {
146 int rate_cnt; 156 int rate_cnt;
147 struct { 157 struct {
148 int valid; /* Valid for use in rate control */ 158 int valid;
149 int valid_single_stream;/* Valid for use in rate control 159 int valid_single_stream;
150 for single stream operation */ 160 u8 phy;
151 u8 phy; /* CCK/OFDM/TURBO/XR */ 161 u32 ratekbps;
152 u32 ratekbps; /* Rate in Kbits per second */ 162 u32 user_ratekbps;
153 u32 user_ratekbps; /* User rate in KBits per second */ 163 u8 ratecode;
154 u8 ratecode; /* rate that goes into 164 u8 short_preamble;
155 hw descriptors */ 165 u8 dot11rate;
156 u8 short_preamble; /* Mask for enabling short preamble 166 u8 ctrl_rate;
157 in rate code for CCK */ 167 int8_t rssi_ack_validmin;
158 u8 dot11rate; /* Value that goes into supported 168 int8_t rssi_ack_deltamin;
159 rates info element of MLME */ 169 u8 base_index;
160 u8 ctrl_rate; /* Index of next lower basic rate, 170 u8 cw40index;
161 used for duration computation */ 171 u8 sgi_index;
162 int8_t rssi_ack_validmin; /* Rate control related */ 172 u8 ht_index;
163 int8_t rssi_ack_deltamin; /* Rate control related */ 173 u32 max_4ms_framelen;
164 u8 base_index; /* base rate index */
165 u8 cw40index; /* 40cap rate index */
166 u8 sgi_index; /* shortgi rate index */
167 u8 ht_index; /* shortgi rate index */
168 u32 max_4ms_framelen; /* Maximum frame length(bytes)
169 for 4ms tx duration */
170 } info[RATE_TABLE_SIZE]; 174 } info[RATE_TABLE_SIZE];
171 u32 probe_interval; /* interval for ratectrl to 175 u32 probe_interval;
172 probe for other rates */ 176 u32 rssi_reduce_interval;
173 u32 rssi_reduce_interval; /* interval for ratectrl 177 u8 initial_ratemax;
174 to reduce RSSI */
175 u8 initial_ratemax; /* the initial ratemax value used
176 in ath_rc_sib_update() */
177}; 178};
178 179
179#define ATH_RC_PROBE_ALLOWED 0x00000001 180#define ATH_RC_PROBE_ALLOWED 0x00000001
180#define ATH_RC_MINRATE_LASTRATE 0x00000002 181#define ATH_RC_MINRATE_LASTRATE 0x00000002
181#define ATH_RC_SHORT_PREAMBLE 0x00000004
182 182
183struct ath_rc_series { 183struct ath_rc_series {
184 u8 rix; 184 u8 rix;
185 u8 tries; 185 u8 tries;
186 u8 flags; 186 u8 flags;
187 u32 max_4ms_framelen; 187 u32 max_4ms_framelen;
188}; 188};
189 189
190/* rcs_flags definition */ 190/* rcs_flags definition */
@@ -201,42 +201,56 @@ struct ath_rc_series {
201#define MAX_TX_RATE_PHY 48 201#define MAX_TX_RATE_PHY 48
202 202
203struct ath_tx_ratectrl_state { 203struct ath_tx_ratectrl_state {
204 int8_t rssi_thres; /* required rssi for this rate (dB) */ 204 int8_t rssi_thres; /* required rssi for this rate (dB) */
205 u8 per; /* recent estimate of packet error rate (%) */ 205 u8 per; /* recent estimate of packet error rate (%) */
206}; 206};
207 207
208/**
209 * struct ath_tx_ratectrl - TX Rate control Information
210 * @state: RC state
211 * @rssi_last: last ACK rssi
212 * @rssi_last_lookup: last ACK rssi used for lookup
213 * @rssi_last_prev: previous last ACK rssi
214 * @rssi_last_prev2: 2nd previous last ACK rssi
215 * @rssi_sum_cnt: count of rssi_sum for averaging
216 * @rssi_sum_rate: rate that we are averaging
217 * @rssi_sum: running sum of rssi for averaging
218 * @probe_rate: rate we are probing at
219 * @rssi_time: msec timestamp for last ack rssi
220 * @rssi_down_time: msec timestamp for last down step
221 * @probe_time: msec timestamp for last probe
222 * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
223 * @max_valid_rate: maximum number of valid rate
224 * @per_down_time: msec timestamp for last PER down step
225 * @valid_phy_ratecnt: valid rate count
226 * @rate_max_phy: phy index for the max rate
227 * @probe_interval: interval for ratectrl to probe for other rates
228 */
208struct ath_tx_ratectrl { 229struct ath_tx_ratectrl {
209 struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */ 230 struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL];
210 int8_t rssi_last; /* last ack rssi */ 231 int8_t rssi_last;
211 int8_t rssi_last_lookup; /* last ack rssi used for lookup */ 232 int8_t rssi_last_lookup;
212 int8_t rssi_last_prev; /* previous last ack rssi */ 233 int8_t rssi_last_prev;
213 int8_t rssi_last_prev2; /* 2nd previous last ack rssi */ 234 int8_t rssi_last_prev2;
214 int32_t rssi_sum_cnt; /* count of rssi_sum for averaging */ 235 int32_t rssi_sum_cnt;
215 int32_t rssi_sum_rate; /* rate that we are averaging */ 236 int32_t rssi_sum_rate;
216 int32_t rssi_sum; /* running sum of rssi for averaging */ 237 int32_t rssi_sum;
217 u32 valid_txrate_mask; /* mask of valid rates */ 238 u8 rate_table_size;
218 u8 rate_table_size; /* rate table size */ 239 u8 probe_rate;
219 u8 rate_max; /* max rate that has recently worked */ 240 u32 rssi_time;
220 u8 probe_rate; /* rate we are probing at */ 241 u32 rssi_down_time;
221 u32 rssi_time; /* msec timestamp for last ack rssi */ 242 u32 probe_time;
222 u32 rssi_down_time; /* msec timestamp for last down step */ 243 u8 hw_maxretry_pktcnt;
223 u32 probe_time; /* msec timestamp for last probe */ 244 u8 max_valid_rate;
224 u8 hw_maxretry_pktcnt; /* num packets since we got 245 u8 valid_rate_index[MAX_TX_RATE_TBL];
225 HW max retry error */ 246 u32 per_down_time;
226 u8 max_valid_rate; /* maximum number of valid rate */
227 u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */
228 u32 per_down_time; /* msec timstamp for last
229 PER down step */
230 247
231 /* 11n state */ 248 /* 11n state */
232 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */ 249 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
233 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL]; 250 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL];
234 u8 rc_phy_mode; 251 u8 rc_phy_mode;
235 u8 rate_max_phy; /* Phy index for the max rate */ 252 u8 rate_max_phy;
236 u32 rate_max_lastused; /* msec timstamp of when we 253 u32 probe_interval;
237 last used rateMaxPhy */
238 u32 probe_interval; /* interval for ratectrl to probe
239 for other rates */
240}; 254};
241 255
242struct ath_rateset { 256struct ath_rateset {
@@ -248,29 +262,32 @@ struct ath_rateset {
248struct ath_rate_softc { 262struct ath_rate_softc {
249 /* phy tables that contain rate control data */ 263 /* phy tables that contain rate control data */
250 const void *hw_rate_table[ATH9K_MODE_MAX]; 264 const void *hw_rate_table[ATH9K_MODE_MAX];
251 int fixedrix; /* -1 or index of fixed rate */ 265
266 /* -1 or index of fixed rate */
267 int fixedrix;
252}; 268};
253 269
254/* per-node state */ 270/* per-node state */
255struct ath_rate_node { 271struct ath_rate_node {
256 struct ath_tx_ratectrl tx_ratectrl; /* rate control state proper */ 272 struct ath_tx_ratectrl tx_ratectrl;
257 u32 prev_data_rix; /* rate idx of last data frame */
258 273
259 /* map of rate ix -> negotiated rate set ix */ 274 /* rate idx of last data frame */
260 u8 rixmap[MAX_TX_RATE_TBL]; 275 u32 prev_data_rix;
261 276
262 /* map of ht rate ix -> negotiated rate set ix */ 277 /* ht capabilities */
263 u8 ht_rixmap[MAX_TX_RATE_TBL]; 278 u8 ht_cap;
264 279
265 u8 ht_cap; /* ht capabilities */ 280 /* When TRUE, only single stream Tx possible */
266 u8 ant_tx; /* current transmit antenna */ 281 u8 single_stream;
267 282
268 u8 single_stream; /* When TRUE, only single 283 /* Negotiated rates */
269 stream Tx possible */ 284 struct ath_rateset neg_rates;
270 struct ath_rateset neg_rates; /* Negotiated rates */ 285
271 struct ath_rateset neg_ht_rates; /* Negotiated HT rates */ 286 /* Negotiated HT rates */
272 struct ath_rate_softc *asc; /* back pointer to atheros softc */ 287 struct ath_rateset neg_ht_rates;
273 struct ath_vap *avp; /* back pointer to vap */ 288
289 struct ath_rate_softc *asc;
290 struct ath_vap *avp;
274}; 291};
275 292
276/* Driver data of ieee80211_tx_info */ 293/* Driver data of ieee80211_tx_info */
@@ -297,17 +314,10 @@ void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv);
297void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp); 314void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
298 315
299/* 316/*
300 * Return the tx rate series.
301 */
302void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv,
303 int num_tries, int num_rates,
304 unsigned int rcflag, struct ath_rc_series[],
305 int *is_probe, int isretry);
306/*
307 * Return rate index for given Dot11 Rate. 317 * Return rate index for given Dot11 Rate.
308 */ 318 */
309u8 ath_rate_findrateix(struct ath_softc *sc, 319u8 ath_rate_findrateix(struct ath_softc *sc,
310 u8 dot11_rate); 320 u8 dot11_rate);
311 321
312/* Routines to register/unregister rate control algorithm */ 322/* Routines to register/unregister rate control algorithm */
313int ath_rate_control_register(void); 323int ath_rate_control_register(void);
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 20ddb7acdb94..498256309ab7 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -184,7 +184,7 @@ static int ath_ampdu_input(struct ath_softc *sc,
184 tid = qc[0] & 0xf; 184 tid = qc[0] & 0xf;
185 } 185 }
186 186
187 if (sc->sc_opmode == ATH9K_M_STA) { 187 if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
188 /* Drop the frame not belonging to me. */ 188 /* Drop the frame not belonging to me. */
189 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) { 189 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
190 dev_kfree_skb(skb); 190 dev_kfree_skb(skb);
@@ -449,17 +449,16 @@ static int ath_rx_indicate(struct ath_softc *sc,
449 int type; 449 int type;
450 450
451 /* indicate frame to the stack, which will free the old skb. */ 451 /* indicate frame to the stack, which will free the old skb. */
452 type = ath__rx_indicate(sc, skb, status, keyix); 452 type = _ath_rx_indicate(sc, skb, status, keyix);
453 453
454 /* allocate a new skb and queue it to for H/W processing */ 454 /* allocate a new skb and queue it to for H/W processing */
455 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); 455 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
456 if (nskb != NULL) { 456 if (nskb != NULL) {
457 bf->bf_mpdu = nskb; 457 bf->bf_mpdu = nskb;
458 bf->bf_buf_addr = ath_skb_map_single(sc, 458 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
459 nskb, 459 skb_end_pointer(nskb) - nskb->head,
460 PCI_DMA_FROMDEVICE, 460 PCI_DMA_FROMDEVICE);
461 /* XXX: Remove get_dma_mem_context() */ 461 bf->bf_dmacontext = bf->bf_buf_addr;
462 get_dma_mem_context(bf, bf_dmacontext));
463 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; 462 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
464 463
465 /* queue the new wbuf to H/W */ 464 /* queue the new wbuf to H/W */
@@ -505,7 +504,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
505 504
506 do { 505 do {
507 spin_lock_init(&sc->sc_rxflushlock); 506 spin_lock_init(&sc->sc_rxflushlock);
508 sc->sc_rxflush = 0; 507 sc->sc_flags &= ~SC_OP_RXFLUSH;
509 spin_lock_init(&sc->sc_rxbuflock); 508 spin_lock_init(&sc->sc_rxbuflock);
510 509
511 /* 510 /*
@@ -542,9 +541,10 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
542 } 541 }
543 542
544 bf->bf_mpdu = skb; 543 bf->bf_mpdu = skb;
545 bf->bf_buf_addr = 544 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
546 ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE, 545 skb_end_pointer(skb) - skb->head,
547 get_dma_mem_context(bf, bf_dmacontext)); 546 PCI_DMA_FROMDEVICE);
547 bf->bf_dmacontext = bf->bf_buf_addr;
548 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; 548 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
549 } 549 }
550 sc->sc_rxlink = NULL; 550 sc->sc_rxlink = NULL;
@@ -598,6 +598,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
598u32 ath_calcrxfilter(struct ath_softc *sc) 598u32 ath_calcrxfilter(struct ath_softc *sc)
599{ 599{
600#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) 600#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
601
601 u32 rfilt; 602 u32 rfilt;
602 603
603 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) 604 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
@@ -605,25 +606,29 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
605 | ATH9K_RX_FILTER_MCAST; 606 | ATH9K_RX_FILTER_MCAST;
606 607
607 /* If not a STA, enable processing of Probe Requests */ 608 /* If not a STA, enable processing of Probe Requests */
608 if (sc->sc_opmode != ATH9K_M_STA) 609 if (sc->sc_ah->ah_opmode != ATH9K_M_STA)
609 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 610 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
610 611
611 /* Can't set HOSTAP into promiscous mode */ 612 /* Can't set HOSTAP into promiscous mode */
612 if (sc->sc_opmode == ATH9K_M_MONITOR) { 613 if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) &&
614 (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
615 (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) {
613 rfilt |= ATH9K_RX_FILTER_PROM; 616 rfilt |= ATH9K_RX_FILTER_PROM;
614 /* ??? To prevent from sending ACK */ 617 /* ??? To prevent from sending ACK */
615 rfilt &= ~ATH9K_RX_FILTER_UCAST; 618 rfilt &= ~ATH9K_RX_FILTER_UCAST;
616 } 619 }
617 620
618 if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS || 621 if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
619 sc->sc_scanning) 622 (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) ||
623 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
620 rfilt |= ATH9K_RX_FILTER_BEACON; 624 rfilt |= ATH9K_RX_FILTER_BEACON;
621 625
622 /* If in HOSTAP mode, want to enable reception of PSPOLL frames 626 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
623 & beacon frames */ 627 & beacon frames */
624 if (sc->sc_opmode == ATH9K_M_HOSTAP) 628 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
625 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); 629 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
626 return rfilt; 630 return rfilt;
631
627#undef RX_FILTER_PRESERVE 632#undef RX_FILTER_PRESERVE
628} 633}
629 634
@@ -703,11 +708,11 @@ void ath_flushrecv(struct ath_softc *sc)
703 * progress (see references to sc_rxflush) 708 * progress (see references to sc_rxflush)
704 */ 709 */
705 spin_lock_bh(&sc->sc_rxflushlock); 710 spin_lock_bh(&sc->sc_rxflushlock);
706 sc->sc_rxflush = 1; 711 sc->sc_flags |= SC_OP_RXFLUSH;
707 712
708 ath_rx_tasklet(sc, 1); 713 ath_rx_tasklet(sc, 1);
709 714
710 sc->sc_rxflush = 0; 715 sc->sc_flags &= ~SC_OP_RXFLUSH;
711 spin_unlock_bh(&sc->sc_rxflushlock); 716 spin_unlock_bh(&sc->sc_rxflushlock);
712} 717}
713 718
@@ -720,7 +725,7 @@ int ath_rx_input(struct ath_softc *sc,
720 struct ath_recv_status *rx_status, 725 struct ath_recv_status *rx_status,
721 enum ATH_RX_TYPE *status) 726 enum ATH_RX_TYPE *status)
722{ 727{
723 if (is_ampdu && sc->sc_rxaggr) { 728 if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) {
724 *status = ATH_RX_CONSUMED; 729 *status = ATH_RX_CONSUMED;
725 return ath_ampdu_input(sc, an, skb, rx_status); 730 return ath_ampdu_input(sc, an, skb, rx_status);
726 } else { 731 } else {
@@ -751,7 +756,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
751 756
752 do { 757 do {
753 /* If handling rx interrupt and flush is in progress => exit */ 758 /* If handling rx interrupt and flush is in progress => exit */
754 if (sc->sc_rxflush && (flush == 0)) 759 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
755 break; 760 break;
756 761
757 spin_lock_bh(&sc->sc_rxbuflock); 762 spin_lock_bh(&sc->sc_rxbuflock);
@@ -901,7 +906,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
901 * Enable this if you want to see 906 * Enable this if you want to see
902 * error frames in Monitor mode. 907 * error frames in Monitor mode.
903 */ 908 */
904 if (sc->sc_opmode != ATH9K_M_MONITOR) 909 if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
905 goto rx_next; 910 goto rx_next;
906#endif 911#endif
907 /* fall thru for monitor mode handling... */ 912 /* fall thru for monitor mode handling... */
@@ -946,7 +951,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
946 * decryption and MIC failures. For monitor mode, 951 * decryption and MIC failures. For monitor mode,
947 * we also ignore the CRC error. 952 * we also ignore the CRC error.
948 */ 953 */
949 if (sc->sc_opmode == ATH9K_M_MONITOR) { 954 if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
950 if (ds->ds_rxstat.rs_status & 955 if (ds->ds_rxstat.rs_status &
951 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 956 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
952 ATH9K_RXERR_CRC)) 957 ATH9K_RXERR_CRC))
@@ -1090,7 +1095,7 @@ rx_next:
1090 "%s: Reset rx chain mask. " 1095 "%s: Reset rx chain mask. "
1091 "Do internal reset\n", __func__); 1096 "Do internal reset\n", __func__);
1092 ASSERT(flush == 0); 1097 ASSERT(flush == 0);
1093 ath_internal_reset(sc); 1098 ath_reset(sc, false);
1094 } 1099 }
1095 1100
1096 return 0; 1101 return 0;
@@ -1128,7 +1133,7 @@ int ath_rx_aggr_start(struct ath_softc *sc,
1128 rxtid = &an->an_aggr.rx.tid[tid]; 1133 rxtid = &an->an_aggr.rx.tid[tid];
1129 1134
1130 spin_lock_bh(&rxtid->tidlock); 1135 spin_lock_bh(&rxtid->tidlock);
1131 if (sc->sc_rxaggr) { 1136 if (sc->sc_flags & SC_OP_RXAGGR) {
1132 /* Allow aggregation reception 1137 /* Allow aggregation reception
1133 * Adjust rx BA window size. Peer might indicate a 1138 * Adjust rx BA window size. Peer might indicate a
1134 * zero buffer size for a _dont_care_ condition. 1139 * zero buffer size for a _dont_care_ condition.
@@ -1228,7 +1233,7 @@ void ath_rx_aggr_teardown(struct ath_softc *sc,
1228 1233
1229void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an) 1234void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1230{ 1235{
1231 if (sc->sc_rxaggr) { 1236 if (sc->sc_flags & SC_OP_RXAGGR) {
1232 struct ath_arx_tid *rxtid; 1237 struct ath_arx_tid *rxtid;
1233 int tidno; 1238 int tidno;
1234 1239
@@ -1260,7 +1265,7 @@ void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1260 1265
1261void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an) 1266void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1262{ 1267{
1263 if (sc->sc_rxaggr) { 1268 if (sc->sc_flags & SC_OP_RXAGGR) {
1264 struct ath_arx_tid *rxtid; 1269 struct ath_arx_tid *rxtid;
1265 int tidno, i; 1270 int tidno, i;
1266 1271
@@ -1293,27 +1298,3 @@ void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
1293{ 1298{
1294 ath_rx_node_cleanup(sc, an); 1299 ath_rx_node_cleanup(sc, an);
1295} 1300}
1296
1297dma_addr_t ath_skb_map_single(struct ath_softc *sc,
1298 struct sk_buff *skb,
1299 int direction,
1300 dma_addr_t *pa)
1301{
1302 /*
1303 * NB: do NOT use skb->len, which is 0 on initialization.
1304 * Use skb's entire data area instead.
1305 */
1306 *pa = pci_map_single(sc->pdev, skb->data,
1307 skb_end_pointer(skb) - skb->head, direction);
1308 return *pa;
1309}
1310
1311void ath_skb_unmap_single(struct ath_softc *sc,
1312 struct sk_buff *skb,
1313 int direction,
1314 dma_addr_t *pa)
1315{
1316 /* Unmap skb's entire data area */
1317 pci_unmap_single(sc->pdev, *pa,
1318 skb_end_pointer(skb) - skb->head, direction);
1319}
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index 42b0890a4685..60617ae66209 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -899,12 +899,6 @@ enum {
899#define AR_GPIO_OUTPUT_MUX2 0x4064 899#define AR_GPIO_OUTPUT_MUX2 0x4064
900#define AR_GPIO_OUTPUT_MUX3 0x4068 900#define AR_GPIO_OUTPUT_MUX3 0x4068
901 901
902#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0
903#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1
904#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2
905#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
906#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
907
908#define AR_INPUT_STATE 0x406c 902#define AR_INPUT_STATE 0x406c
909 903
910#define AR_EEPROM_STATUS_DATA 0x407c 904#define AR_EEPROM_STATUS_DATA 0x407c
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 8b332e11a656..25929059c7dc 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -60,79 +60,6 @@ static u32 bits_per_symbol[][2] = {
60#define IS_HT_RATE(_rate) ((_rate) & 0x80) 60#define IS_HT_RATE(_rate) ((_rate) & 0x80)
61 61
62/* 62/*
63 * Insert a chain of ath_buf (descriptors) on a multicast txq
64 * but do NOT start tx DMA on this queue.
65 * NB: must be called with txq lock held
66 */
67
68static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
69 struct ath_txq *txq,
70 struct list_head *head)
71{
72 struct ath_hal *ah = sc->sc_ah;
73 struct ath_buf *bf;
74
75 if (list_empty(head))
76 return;
77
78 /*
79 * Insert the frame on the outbound list and
80 * pass it on to the hardware.
81 */
82 bf = list_first_entry(head, struct ath_buf, list);
83
84 /*
85 * The CAB queue is started from the SWBA handler since
86 * frames only go out on DTIM and to avoid possible races.
87 */
88 ath9k_hw_set_interrupts(ah, 0);
89
90 /*
91 * If there is anything in the mcastq, we want to set
92 * the "more data" bit in the last item in the queue to
93 * indicate that there is "more data". It makes sense to add
94 * it here since you are *always* going to have
95 * more data when adding to this queue, no matter where
96 * you call from.
97 */
98
99 if (txq->axq_depth) {
100 struct ath_buf *lbf;
101 struct ieee80211_hdr *hdr;
102
103 /*
104 * Add the "more data flag" to the last frame
105 */
106
107 lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
108 hdr = (struct ieee80211_hdr *)
109 ((struct sk_buff *)(lbf->bf_mpdu))->data;
110 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
111 }
112
113 /*
114 * Now, concat the frame onto the queue
115 */
116 list_splice_tail_init(head, &txq->axq_q);
117 txq->axq_depth++;
118 txq->axq_totalqueued++;
119 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
120
121 DPRINTF(sc, ATH_DBG_QUEUE,
122 "%s: txq depth = %d\n", __func__, txq->axq_depth);
123 if (txq->axq_link != NULL) {
124 *txq->axq_link = bf->bf_daddr;
125 DPRINTF(sc, ATH_DBG_XMIT,
126 "%s: link[%u](%p)=%llx (%p)\n",
127 __func__,
128 txq->axq_qnum, txq->axq_link,
129 ito64(bf->bf_daddr), bf->bf_desc);
130 }
131 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
132 ath9k_hw_set_interrupts(ah, sc->sc_imask);
133}
134
135/*
136 * Insert a chain of ath_buf (descriptors) on a txq and 63 * Insert a chain of ath_buf (descriptors) on a txq and
137 * assume the descriptors are already chained together by caller. 64 * assume the descriptors are already chained together by caller.
138 * NB: must be called with txq lock held 65 * NB: must be called with txq lock held
@@ -277,8 +204,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
277 __le16 fc; 204 __le16 fc;
278 u8 *qc; 205 u8 *qc;
279 206
280 memset(txctl, 0, sizeof(struct ath_tx_control));
281
282 txctl->dev = sc; 207 txctl->dev = sc;
283 hdr = (struct ieee80211_hdr *)skb->data; 208 hdr = (struct ieee80211_hdr *)skb->data;
284 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 209 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -302,7 +227,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
302 } 227 }
303 228
304 txctl->if_id = 0; 229 txctl->if_id = 0;
305 txctl->nextfraglen = 0;
306 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3); 230 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
307 txctl->txpower = MAX_RATE_POWER; /* FIXME */ 231 txctl->txpower = MAX_RATE_POWER; /* FIXME */
308 232
@@ -329,12 +253,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
329 253
330 /* Fill qnum */ 254 /* Fill qnum */
331 255
332 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); 256 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
333 txq = &sc->sc_txq[txctl->qnum]; 257 txctl->qnum = 0;
258 txq = sc->sc_cabq;
259 } else {
260 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
261 txq = &sc->sc_txq[txctl->qnum];
262 }
334 spin_lock_bh(&txq->axq_lock); 263 spin_lock_bh(&txq->axq_lock);
335 264
336 /* Try to avoid running out of descriptors */ 265 /* Try to avoid running out of descriptors */
337 if (txq->axq_depth >= (ATH_TXBUF - 20)) { 266 if (txq->axq_depth >= (ATH_TXBUF - 20) &&
267 !(txctl->flags & ATH9K_TXDESC_CAB)) {
338 DPRINTF(sc, ATH_DBG_FATAL, 268 DPRINTF(sc, ATH_DBG_FATAL,
339 "%s: TX queue: %d is full, depth: %d\n", 269 "%s: TX queue: %d is full, depth: %d\n",
340 __func__, 270 __func__,
@@ -354,7 +284,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
354 284
355 /* Fill flags */ 285 /* Fill flags */
356 286
357 txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ 287 txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
358 288
359 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 289 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
360 txctl->flags |= ATH9K_TXDESC_NOACK; 290 txctl->flags |= ATH9K_TXDESC_NOACK;
@@ -392,7 +322,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
392 * incremented by the fragmentation routine. 322 * incremented by the fragmentation routine.
393 */ 323 */
394 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) && 324 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
395 txctl->ht && sc->sc_txaggr) { 325 txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
396 struct ath_atx_tid *tid; 326 struct ath_atx_tid *tid;
397 327
398 tid = ATH_AN_2_TID(txctl->an, txctl->tidno); 328 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
@@ -413,50 +343,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
413 } 343 }
414 rix = rcs[0].rix; 344 rix = rcs[0].rix;
415 345
416 /* 346 if (ieee80211_has_morefrags(fc) ||
417 * Calculate duration. This logically belongs in the 802.11 347 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
418 * layer but it lacks sufficient information to calculate it.
419 */
420 if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
421 u16 dur;
422 /* 348 /*
423 * XXX not right with fragmentation. 349 ** Force hardware to use computed duration for next
424 */ 350 ** fragment by disabling multi-rate retry, which
425 if (sc->sc_flags & ATH_PREAMBLE_SHORT) 351 ** updates duration based on the multi-rate
426 dur = rt->info[rix].spAckDuration; 352 ** duration table.
427 else 353 */
428 dur = rt->info[rix].lpAckDuration; 354 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
429 355 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
430 if (le16_to_cpu(hdr->frame_control) & 356 /* reset tries but keep rate index */
431 IEEE80211_FCTL_MOREFRAGS) { 357 rcs[0].tries = ATH_TXMAXTRY;
432 dur += dur; /* Add additional 'SIFS + ACK' */
433
434 /*
435 ** Compute size of next fragment in order to compute
436 ** durations needed to update NAV.
437 ** The last fragment uses the ACK duration only.
438 ** Add time for next fragment.
439 */
440 dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
441 txctl->nextfraglen,
442 rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
443 }
444
445 if (ieee80211_has_morefrags(fc) ||
446 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
447 /*
448 ** Force hardware to use computed duration for next
449 ** fragment by disabling multi-rate retry, which
450 ** updates duration based on the multi-rate
451 ** duration table.
452 */
453 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
454 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
455 /* reset tries but keep rate index */
456 rcs[0].tries = ATH_TXMAXTRY;
457 }
458
459 hdr->duration_id = cpu_to_le16(dur);
460 } 358 }
461 359
462 /* 360 /*
@@ -484,12 +382,8 @@ static int ath_tx_prepare(struct ath_softc *sc,
484 if (is_multicast_ether_addr(hdr->addr1)) { 382 if (is_multicast_ether_addr(hdr->addr1)) {
485 antenna = sc->sc_mcastantenna + 1; 383 antenna = sc->sc_mcastantenna + 1;
486 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1; 384 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
487 } else 385 }
488 antenna = sc->sc_txantenna;
489 386
490#ifdef USE_LEGACY_HAL
491 txctl->antenna = antenna;
492#endif
493 return 0; 387 return 0;
494} 388}
495 389
@@ -502,7 +396,6 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
502{ 396{
503 struct sk_buff *skb = bf->bf_mpdu; 397 struct sk_buff *skb = bf->bf_mpdu;
504 struct ath_xmit_status tx_status; 398 struct ath_xmit_status tx_status;
505 dma_addr_t *pa;
506 399
507 /* 400 /*
508 * Set retry information. 401 * Set retry information.
@@ -518,13 +411,12 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
518 if (!txok) { 411 if (!txok) {
519 tx_status.flags |= ATH_TX_ERROR; 412 tx_status.flags |= ATH_TX_ERROR;
520 413
521 if (bf->bf_isxretried) 414 if (bf_isxretried(bf))
522 tx_status.flags |= ATH_TX_XRETRY; 415 tx_status.flags |= ATH_TX_XRETRY;
523 } 416 }
524 /* Unmap this frame */ 417 /* Unmap this frame */
525 pa = get_dma_mem_context(bf, bf_dmacontext);
526 pci_unmap_single(sc->pdev, 418 pci_unmap_single(sc->pdev,
527 *pa, 419 bf->bf_dmacontext,
528 skb->len, 420 skb->len,
529 PCI_DMA_TODEVICE); 421 PCI_DMA_TODEVICE);
530 /* complete this frame */ 422 /* complete this frame */
@@ -629,7 +521,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc,
629 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) 521 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
630 return 0; 522 return 0;
631 523
632 isaggr = bf->bf_isaggr; 524 isaggr = bf_isaggr(bf);
633 if (isaggr) { 525 if (isaggr) {
634 seq_st = ATH_DS_BA_SEQ(ds); 526 seq_st = ATH_DS_BA_SEQ(ds);
635 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); 527 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
@@ -651,7 +543,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
651 struct sk_buff *skb; 543 struct sk_buff *skb;
652 struct ieee80211_hdr *hdr; 544 struct ieee80211_hdr *hdr;
653 545
654 bf->bf_isretried = 1; 546 bf->bf_state.bf_type |= BUF_RETRY;
655 bf->bf_retries++; 547 bf->bf_retries++;
656 548
657 skb = bf->bf_mpdu; 549 skb = bf->bf_mpdu;
@@ -698,7 +590,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc,
698 u8 rc; 590 u8 rc;
699 int streams, pktlen; 591 int streams, pktlen;
700 592
701 pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen; 593 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
702 rc = rt->info[rix].rateCode; 594 rc = rt->info[rix].rateCode;
703 595
704 /* 596 /*
@@ -742,7 +634,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
742 int i, flags, rtsctsena = 0, dynamic_mimops = 0; 634 int i, flags, rtsctsena = 0, dynamic_mimops = 0;
743 u32 ctsduration = 0; 635 u32 ctsduration = 0;
744 u8 rix = 0, cix, ctsrate = 0; 636 u8 rix = 0, cix, ctsrate = 0;
745 u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit; 637 u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
746 struct ath_node *an = (struct ath_node *) bf->bf_node; 638 struct ath_node *an = (struct ath_node *) bf->bf_node;
747 639
748 /* 640 /*
@@ -781,7 +673,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
781 * let rate series flags determine which rates will actually 673 * let rate series flags determine which rates will actually
782 * use RTS. 674 * use RTS.
783 */ 675 */
784 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) { 676 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
785 BUG_ON(!an); 677 BUG_ON(!an);
786 /* 678 /*
787 * 802.11g protection not needed, use our default behavior 679 * 802.11g protection not needed, use our default behavior
@@ -793,7 +685,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
793 * and the second aggregate should have any protection at all. 685 * and the second aggregate should have any protection at all.
794 */ 686 */
795 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) { 687 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
796 if (!bf->bf_aggrburst) { 688 if (!bf_isaggrburst(bf)) {
797 flags = ATH9K_TXDESC_RTSENA; 689 flags = ATH9K_TXDESC_RTSENA;
798 dynamic_mimops = 1; 690 dynamic_mimops = 1;
799 } else { 691 } else {
@@ -806,7 +698,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
806 * Set protection if aggregate protection on 698 * Set protection if aggregate protection on
807 */ 699 */
808 if (sc->sc_config.ath_aggr_prot && 700 if (sc->sc_config.ath_aggr_prot &&
809 (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) { 701 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
810 flags = ATH9K_TXDESC_RTSENA; 702 flags = ATH9K_TXDESC_RTSENA;
811 cix = rt->info[sc->sc_protrix].controlRate; 703 cix = rt->info[sc->sc_protrix].controlRate;
812 rtsctsena = 1; 704 rtsctsena = 1;
@@ -815,7 +707,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
815 /* 707 /*
816 * For AR5416 - RTS cannot be followed by a frame larger than 8K. 708 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
817 */ 709 */
818 if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) { 710 if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
819 /* 711 /*
820 * Ensure that in the case of SM Dynamic power save 712 * Ensure that in the case of SM Dynamic power save
821 * while we are bursting the second aggregate the 713 * while we are bursting the second aggregate the
@@ -832,7 +724,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
832 /* NB: cix is set above where RTS/CTS is enabled */ 724 /* NB: cix is set above where RTS/CTS is enabled */
833 BUG_ON(cix == 0xff); 725 BUG_ON(cix == 0xff);
834 ctsrate = rt->info[cix].rateCode | 726 ctsrate = rt->info[cix].rateCode |
835 (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0); 727 (bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
836 728
837 /* 729 /*
838 * Setup HAL rate series 730 * Setup HAL rate series
@@ -846,7 +738,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
846 rix = bf->bf_rcs[i].rix; 738 rix = bf->bf_rcs[i].rix;
847 739
848 series[i].Rate = rt->info[rix].rateCode | 740 series[i].Rate = rt->info[rix].rateCode |
849 (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0); 741 (bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);
850 742
851 series[i].Tries = bf->bf_rcs[i].tries; 743 series[i].Tries = bf->bf_rcs[i].tries;
852 744
@@ -862,7 +754,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
862 sc, rix, bf, 754 sc, rix, bf,
863 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0, 755 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
864 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG), 756 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
865 bf->bf_shpreamble); 757 bf_isshpreamble(bf));
866 758
867 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) && 759 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
868 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) { 760 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
@@ -875,7 +767,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
875 */ 767 */
876 series[i].ChSel = sc->sc_tx_chainmask; 768 series[i].ChSel = sc->sc_tx_chainmask;
877 } else { 769 } else {
878 if (bf->bf_ht) 770 if (bf_isht(bf))
879 series[i].ChSel = 771 series[i].ChSel =
880 ath_chainmask_sel_logic(sc, an); 772 ath_chainmask_sel_logic(sc, an);
881 else 773 else
@@ -908,7 +800,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
908 * use the precalculated ACK durations. 800 * use the precalculated ACK durations.
909 */ 801 */
910 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */ 802 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
911 ctsduration += bf->bf_shpreamble ? 803 ctsduration += bf_isshpreamble(bf) ?
912 rt->info[cix].spAckDuration : 804 rt->info[cix].spAckDuration :
913 rt->info[cix].lpAckDuration; 805 rt->info[cix].lpAckDuration;
914 } 806 }
@@ -916,7 +808,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
916 ctsduration += series[0].PktDuration; 808 ctsduration += series[0].PktDuration;
917 809
918 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */ 810 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
919 ctsduration += bf->bf_shpreamble ? 811 ctsduration += bf_isshpreamble(bf) ?
920 rt->info[rix].spAckDuration : 812 rt->info[rix].spAckDuration :
921 rt->info[rix].lpAckDuration; 813 rt->info[rix].lpAckDuration;
922 } 814 }
@@ -932,10 +824,10 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
932 * set dur_update_en for l-sig computation except for PS-Poll frames 824 * set dur_update_en for l-sig computation except for PS-Poll frames
933 */ 825 */
934 ath9k_hw_set11n_ratescenario(ah, ds, lastds, 826 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
935 !bf->bf_ispspoll, 827 !bf_ispspoll(bf),
936 ctsrate, 828 ctsrate,
937 ctsduration, 829 ctsduration,
938 series, 4, flags); 830 series, 4, flags);
939 if (sc->sc_config.ath_aggr_prot && flags) 831 if (sc->sc_config.ath_aggr_prot && flags)
940 ath9k_hw_set11n_burstduration(ah, ds, 8192); 832 ath9k_hw_set11n_burstduration(ah, ds, 8192);
941} 833}
@@ -958,7 +850,7 @@ static int ath_tx_send_normal(struct ath_softc *sc,
958 BUG_ON(list_empty(bf_head)); 850 BUG_ON(list_empty(bf_head));
959 851
960 bf = list_first_entry(bf_head, struct ath_buf, list); 852 bf = list_first_entry(bf_head, struct ath_buf, list);
961 bf->bf_isampdu = 0; /* regular HT frame */ 853 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
962 854
963 skb = (struct sk_buff *)bf->bf_mpdu; 855 skb = (struct sk_buff *)bf->bf_mpdu;
964 tx_info = IEEE80211_SKB_CB(skb); 856 tx_info = IEEE80211_SKB_CB(skb);
@@ -998,7 +890,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
998 890
999 while (!list_empty(&tid->buf_q)) { 891 while (!list_empty(&tid->buf_q)) {
1000 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 892 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
1001 ASSERT(!bf->bf_isretried); 893 ASSERT(!bf_isretried(bf));
1002 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); 894 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1003 ath_tx_send_normal(sc, txq, tid, &bf_head); 895 ath_tx_send_normal(sc, txq, tid, &bf_head);
1004 } 896 }
@@ -1025,7 +917,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1025 int isaggr, txfail, txpending, sendbar = 0, needreset = 0; 917 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
1026 int isnodegone = (an->an_flags & ATH_NODE_CLEAN); 918 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
1027 919
1028 isaggr = bf->bf_isaggr; 920 isaggr = bf_isaggr(bf);
1029 if (isaggr) { 921 if (isaggr) {
1030 if (txok) { 922 if (txok) {
1031 if (ATH_DS_TX_BA(ds)) { 923 if (ATH_DS_TX_BA(ds)) {
@@ -1047,7 +939,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1047 * when perform internal reset in this routine. 939 * when perform internal reset in this routine.
1048 * Only enable reset in STA mode for now. 940 * Only enable reset in STA mode for now.
1049 */ 941 */
1050 if (sc->sc_opmode == ATH9K_M_STA) 942 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
1051 needreset = 1; 943 needreset = 1;
1052 } 944 }
1053 } else { 945 } else {
@@ -1075,7 +967,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1075 ath_tx_set_retry(sc, bf); 967 ath_tx_set_retry(sc, bf);
1076 txpending = 1; 968 txpending = 1;
1077 } else { 969 } else {
1078 bf->bf_isxretried = 1; 970 bf->bf_state.bf_type |= BUF_XRETRY;
1079 txfail = 1; 971 txfail = 1;
1080 sendbar = 1; 972 sendbar = 1;
1081 } 973 }
@@ -1175,11 +1067,8 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1175 tbf->bf_lastfrm->bf_desc); 1067 tbf->bf_lastfrm->bf_desc);
1176 1068
1177 /* copy the DMA context */ 1069 /* copy the DMA context */
1178 copy_dma_mem_context( 1070 tbf->bf_dmacontext =
1179 get_dma_mem_context(tbf, 1071 bf_last->bf_dmacontext;
1180 bf_dmacontext),
1181 get_dma_mem_context(bf_last,
1182 bf_dmacontext));
1183 } 1072 }
1184 list_add_tail(&tbf->list, &bf_head); 1073 list_add_tail(&tbf->list, &bf_head);
1185 } else { 1074 } else {
@@ -1188,7 +1077,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1188 * software retry 1077 * software retry
1189 */ 1078 */
1190 ath9k_hw_cleartxdesc(sc->sc_ah, 1079 ath9k_hw_cleartxdesc(sc->sc_ah,
1191 bf->bf_lastfrm->bf_desc); 1080 bf->bf_lastfrm->bf_desc);
1192 } 1081 }
1193 1082
1194 /* 1083 /*
@@ -1242,7 +1131,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1242 } 1131 }
1243 1132
1244 if (needreset) 1133 if (needreset)
1245 ath_internal_reset(sc); 1134 ath_reset(sc, false);
1246 1135
1247 return; 1136 return;
1248} 1137}
@@ -1331,7 +1220,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1331 1220
1332 txq->axq_depth--; 1221 txq->axq_depth--;
1333 1222
1334 if (bf->bf_isaggr) 1223 if (bf_isaggr(bf))
1335 txq->axq_aggr_depth--; 1224 txq->axq_aggr_depth--;
1336 1225
1337 txok = (ds->ds_txstat.ts_status == 0); 1226 txok = (ds->ds_txstat.ts_status == 0);
@@ -1345,14 +1234,14 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1345 spin_unlock_bh(&sc->sc_txbuflock); 1234 spin_unlock_bh(&sc->sc_txbuflock);
1346 } 1235 }
1347 1236
1348 if (!bf->bf_isampdu) { 1237 if (!bf_isampdu(bf)) {
1349 /* 1238 /*
1350 * This frame is sent out as a single frame. 1239 * This frame is sent out as a single frame.
1351 * Use hardware retry status for this frame. 1240 * Use hardware retry status for this frame.
1352 */ 1241 */
1353 bf->bf_retries = ds->ds_txstat.ts_longretry; 1242 bf->bf_retries = ds->ds_txstat.ts_longretry;
1354 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) 1243 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
1355 bf->bf_isxretried = 1; 1244 bf->bf_state.bf_type |= BUF_XRETRY;
1356 nbad = 0; 1245 nbad = 0;
1357 } else { 1246 } else {
1358 nbad = ath_tx_num_badfrms(sc, bf, txok); 1247 nbad = ath_tx_num_badfrms(sc, bf, txok);
@@ -1368,7 +1257,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1368 if (ds->ds_txstat.ts_status == 0) 1257 if (ds->ds_txstat.ts_status == 0)
1369 nacked++; 1258 nacked++;
1370 1259
1371 if (bf->bf_isdata) { 1260 if (bf_isdata(bf)) {
1372 if (isrifs) 1261 if (isrifs)
1373 tmp_ds = bf->bf_rifslast->bf_desc; 1262 tmp_ds = bf->bf_rifslast->bf_desc;
1374 else 1263 else
@@ -1384,7 +1273,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1384 /* 1273 /*
1385 * Complete this transmit unit 1274 * Complete this transmit unit
1386 */ 1275 */
1387 if (bf->bf_isampdu) 1276 if (bf_isampdu(bf))
1388 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok); 1277 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
1389 else 1278 else
1390 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); 1279 ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
@@ -1406,7 +1295,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1406 /* 1295 /*
1407 * schedule any pending packets if aggregation is enabled 1296 * schedule any pending packets if aggregation is enabled
1408 */ 1297 */
1409 if (sc->sc_txaggr) 1298 if (sc->sc_flags & SC_OP_TXAGGR)
1410 ath_txq_schedule(sc, txq); 1299 ath_txq_schedule(sc, txq);
1411 spin_unlock_bh(&txq->axq_lock); 1300 spin_unlock_bh(&txq->axq_lock);
1412 } 1301 }
@@ -1430,10 +1319,9 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1430 struct ath_hal *ah = sc->sc_ah; 1319 struct ath_hal *ah = sc->sc_ah;
1431 int i; 1320 int i;
1432 int npend = 0; 1321 int npend = 0;
1433 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
1434 1322
1435 /* XXX return value */ 1323 /* XXX return value */
1436 if (!sc->sc_invalid) { 1324 if (!(sc->sc_flags & SC_OP_INVALID)) {
1437 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1325 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1438 if (ATH_TXQ_SETUP(sc, i)) { 1326 if (ATH_TXQ_SETUP(sc, i)) {
1439 ath_tx_stopdma(sc, &sc->sc_txq[i]); 1327 ath_tx_stopdma(sc, &sc->sc_txq[i]);
@@ -1454,10 +1342,11 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1454 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__); 1342 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1455 1343
1456 spin_lock_bh(&sc->sc_resetlock); 1344 spin_lock_bh(&sc->sc_resetlock);
1457 if (!ath9k_hw_reset(ah, sc->sc_opmode, 1345 if (!ath9k_hw_reset(ah,
1458 &sc->sc_curchan, ht_macmode, 1346 sc->sc_ah->ah_curchan,
1459 sc->sc_tx_chainmask, sc->sc_rx_chainmask, 1347 sc->sc_ht_info.tx_chan_width,
1460 sc->sc_ht_extprotspacing, true, &status)) { 1348 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1349 sc->sc_ht_extprotspacing, true, &status)) {
1461 1350
1462 DPRINTF(sc, ATH_DBG_FATAL, 1351 DPRINTF(sc, ATH_DBG_FATAL,
1463 "%s: unable to reset hardware; hal status %u\n", 1352 "%s: unable to reset hardware; hal status %u\n",
@@ -1481,7 +1370,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc,
1481{ 1370{
1482 int index, cindex; 1371 int index, cindex;
1483 1372
1484 if (bf->bf_isretried) 1373 if (bf_isretried(bf))
1485 return; 1374 return;
1486 1375
1487 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 1376 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
@@ -1516,7 +1405,7 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
1516 BUG_ON(list_empty(bf_head)); 1405 BUG_ON(list_empty(bf_head));
1517 1406
1518 bf = list_first_entry(bf_head, struct ath_buf, list); 1407 bf = list_first_entry(bf_head, struct ath_buf, list);
1519 bf->bf_isampdu = 1; 1408 bf->bf_state.bf_type |= BUF_AMPDU;
1520 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */ 1409 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1521 bf->bf_tidno = txctl->tidno; 1410 bf->bf_tidno = txctl->tidno;
1522 1411
@@ -1860,7 +1749,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
1860 if (bf->bf_nframes == 1) { 1749 if (bf->bf_nframes == 1) {
1861 ASSERT(bf->bf_lastfrm == bf_last); 1750 ASSERT(bf->bf_lastfrm == bf_last);
1862 1751
1863 bf->bf_isaggr = 0; 1752 bf->bf_state.bf_type &= ~BUF_AGGR;
1864 /* 1753 /*
1865 * clear aggr bits for every descriptor 1754 * clear aggr bits for every descriptor
1866 * XXX TODO: is there a way to optimize it? 1755 * XXX TODO: is there a way to optimize it?
@@ -1877,7 +1766,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
1877 /* 1766 /*
1878 * setup first desc with rate and aggr info 1767 * setup first desc with rate and aggr info
1879 */ 1768 */
1880 bf->bf_isaggr = 1; 1769 bf->bf_state.bf_type |= BUF_AGGR;
1881 ath_buf_set_rate(sc, bf); 1770 ath_buf_set_rate(sc, bf);
1882 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); 1771 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
1883 1772
@@ -1925,7 +1814,7 @@ static void ath_tid_drain(struct ath_softc *sc,
1925 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); 1814 list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
1926 1815
1927 /* update baw for software retried frame */ 1816 /* update baw for software retried frame */
1928 if (bf->bf_isretried) 1817 if (bf_isretried(bf))
1929 ath_tx_update_baw(sc, tid, bf->bf_seqno); 1818 ath_tx_update_baw(sc, tid, bf->bf_seqno);
1930 1819
1931 /* 1820 /*
@@ -1990,13 +1879,18 @@ static int ath_tx_start_dma(struct ath_softc *sc,
1990 struct list_head bf_head; 1879 struct list_head bf_head;
1991 struct ath_desc *ds; 1880 struct ath_desc *ds;
1992 struct ath_hal *ah = sc->sc_ah; 1881 struct ath_hal *ah = sc->sc_ah;
1993 struct ath_txq *txq = &sc->sc_txq[txctl->qnum]; 1882 struct ath_txq *txq;
1994 struct ath_tx_info_priv *tx_info_priv; 1883 struct ath_tx_info_priv *tx_info_priv;
1995 struct ath_rc_series *rcs; 1884 struct ath_rc_series *rcs;
1996 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1885 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1997 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1886 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1998 __le16 fc = hdr->frame_control; 1887 __le16 fc = hdr->frame_control;
1999 1888
1889 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
1890 txq = sc->sc_cabq;
1891 else
1892 txq = &sc->sc_txq[txctl->qnum];
1893
2000 /* For each sglist entry, allocate an ath_buf for DMA */ 1894 /* For each sglist entry, allocate an ath_buf for DMA */
2001 INIT_LIST_HEAD(&bf_head); 1895 INIT_LIST_HEAD(&bf_head);
2002 spin_lock_bh(&sc->sc_txbuflock); 1896 spin_lock_bh(&sc->sc_txbuflock);
@@ -2014,11 +1908,21 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2014 /* set up this buffer */ 1908 /* set up this buffer */
2015 ATH_TXBUF_RESET(bf); 1909 ATH_TXBUF_RESET(bf);
2016 bf->bf_frmlen = txctl->frmlen; 1910 bf->bf_frmlen = txctl->frmlen;
2017 bf->bf_isdata = ieee80211_is_data(fc); 1911
2018 bf->bf_isbar = ieee80211_is_back_req(fc); 1912 ieee80211_is_data(fc) ?
2019 bf->bf_ispspoll = ieee80211_is_pspoll(fc); 1913 (bf->bf_state.bf_type |= BUF_DATA) :
1914 (bf->bf_state.bf_type &= ~BUF_DATA);
1915 ieee80211_is_back_req(fc) ?
1916 (bf->bf_state.bf_type |= BUF_BAR) :
1917 (bf->bf_state.bf_type &= ~BUF_BAR);
1918 ieee80211_is_pspoll(fc) ?
1919 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1920 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1921 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
1922 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1923 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1924
2020 bf->bf_flags = txctl->flags; 1925 bf->bf_flags = txctl->flags;
2021 bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
2022 bf->bf_keytype = txctl->keytype; 1926 bf->bf_keytype = txctl->keytype;
2023 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; 1927 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
2024 rcs = tx_info_priv->rcs; 1928 rcs = tx_info_priv->rcs;
@@ -2038,8 +1942,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2038 /* 1942 /*
2039 * Save the DMA context in the first ath_buf 1943 * Save the DMA context in the first ath_buf
2040 */ 1944 */
2041 copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext), 1945 bf->bf_dmacontext = txctl->dmacontext;
2042 get_dma_mem_context(txctl, dmacontext));
2043 1946
2044 /* 1947 /*
2045 * Formulate first tx descriptor with tx controls. 1948 * Formulate first tx descriptor with tx controls.
@@ -2060,11 +1963,13 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2060 ds); /* first descriptor */ 1963 ds); /* first descriptor */
2061 1964
2062 bf->bf_lastfrm = bf; 1965 bf->bf_lastfrm = bf;
2063 bf->bf_ht = txctl->ht; 1966 (txctl->ht) ?
1967 (bf->bf_state.bf_type |= BUF_HT) :
1968 (bf->bf_state.bf_type &= ~BUF_HT);
2064 1969
2065 spin_lock_bh(&txq->axq_lock); 1970 spin_lock_bh(&txq->axq_lock);
2066 1971
2067 if (txctl->ht && sc->sc_txaggr) { 1972 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2068 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno); 1973 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
2069 if (ath_aggr_query(sc, an, txctl->tidno)) { 1974 if (ath_aggr_query(sc, an, txctl->tidno)) {
2070 /* 1975 /*
@@ -2090,27 +1995,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2090 bf->bf_tidno = txctl->tidno; 1995 bf->bf_tidno = txctl->tidno;
2091 } 1996 }
2092 1997
2093 if (is_multicast_ether_addr(hdr->addr1)) { 1998 ath_tx_txqaddbuf(sc, txq, &bf_head);
2094 struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
2095
2096 /*
2097 * When servicing one or more stations in power-save
2098 * mode (or) if there is some mcast data waiting on
2099 * mcast queue (to prevent out of order delivery of
2100 * mcast,bcast packets) multicast frames must be
2101 * buffered until after the beacon. We use the private
2102 * mcast queue for that.
2103 */
2104 /* XXX? more bit in 802.11 frame header */
2105 spin_lock_bh(&avp->av_mcastq.axq_lock);
2106 if (txctl->ps || avp->av_mcastq.axq_depth)
2107 ath_tx_mcastqaddbuf(sc,
2108 &avp->av_mcastq, &bf_head);
2109 else
2110 ath_tx_txqaddbuf(sc, txq, &bf_head);
2111 spin_unlock_bh(&avp->av_mcastq.axq_lock);
2112 } else
2113 ath_tx_txqaddbuf(sc, txq, &bf_head);
2114 } 1999 }
2115 spin_unlock_bh(&txq->axq_lock); 2000 spin_unlock_bh(&txq->axq_lock);
2116 return 0; 2001 return 0;
@@ -2118,30 +2003,31 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2118 2003
2119static void xmit_map_sg(struct ath_softc *sc, 2004static void xmit_map_sg(struct ath_softc *sc,
2120 struct sk_buff *skb, 2005 struct sk_buff *skb,
2121 dma_addr_t *pa,
2122 struct ath_tx_control *txctl) 2006 struct ath_tx_control *txctl)
2123{ 2007{
2124 struct ath_xmit_status tx_status; 2008 struct ath_xmit_status tx_status;
2125 struct ath_atx_tid *tid; 2009 struct ath_atx_tid *tid;
2126 struct scatterlist sg; 2010 struct scatterlist sg;
2127 2011
2128 *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 2012 txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
2013 skb->len, PCI_DMA_TODEVICE);
2129 2014
2130 /* setup S/G list */ 2015 /* setup S/G list */
2131 memset(&sg, 0, sizeof(struct scatterlist)); 2016 memset(&sg, 0, sizeof(struct scatterlist));
2132 sg_dma_address(&sg) = *pa; 2017 sg_dma_address(&sg) = txctl->dmacontext;
2133 sg_dma_len(&sg) = skb->len; 2018 sg_dma_len(&sg) = skb->len;
2134 2019
2135 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) { 2020 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
2136 /* 2021 /*
2137 * We have to do drop frame here. 2022 * We have to do drop frame here.
2138 */ 2023 */
2139 pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE); 2024 pci_unmap_single(sc->pdev, txctl->dmacontext,
2025 skb->len, PCI_DMA_TODEVICE);
2140 2026
2141 tx_status.retries = 0; 2027 tx_status.retries = 0;
2142 tx_status.flags = ATH_TX_ERROR; 2028 tx_status.flags = ATH_TX_ERROR;
2143 2029
2144 if (txctl->ht && sc->sc_txaggr) { 2030 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2145 /* Reclaim the seqno. */ 2031 /* Reclaim the seqno. */
2146 tid = ATH_AN_2_TID((struct ath_node *) 2032 tid = ATH_AN_2_TID((struct ath_node *)
2147 txctl->an, txctl->tidno); 2033 txctl->an, txctl->tidno);
@@ -2162,7 +2048,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2162 2048
2163 /* Setup tx descriptors */ 2049 /* Setup tx descriptors */
2164 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 2050 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
2165 "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC); 2051 "tx", nbufs, 1);
2166 if (error != 0) { 2052 if (error != 0) {
2167 DPRINTF(sc, ATH_DBG_FATAL, 2053 DPRINTF(sc, ATH_DBG_FATAL,
2168 "%s: failed to allocate tx descriptors: %d\n", 2054 "%s: failed to allocate tx descriptors: %d\n",
@@ -2403,6 +2289,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2403 struct ath_tx_control txctl; 2289 struct ath_tx_control txctl;
2404 int error = 0; 2290 int error = 0;
2405 2291
2292 memset(&txctl, 0, sizeof(struct ath_tx_control));
2406 error = ath_tx_prepare(sc, skb, &txctl); 2293 error = ath_tx_prepare(sc, skb, &txctl);
2407 if (error == 0) 2294 if (error == 0)
2408 /* 2295 /*
@@ -2410,9 +2297,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2410 * ath_tx_start_dma() will be called either synchronously 2297 * ath_tx_start_dma() will be called either synchronously
2411 * or asynchrounsly once DMA is complete. 2298 * or asynchrounsly once DMA is complete.
2412 */ 2299 */
2413 xmit_map_sg(sc, skb, 2300 xmit_map_sg(sc, skb, &txctl);
2414 get_dma_mem_context(&txctl, dmacontext),
2415 &txctl);
2416 else 2301 else
2417 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE); 2302 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2418 2303
@@ -2424,8 +2309,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2424 2309
2425void ath_tx_tasklet(struct ath_softc *sc) 2310void ath_tx_tasklet(struct ath_softc *sc)
2426{ 2311{
2427 u64 tsf = ath9k_hw_gettsf64(sc->sc_ah); 2312 int i;
2428 int i, nacked = 0;
2429 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); 2313 u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
2430 2314
2431 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); 2315 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
@@ -2435,10 +2319,8 @@ void ath_tx_tasklet(struct ath_softc *sc)
2435 */ 2319 */
2436 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2320 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2437 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) 2321 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2438 nacked += ath_tx_processq(sc, &sc->sc_txq[i]); 2322 ath_tx_processq(sc, &sc->sc_txq[i]);
2439 } 2323 }
2440 if (nacked)
2441 sc->sc_lastrx = tsf;
2442} 2324}
2443 2325
2444void ath_tx_draintxq(struct ath_softc *sc, 2326void ath_tx_draintxq(struct ath_softc *sc,
@@ -2486,14 +2368,14 @@ void ath_tx_draintxq(struct ath_softc *sc,
2486 2368
2487 spin_unlock_bh(&txq->axq_lock); 2369 spin_unlock_bh(&txq->axq_lock);
2488 2370
2489 if (bf->bf_isampdu) 2371 if (bf_isampdu(bf))
2490 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0); 2372 ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
2491 else 2373 else
2492 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); 2374 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
2493 } 2375 }
2494 2376
2495 /* flush any pending frames if aggregation is enabled */ 2377 /* flush any pending frames if aggregation is enabled */
2496 if (sc->sc_txaggr) { 2378 if (sc->sc_flags & SC_OP_TXAGGR) {
2497 if (!retry_tx) { 2379 if (!retry_tx) {
2498 spin_lock_bh(&txq->axq_lock); 2380 spin_lock_bh(&txq->axq_lock);
2499 ath_txq_drain_pending_buffers(sc, txq, 2381 ath_txq_drain_pending_buffers(sc, txq,
@@ -2509,7 +2391,7 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2509{ 2391{
2510 /* stop beacon queue. The beacon will be freed when 2392 /* stop beacon queue. The beacon will be freed when
2511 * we go to INIT state */ 2393 * we go to INIT state */
2512 if (!sc->sc_invalid) { 2394 if (!(sc->sc_flags & SC_OP_INVALID)) {
2513 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 2395 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2514 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__, 2396 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2515 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); 2397 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
@@ -2536,7 +2418,7 @@ enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2536 struct ath_atx_tid *txtid; 2418 struct ath_atx_tid *txtid;
2537 DECLARE_MAC_BUF(mac); 2419 DECLARE_MAC_BUF(mac);
2538 2420
2539 if (!sc->sc_txaggr) 2421 if (!(sc->sc_flags & SC_OP_TXAGGR))
2540 return AGGR_NOT_REQUIRED; 2422 return AGGR_NOT_REQUIRED;
2541 2423
2542 /* ADDBA exchange must be completed before sending aggregates */ 2424 /* ADDBA exchange must be completed before sending aggregates */
@@ -2583,7 +2465,7 @@ int ath_tx_aggr_start(struct ath_softc *sc,
2583 return -1; 2465 return -1;
2584 } 2466 }
2585 2467
2586 if (sc->sc_txaggr) { 2468 if (sc->sc_flags & SC_OP_TXAGGR) {
2587 txtid = ATH_AN_2_TID(an, tid); 2469 txtid = ATH_AN_2_TID(an, tid);
2588 txtid->addba_exchangeinprogress = 1; 2470 txtid->addba_exchangeinprogress = 1;
2589 ath_tx_pause_tid(sc, txtid); 2471 ath_tx_pause_tid(sc, txtid);
@@ -2647,7 +2529,7 @@ void ath_tx_aggr_teardown(struct ath_softc *sc,
2647 spin_lock_bh(&txq->axq_lock); 2529 spin_lock_bh(&txq->axq_lock);
2648 while (!list_empty(&txtid->buf_q)) { 2530 while (!list_empty(&txtid->buf_q)) {
2649 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); 2531 bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
2650 if (!bf->bf_isretried) { 2532 if (!bf_isretried(bf)) {
2651 /* 2533 /*
2652 * NB: it's based on the assumption that 2534 * NB: it's based on the assumption that
2653 * software retried frame will always stay 2535 * software retried frame will always stay
@@ -2743,7 +2625,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2743 2625
2744void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2626void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2745{ 2627{
2746 if (sc->sc_txaggr) { 2628 if (sc->sc_flags & SC_OP_TXAGGR) {
2747 struct ath_atx_tid *tid; 2629 struct ath_atx_tid *tid;
2748 struct ath_atx_ac *ac; 2630 struct ath_atx_ac *ac;
2749 int tidno, acno; 2631 int tidno, acno;
@@ -2855,7 +2737,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc,
2855 2737
2856void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an) 2738void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2857{ 2739{
2858 if (sc->sc_txaggr) { 2740 if (sc->sc_flags & SC_OP_TXAGGR) {
2859 struct ath_atx_tid *tid; 2741 struct ath_atx_tid *tid;
2860 int tidno, i; 2742 int tidno, i;
2861 2743
@@ -2869,3 +2751,57 @@ void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2869 } 2751 }
2870 } 2752 }
2871} 2753}
2754
2755void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2756{
2757 int hdrlen, padsize;
2758 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2759 struct ath_tx_control txctl;
2760
2761 /*
2762 * As a temporary workaround, assign seq# here; this will likely need
2763 * to be cleaned up to work better with Beacon transmission and virtual
2764 * BSSes.
2765 */
2766 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2768 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2769 sc->seq_no += 0x10;
2770 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2771 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
2772 }
2773
2774 /* Add the padding after the header if this is not already done */
2775 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2776 if (hdrlen & 3) {
2777 padsize = hdrlen % 4;
2778 if (skb_headroom(skb) < padsize) {
2779 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
2780 "failed\n", __func__);
2781 dev_kfree_skb_any(skb);
2782 return;
2783 }
2784 skb_push(skb, padsize);
2785 memmove(skb->data, skb->data + padsize, hdrlen);
2786 }
2787
2788 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
2789 __func__,
2790 skb);
2791
2792 memset(&txctl, 0, sizeof(struct ath_tx_control));
2793 txctl.flags = ATH9K_TXDESC_CAB;
2794 if (ath_tx_prepare(sc, skb, &txctl) == 0) {
2795 /*
2796 * Start DMA mapping.
2797 * ath_tx_start_dma() will be called either synchronously
2798 * or asynchrounsly once DMA is complete.
2799 */
2800 xmit_map_sg(sc, skb, &txctl);
2801 } else {
2802 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2803 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
2804 dev_kfree_skb_any(skb);
2805 }
2806}
2807
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index bd65c485098c..ecb02bdaab5b 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2258,7 +2258,7 @@ static int atmel_get_freq(struct net_device *dev,
2258 2258
2259static int atmel_set_scan(struct net_device *dev, 2259static int atmel_set_scan(struct net_device *dev,
2260 struct iw_request_info *info, 2260 struct iw_request_info *info,
2261 struct iw_param *vwrq, 2261 struct iw_point *dwrq,
2262 char *extra) 2262 char *extra)
2263{ 2263{
2264 struct atmel_private *priv = netdev_priv(dev); 2264 struct atmel_private *priv = netdev_priv(dev);
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 12617cd0b78e..d2388e8d179a 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -158,7 +158,7 @@ static int atmel_probe(struct pcmcia_device *p_dev)
158 DEBUG(0, "atmel_attach()\n"); 158 DEBUG(0, "atmel_attach()\n");
159 159
160 /* Interrupt setup */ 160 /* Interrupt setup */
161 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE; 161 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
162 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; 162 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
163 p_dev->irq.Handler = NULL; 163 p_dev->irq.Handler = NULL;
164 164
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 1fa043d1802c..1f81d36f87c5 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -80,6 +80,18 @@ config B43_NPHY
80 80
81 SAY N. 81 SAY N.
82 82
83config B43_PHY_LP
84 bool "IEEE 802.11g LP-PHY support (BROKEN)"
85 depends on B43 && EXPERIMENTAL && BROKEN
86 ---help---
87 Support for the LP-PHY.
88 The LP-PHY is an IEEE 802.11g based PHY built into some notebooks
89 and embedded devices.
90
91 THIS IS BROKEN AND DOES NOT WORK YET.
92
93 SAY N.
94
83# This config option automatically enables b43 LEDS support, 95# This config option automatically enables b43 LEDS support,
84# if it's possible. 96# if it's possible.
85config B43_LEDS 97config B43_LEDS
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 8c52b0b9862a..14a02b3aea53 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,8 +1,11 @@
1b43-y += main.o 1b43-y += main.o
2b43-y += tables.o 2b43-y += tables.o
3b43-$(CONFIG_B43_NPHY) += tables_nphy.o 3b43-$(CONFIG_B43_NPHY) += tables_nphy.o
4b43-y += phy.o 4b43-y += phy_common.o
5b43-$(CONFIG_B43_NPHY) += nphy.o 5b43-y += phy_g.o
6b43-y += phy_a.o
7b43-$(CONFIG_B43_NPHY) += phy_n.o
8b43-$(CONFIG_B43_PHY_LP) += phy_lp.o
6b43-y += sysfs.o 9b43-y += sysfs.o
7b43-y += xmit.o 10b43-y += xmit.o
8b43-y += lo.o 11b43-y += lo.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index edcdfa366452..427b8203e3f9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -12,7 +12,7 @@
12#include "leds.h" 12#include "leds.h"
13#include "rfkill.h" 13#include "rfkill.h"
14#include "lo.h" 14#include "lo.h"
15#include "phy.h" 15#include "phy_common.h"
16 16
17 17
18/* The unique identifier of the firmware that's officially supported by 18/* The unique identifier of the firmware that's officially supported by
@@ -173,6 +173,11 @@ enum {
173#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */ 173#define B43_SHM_SH_CHAN 0x00A0 /* Current channel (low 8bit only) */
174#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */ 174#define B43_SHM_SH_CHAN_5GHZ 0x0100 /* Bit set, if 5Ghz channel */
175#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */ 175#define B43_SHM_SH_BCMCFIFOID 0x0108 /* Last posted cookie to the bcast/mcast FIFO */
176/* TSSI information */
177#define B43_SHM_SH_TSSI_CCK 0x0058 /* TSSI for last 4 CCK frames (32bit) */
178#define B43_SHM_SH_TSSI_OFDM_A 0x0068 /* TSSI for last 4 OFDM frames (32bit) */
179#define B43_SHM_SH_TSSI_OFDM_G 0x0070 /* TSSI for last 4 OFDM frames (32bit) */
180#define B43_TSSI_MAX 0x7F /* Max value for one TSSI value */
176/* SHM_SHARED TX FIFO variables */ 181/* SHM_SHARED TX FIFO variables */
177#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */ 182#define B43_SHM_SH_SIZE01 0x0098 /* TX FIFO size for FIFO 0 (low) and 1 (high) */
178#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */ 183#define B43_SHM_SH_SIZE23 0x009A /* TX FIFO size for FIFO 2 and 3 */
@@ -508,122 +513,6 @@ struct b43_iv {
508} __attribute__((__packed__)); 513} __attribute__((__packed__));
509 514
510 515
511struct b43_phy {
512 /* Band support flags. */
513 bool supports_2ghz;
514 bool supports_5ghz;
515
516 /* GMODE bit enabled? */
517 bool gmode;
518
519 /* Analog Type */
520 u8 analog;
521 /* B43_PHYTYPE_ */
522 u8 type;
523 /* PHY revision number. */
524 u8 rev;
525
526 /* Radio versioning */
527 u16 radio_manuf; /* Radio manufacturer */
528 u16 radio_ver; /* Radio version */
529 u8 radio_rev; /* Radio revision */
530
531 bool dyn_tssi_tbl; /* tssi2dbm is kmalloc()ed. */
532
533 /* ACI (adjacent channel interference) flags. */
534 bool aci_enable;
535 bool aci_wlan_automatic;
536 bool aci_hw_rssi;
537
538 /* Radio switched on/off */
539 bool radio_on;
540 struct {
541 /* Values saved when turning the radio off.
542 * They are needed when turning it on again. */
543 bool valid;
544 u16 rfover;
545 u16 rfoverval;
546 } radio_off_context;
547
548 u16 minlowsig[2];
549 u16 minlowsigpos[2];
550
551 /* TSSI to dBm table in use */
552 const s8 *tssi2dbm;
553 /* Target idle TSSI */
554 int tgt_idle_tssi;
555 /* Current idle TSSI */
556 int cur_idle_tssi;
557
558 /* LocalOscillator control values. */
559 struct b43_txpower_lo_control *lo_control;
560 /* Values from b43_calc_loopback_gain() */
561 s16 max_lb_gain; /* Maximum Loopback gain in hdB */
562 s16 trsw_rx_gain; /* TRSW RX gain in hdB */
563 s16 lna_lod_gain; /* LNA lod */
564 s16 lna_gain; /* LNA */
565 s16 pga_gain; /* PGA */
566
567 /* Desired TX power level (in dBm).
568 * This is set by the user and adjusted in b43_phy_xmitpower(). */
569 u8 power_level;
570 /* A-PHY TX Power control value. */
571 u16 txpwr_offset;
572
573 /* Current TX power level attenuation control values */
574 struct b43_bbatt bbatt;
575 struct b43_rfatt rfatt;
576 u8 tx_control; /* B43_TXCTL_XXX */
577
578 /* Hardware Power Control enabled? */
579 bool hardware_power_control;
580
581 /* Current Interference Mitigation mode */
582 int interfmode;
583 /* Stack of saved values from the Interference Mitigation code.
584 * Each value in the stack is layed out as follows:
585 * bit 0-11: offset
586 * bit 12-15: register ID
587 * bit 16-32: value
588 * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT
589 */
590#define B43_INTERFSTACK_SIZE 26
591 u32 interfstack[B43_INTERFSTACK_SIZE]; //FIXME: use a data structure
592
593 /* Saved values from the NRSSI Slope calculation */
594 s16 nrssi[2];
595 s32 nrssislope;
596 /* In memory nrssi lookup table. */
597 s8 nrssi_lt[64];
598
599 /* current channel */
600 u8 channel;
601
602 u16 lofcal;
603
604 u16 initval; //FIXME rename?
605
606 /* PHY TX errors counter. */
607 atomic_t txerr_cnt;
608
609 /* The device does address auto increment for the OFDM tables.
610 * We cache the previously used address here and omit the address
611 * write on the next table access, if possible. */
612 u16 ofdmtab_addr; /* The address currently set in hardware. */
613 enum { /* The last data flow direction. */
614 B43_OFDMTAB_DIRECTION_UNKNOWN = 0,
615 B43_OFDMTAB_DIRECTION_READ,
616 B43_OFDMTAB_DIRECTION_WRITE,
617 } ofdmtab_addr_direction;
618
619#if B43_DEBUG
620 /* Manual TX-power control enabled? */
621 bool manual_txpower_control;
622 /* PHY registers locked by b43_phy_lock()? */
623 bool phy_locked;
624#endif /* B43_DEBUG */
625};
626
627/* Data structures for DMA transmission, per 80211 core. */ 516/* Data structures for DMA transmission, per 80211 core. */
628struct b43_dma { 517struct b43_dma {
629 struct b43_dmaring *tx_ring_AC_BK; /* Background */ 518 struct b43_dmaring *tx_ring_AC_BK; /* Background */
@@ -680,7 +569,7 @@ struct b43_key {
680#define B43_QOS_VOICE B43_QOS_PARAMS(3) 569#define B43_QOS_VOICE B43_QOS_PARAMS(3)
681 570
682/* QOS parameter hardware data structure offsets. */ 571/* QOS parameter hardware data structure offsets. */
683#define B43_NR_QOSPARAMS 22 572#define B43_NR_QOSPARAMS 16
684enum { 573enum {
685 B43_QOSPARAM_TXOP = 0, 574 B43_QOSPARAM_TXOP = 0,
686 B43_QOSPARAM_CWMIN, 575 B43_QOSPARAM_CWMIN,
@@ -696,8 +585,6 @@ enum {
696struct b43_qos_params { 585struct b43_qos_params {
697 /* The QOS parameters */ 586 /* The QOS parameters */
698 struct ieee80211_tx_queue_params p; 587 struct ieee80211_tx_queue_params p;
699 /* Does this need to get uploaded to hardware? */
700 bool need_hw_update;
701}; 588};
702 589
703struct b43_wldev; 590struct b43_wldev;
@@ -759,11 +646,13 @@ struct b43_wl {
759 bool beacon_templates_virgin; /* Never wrote the templates? */ 646 bool beacon_templates_virgin; /* Never wrote the templates? */
760 struct work_struct beacon_update_trigger; 647 struct work_struct beacon_update_trigger;
761 648
762 /* The current QOS parameters for the 4 queues. 649 /* The current QOS parameters for the 4 queues. */
763 * This is protected by the irq_lock. */
764 struct b43_qos_params qos_params[4]; 650 struct b43_qos_params qos_params[4];
765 /* Workqueue for updating QOS parameters in hardware. */ 651
766 struct work_struct qos_update_work; 652 /* Work for adjustment of the transmission power.
653 * This is scheduled when we determine that the actual TX output
654 * power doesn't match what we want. */
655 struct work_struct txpower_adjust_work;
767}; 656};
768 657
769/* In-memory representation of a cached microcode file. */ 658/* In-memory representation of a cached microcode file. */
@@ -908,6 +797,15 @@ static inline int b43_is_mode(struct b43_wl *wl, int type)
908 return (wl->operating && wl->if_type == type); 797 return (wl->operating && wl->if_type == type);
909} 798}
910 799
800/**
801 * b43_current_band - Returns the currently used band.
802 * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ.
803 */
804static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
805{
806 return wl->hw->conf.channel->band;
807}
808
911static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) 809static inline u16 b43_read16(struct b43_wldev *dev, u16 offset)
912{ 810{
913 return ssb_read16(dev->dev, offset); 811 return ssb_read16(dev->dev, offset);
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 29851bc1101f..06a01da80160 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -443,76 +443,6 @@ out_unlock:
443 return count; 443 return count;
444} 444}
445 445
446static ssize_t txpower_g_read_file(struct b43_wldev *dev,
447 char *buf, size_t bufsize)
448{
449 ssize_t count = 0;
450
451 if (dev->phy.type != B43_PHYTYPE_G) {
452 fappend("Device is not a G-PHY\n");
453 goto out;
454 }
455 fappend("Control: %s\n", dev->phy.manual_txpower_control ?
456 "MANUAL" : "AUTOMATIC");
457 fappend("Baseband attenuation: %u\n", dev->phy.bbatt.att);
458 fappend("Radio attenuation: %u\n", dev->phy.rfatt.att);
459 fappend("TX Mixer Gain: %s\n",
460 (dev->phy.tx_control & B43_TXCTL_TXMIX) ? "ON" : "OFF");
461 fappend("PA Gain 2dB: %s\n",
462 (dev->phy.tx_control & B43_TXCTL_PA2DB) ? "ON" : "OFF");
463 fappend("PA Gain 3dB: %s\n",
464 (dev->phy.tx_control & B43_TXCTL_PA3DB) ? "ON" : "OFF");
465 fappend("\n\n");
466 fappend("You can write to this file:\n");
467 fappend("Writing \"auto\" enables automatic txpower control.\n");
468 fappend
469 ("Writing the attenuation values as \"bbatt rfatt txmix pa2db pa3db\" "
470 "enables manual txpower control.\n");
471 fappend("Example: 5 4 0 0 1\n");
472 fappend("Enables manual control with Baseband attenuation 5, "
473 "Radio attenuation 4, No TX Mixer Gain, "
474 "No PA Gain 2dB, With PA Gain 3dB.\n");
475out:
476 return count;
477}
478
479static int txpower_g_write_file(struct b43_wldev *dev,
480 const char *buf, size_t count)
481{
482 if (dev->phy.type != B43_PHYTYPE_G)
483 return -ENODEV;
484 if ((count >= 4) && (memcmp(buf, "auto", 4) == 0)) {
485 /* Automatic control */
486 dev->phy.manual_txpower_control = 0;
487 b43_phy_xmitpower(dev);
488 } else {
489 int bbatt = 0, rfatt = 0, txmix = 0, pa2db = 0, pa3db = 0;
490 /* Manual control */
491 if (sscanf(buf, "%d %d %d %d %d", &bbatt, &rfatt,
492 &txmix, &pa2db, &pa3db) != 5)
493 return -EINVAL;
494 b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
495 dev->phy.manual_txpower_control = 1;
496 dev->phy.bbatt.att = bbatt;
497 dev->phy.rfatt.att = rfatt;
498 dev->phy.tx_control = 0;
499 if (txmix)
500 dev->phy.tx_control |= B43_TXCTL_TXMIX;
501 if (pa2db)
502 dev->phy.tx_control |= B43_TXCTL_PA2DB;
503 if (pa3db)
504 dev->phy.tx_control |= B43_TXCTL_PA3DB;
505 b43_phy_lock(dev);
506 b43_radio_lock(dev);
507 b43_set_txpower_g(dev, &dev->phy.bbatt,
508 &dev->phy.rfatt, dev->phy.tx_control);
509 b43_radio_unlock(dev);
510 b43_phy_unlock(dev);
511 }
512
513 return 0;
514}
515
516/* wl->irq_lock is locked */ 446/* wl->irq_lock is locked */
517static int restart_write_file(struct b43_wldev *dev, 447static int restart_write_file(struct b43_wldev *dev,
518 const char *buf, size_t count) 448 const char *buf, size_t count)
@@ -560,7 +490,7 @@ static ssize_t loctls_read_file(struct b43_wldev *dev,
560 err = -ENODEV; 490 err = -ENODEV;
561 goto out; 491 goto out;
562 } 492 }
563 lo = phy->lo_control; 493 lo = phy->g->lo_control;
564 fappend("-- Local Oscillator calibration data --\n\n"); 494 fappend("-- Local Oscillator calibration data --\n\n");
565 fappend("HW-power-control enabled: %d\n", 495 fappend("HW-power-control enabled: %d\n",
566 dev->phy.hardware_power_control); 496 dev->phy.hardware_power_control);
@@ -578,8 +508,8 @@ static ssize_t loctls_read_file(struct b43_wldev *dev,
578 list_for_each_entry(cal, &lo->calib_list, list) { 508 list_for_each_entry(cal, &lo->calib_list, list) {
579 bool active; 509 bool active;
580 510
581 active = (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) && 511 active = (b43_compare_bbatt(&cal->bbatt, &phy->g->bbatt) &&
582 b43_compare_rfatt(&cal->rfatt, &phy->rfatt)); 512 b43_compare_rfatt(&cal->rfatt, &phy->g->rfatt));
583 fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d " 513 fappend("BB(%d), RF(%d,%d) -> I=%d, Q=%d "
584 "(expires in %lu sec)%s\n", 514 "(expires in %lu sec)%s\n",
585 cal->bbatt.att, 515 cal->bbatt.att,
@@ -763,7 +693,6 @@ B43_DEBUGFS_FOPS(mmio32read, mmio32read__read_file, mmio32read__write_file, 1);
763B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1); 693B43_DEBUGFS_FOPS(mmio32write, NULL, mmio32write__write_file, 1);
764B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1); 694B43_DEBUGFS_FOPS(tsf, tsf_read_file, tsf_write_file, 1);
765B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0); 695B43_DEBUGFS_FOPS(txstat, txstat_read_file, NULL, 0);
766B43_DEBUGFS_FOPS(txpower_g, txpower_g_read_file, txpower_g_write_file, 0);
767B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1); 696B43_DEBUGFS_FOPS(restart, NULL, restart_write_file, 1);
768B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0); 697B43_DEBUGFS_FOPS(loctls, loctls_read_file, NULL, 0);
769 698
@@ -877,7 +806,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev)
877 ADD_FILE(mmio32write, 0200); 806 ADD_FILE(mmio32write, 0200);
878 ADD_FILE(tsf, 0600); 807 ADD_FILE(tsf, 0600);
879 ADD_FILE(txstat, 0400); 808 ADD_FILE(txstat, 0400);
880 ADD_FILE(txpower_g, 0600);
881 ADD_FILE(restart, 0200); 809 ADD_FILE(restart, 0200);
882 ADD_FILE(loctls, 0400); 810 ADD_FILE(loctls, 0400);
883 811
@@ -907,7 +835,6 @@ void b43_debugfs_remove_device(struct b43_wldev *dev)
907 debugfs_remove(e->file_mmio32write.dentry); 835 debugfs_remove(e->file_mmio32write.dentry);
908 debugfs_remove(e->file_tsf.dentry); 836 debugfs_remove(e->file_tsf.dentry);
909 debugfs_remove(e->file_txstat.dentry); 837 debugfs_remove(e->file_txstat.dentry);
910 debugfs_remove(e->file_txpower_g.dentry);
911 debugfs_remove(e->file_restart.dentry); 838 debugfs_remove(e->file_restart.dentry);
912 debugfs_remove(e->file_loctls.dentry); 839 debugfs_remove(e->file_loctls.dentry);
913 840
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 9c854d6aae36..6a18a1470465 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -29,7 +29,7 @@
29 29
30#include "b43.h" 30#include "b43.h"
31#include "lo.h" 31#include "lo.h"
32#include "phy.h" 32#include "phy_g.h"
33#include "main.h" 33#include "main.h"
34 34
35#include <linux/delay.h> 35#include <linux/delay.h>
@@ -174,7 +174,8 @@ static u16 lo_txctl_register_table(struct b43_wldev *dev,
174static void lo_measure_txctl_values(struct b43_wldev *dev) 174static void lo_measure_txctl_values(struct b43_wldev *dev)
175{ 175{
176 struct b43_phy *phy = &dev->phy; 176 struct b43_phy *phy = &dev->phy;
177 struct b43_txpower_lo_control *lo = phy->lo_control; 177 struct b43_phy_g *gphy = phy->g;
178 struct b43_txpower_lo_control *lo = gphy->lo_control;
178 u16 reg, mask; 179 u16 reg, mask;
179 u16 trsw_rx, pga; 180 u16 trsw_rx, pga;
180 u16 radio_pctl_reg; 181 u16 radio_pctl_reg;
@@ -195,7 +196,7 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
195 int lb_gain; /* Loopback gain (in dB) */ 196 int lb_gain; /* Loopback gain (in dB) */
196 197
197 trsw_rx = 0; 198 trsw_rx = 0;
198 lb_gain = phy->max_lb_gain / 2; 199 lb_gain = gphy->max_lb_gain / 2;
199 if (lb_gain > 10) { 200 if (lb_gain > 10) {
200 radio_pctl_reg = 0; 201 radio_pctl_reg = 0;
201 pga = abs(10 - lb_gain) / 6; 202 pga = abs(10 - lb_gain) / 6;
@@ -226,7 +227,7 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
226 } 227 }
227 b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43) 228 b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
228 & 0xFFF0) | radio_pctl_reg); 229 & 0xFFF0) | radio_pctl_reg);
229 b43_phy_set_baseband_attenuation(dev, 2); 230 b43_gphy_set_baseband_attenuation(dev, 2);
230 231
231 reg = lo_txctl_register_table(dev, &mask, NULL); 232 reg = lo_txctl_register_table(dev, &mask, NULL);
232 mask = ~mask; 233 mask = ~mask;
@@ -277,7 +278,8 @@ static void lo_measure_txctl_values(struct b43_wldev *dev)
277static void lo_read_power_vector(struct b43_wldev *dev) 278static void lo_read_power_vector(struct b43_wldev *dev)
278{ 279{
279 struct b43_phy *phy = &dev->phy; 280 struct b43_phy *phy = &dev->phy;
280 struct b43_txpower_lo_control *lo = phy->lo_control; 281 struct b43_phy_g *gphy = phy->g;
282 struct b43_txpower_lo_control *lo = gphy->lo_control;
281 int i; 283 int i;
282 u64 tmp; 284 u64 tmp;
283 u64 power_vector = 0; 285 u64 power_vector = 0;
@@ -298,6 +300,7 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
298 s16 max_rx_gain, int use_trsw_rx) 300 s16 max_rx_gain, int use_trsw_rx)
299{ 301{
300 struct b43_phy *phy = &dev->phy; 302 struct b43_phy *phy = &dev->phy;
303 struct b43_phy_g *gphy = phy->g;
301 u16 tmp; 304 u16 tmp;
302 305
303 if (max_rx_gain < 0) 306 if (max_rx_gain < 0)
@@ -308,7 +311,7 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
308 int trsw_rx_gain; 311 int trsw_rx_gain;
309 312
310 if (use_trsw_rx) { 313 if (use_trsw_rx) {
311 trsw_rx_gain = phy->trsw_rx_gain / 2; 314 trsw_rx_gain = gphy->trsw_rx_gain / 2;
312 if (max_rx_gain >= trsw_rx_gain) { 315 if (max_rx_gain >= trsw_rx_gain) {
313 trsw_rx_gain = max_rx_gain - trsw_rx_gain; 316 trsw_rx_gain = max_rx_gain - trsw_rx_gain;
314 trsw_rx = 0x20; 317 trsw_rx = 0x20;
@@ -316,38 +319,38 @@ static void lo_measure_gain_values(struct b43_wldev *dev,
316 } else 319 } else
317 trsw_rx_gain = max_rx_gain; 320 trsw_rx_gain = max_rx_gain;
318 if (trsw_rx_gain < 9) { 321 if (trsw_rx_gain < 9) {
319 phy->lna_lod_gain = 0; 322 gphy->lna_lod_gain = 0;
320 } else { 323 } else {
321 phy->lna_lod_gain = 1; 324 gphy->lna_lod_gain = 1;
322 trsw_rx_gain -= 8; 325 trsw_rx_gain -= 8;
323 } 326 }
324 trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D); 327 trsw_rx_gain = clamp_val(trsw_rx_gain, 0, 0x2D);
325 phy->pga_gain = trsw_rx_gain / 3; 328 gphy->pga_gain = trsw_rx_gain / 3;
326 if (phy->pga_gain >= 5) { 329 if (gphy->pga_gain >= 5) {
327 phy->pga_gain -= 5; 330 gphy->pga_gain -= 5;
328 phy->lna_gain = 2; 331 gphy->lna_gain = 2;
329 } else 332 } else
330 phy->lna_gain = 0; 333 gphy->lna_gain = 0;
331 } else { 334 } else {
332 phy->lna_gain = 0; 335 gphy->lna_gain = 0;
333 phy->trsw_rx_gain = 0x20; 336 gphy->trsw_rx_gain = 0x20;
334 if (max_rx_gain >= 0x14) { 337 if (max_rx_gain >= 0x14) {
335 phy->lna_lod_gain = 1; 338 gphy->lna_lod_gain = 1;
336 phy->pga_gain = 2; 339 gphy->pga_gain = 2;
337 } else if (max_rx_gain >= 0x12) { 340 } else if (max_rx_gain >= 0x12) {
338 phy->lna_lod_gain = 1; 341 gphy->lna_lod_gain = 1;
339 phy->pga_gain = 1; 342 gphy->pga_gain = 1;
340 } else if (max_rx_gain >= 0xF) { 343 } else if (max_rx_gain >= 0xF) {
341 phy->lna_lod_gain = 1; 344 gphy->lna_lod_gain = 1;
342 phy->pga_gain = 0; 345 gphy->pga_gain = 0;
343 } else { 346 } else {
344 phy->lna_lod_gain = 0; 347 gphy->lna_lod_gain = 0;
345 phy->pga_gain = 0; 348 gphy->pga_gain = 0;
346 } 349 }
347 } 350 }
348 351
349 tmp = b43_radio_read16(dev, 0x7A); 352 tmp = b43_radio_read16(dev, 0x7A);
350 if (phy->lna_lod_gain == 0) 353 if (gphy->lna_lod_gain == 0)
351 tmp &= ~0x0008; 354 tmp &= ~0x0008;
352 else 355 else
353 tmp |= 0x0008; 356 tmp |= 0x0008;
@@ -392,10 +395,11 @@ static void lo_measure_setup(struct b43_wldev *dev,
392{ 395{
393 struct ssb_sprom *sprom = &dev->dev->bus->sprom; 396 struct ssb_sprom *sprom = &dev->dev->bus->sprom;
394 struct b43_phy *phy = &dev->phy; 397 struct b43_phy *phy = &dev->phy;
395 struct b43_txpower_lo_control *lo = phy->lo_control; 398 struct b43_phy_g *gphy = phy->g;
399 struct b43_txpower_lo_control *lo = gphy->lo_control;
396 u16 tmp; 400 u16 tmp;
397 401
398 if (b43_has_hardware_pctl(phy)) { 402 if (b43_has_hardware_pctl(dev)) {
399 sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK); 403 sav->phy_lo_mask = b43_phy_read(dev, B43_PHY_LO_MASK);
400 sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01)); 404 sav->phy_extg_01 = b43_phy_read(dev, B43_PHY_EXTG(0x01));
401 sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL); 405 sav->phy_dacctl_hwpctl = b43_phy_read(dev, B43_PHY_DACCTL);
@@ -496,7 +500,7 @@ static void lo_measure_setup(struct b43_wldev *dev,
496 b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x0802); 500 b43_phy_write(dev, B43_PHY_CCK(0x2B), 0x0802);
497 if (phy->rev >= 2) 501 if (phy->rev >= 2)
498 b43_dummy_transmission(dev); 502 b43_dummy_transmission(dev);
499 b43_radio_selectchannel(dev, 6, 0); 503 b43_gphy_channel_switch(dev, 6, 0);
500 b43_radio_read16(dev, 0x51); /* dummy read */ 504 b43_radio_read16(dev, 0x51); /* dummy read */
501 if (phy->type == B43_PHYTYPE_G) 505 if (phy->type == B43_PHYTYPE_G)
502 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0); 506 b43_phy_write(dev, B43_PHY_CCK(0x2F), 0);
@@ -520,18 +524,19 @@ static void lo_measure_restore(struct b43_wldev *dev,
520 struct lo_g_saved_values *sav) 524 struct lo_g_saved_values *sav)
521{ 525{
522 struct b43_phy *phy = &dev->phy; 526 struct b43_phy *phy = &dev->phy;
527 struct b43_phy_g *gphy = phy->g;
523 u16 tmp; 528 u16 tmp;
524 529
525 if (phy->rev >= 2) { 530 if (phy->rev >= 2) {
526 b43_phy_write(dev, B43_PHY_PGACTL, 0xE300); 531 b43_phy_write(dev, B43_PHY_PGACTL, 0xE300);
527 tmp = (phy->pga_gain << 8); 532 tmp = (gphy->pga_gain << 8);
528 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0); 533 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA0);
529 udelay(5); 534 udelay(5);
530 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2); 535 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA2);
531 udelay(2); 536 udelay(2);
532 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3); 537 b43_phy_write(dev, B43_PHY_RFOVERVAL, tmp | 0xA3);
533 } else { 538 } else {
534 tmp = (phy->pga_gain | 0xEFA0); 539 tmp = (gphy->pga_gain | 0xEFA0);
535 b43_phy_write(dev, B43_PHY_PGACTL, tmp); 540 b43_phy_write(dev, B43_PHY_PGACTL, tmp);
536 } 541 }
537 if (phy->type == B43_PHYTYPE_G) { 542 if (phy->type == B43_PHYTYPE_G) {
@@ -572,7 +577,7 @@ static void lo_measure_restore(struct b43_wldev *dev,
572 b43_phy_write(dev, B43_PHY_CCK(0x3E), sav->phy_cck_3E); 577 b43_phy_write(dev, B43_PHY_CCK(0x3E), sav->phy_cck_3E);
573 b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0); 578 b43_phy_write(dev, B43_PHY_CRS0, sav->phy_crs0);
574 } 579 }
575 if (b43_has_hardware_pctl(phy)) { 580 if (b43_has_hardware_pctl(dev)) {
576 tmp = (sav->phy_lo_mask & 0xBFFF); 581 tmp = (sav->phy_lo_mask & 0xBFFF);
577 b43_phy_write(dev, B43_PHY_LO_MASK, tmp); 582 b43_phy_write(dev, B43_PHY_LO_MASK, tmp);
578 b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01); 583 b43_phy_write(dev, B43_PHY_EXTG(0x01), sav->phy_extg_01);
@@ -580,7 +585,7 @@ static void lo_measure_restore(struct b43_wldev *dev,
580 b43_phy_write(dev, B43_PHY_CCK(0x14), sav->phy_cck_14); 585 b43_phy_write(dev, B43_PHY_CCK(0x14), sav->phy_cck_14);
581 b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl); 586 b43_phy_write(dev, B43_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl);
582 } 587 }
583 b43_radio_selectchannel(dev, sav->old_channel, 1); 588 b43_gphy_channel_switch(dev, sav->old_channel, 1);
584} 589}
585 590
586struct b43_lo_g_statemachine { 591struct b43_lo_g_statemachine {
@@ -597,6 +602,7 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
597 struct b43_lo_g_statemachine *d) 602 struct b43_lo_g_statemachine *d)
598{ 603{
599 struct b43_phy *phy = &dev->phy; 604 struct b43_phy *phy = &dev->phy;
605 struct b43_phy_g *gphy = phy->g;
600 struct b43_loctl test_loctl; 606 struct b43_loctl test_loctl;
601 struct b43_loctl orig_loctl; 607 struct b43_loctl orig_loctl;
602 struct b43_loctl prev_loctl = { 608 struct b43_loctl prev_loctl = {
@@ -646,9 +652,9 @@ static int lo_probe_possible_loctls(struct b43_wldev *dev,
646 test_loctl.q != prev_loctl.q) && 652 test_loctl.q != prev_loctl.q) &&
647 (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) { 653 (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) {
648 b43_lo_write(dev, &test_loctl); 654 b43_lo_write(dev, &test_loctl);
649 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 655 feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
650 phy->pga_gain, 656 gphy->pga_gain,
651 phy->trsw_rx_gain); 657 gphy->trsw_rx_gain);
652 if (feedth < d->lowest_feedth) { 658 if (feedth < d->lowest_feedth) {
653 memcpy(probe_loctl, &test_loctl, 659 memcpy(probe_loctl, &test_loctl,
654 sizeof(struct b43_loctl)); 660 sizeof(struct b43_loctl));
@@ -677,6 +683,7 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
677 int *max_rx_gain) 683 int *max_rx_gain)
678{ 684{
679 struct b43_phy *phy = &dev->phy; 685 struct b43_phy *phy = &dev->phy;
686 struct b43_phy_g *gphy = phy->g;
680 struct b43_lo_g_statemachine d; 687 struct b43_lo_g_statemachine d;
681 u16 feedth; 688 u16 feedth;
682 int found_lower; 689 int found_lower;
@@ -693,17 +700,17 @@ static void lo_probe_loctls_statemachine(struct b43_wldev *dev,
693 max_repeat = 4; 700 max_repeat = 4;
694 do { 701 do {
695 b43_lo_write(dev, &d.min_loctl); 702 b43_lo_write(dev, &d.min_loctl);
696 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 703 feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
697 phy->pga_gain, 704 gphy->pga_gain,
698 phy->trsw_rx_gain); 705 gphy->trsw_rx_gain);
699 if (feedth < 0x258) { 706 if (feedth < 0x258) {
700 if (feedth >= 0x12C) 707 if (feedth >= 0x12C)
701 *max_rx_gain += 6; 708 *max_rx_gain += 6;
702 else 709 else
703 *max_rx_gain += 3; 710 *max_rx_gain += 3;
704 feedth = lo_measure_feedthrough(dev, phy->lna_gain, 711 feedth = lo_measure_feedthrough(dev, gphy->lna_gain,
705 phy->pga_gain, 712 gphy->pga_gain,
706 phy->trsw_rx_gain); 713 gphy->trsw_rx_gain);
707 } 714 }
708 d.lowest_feedth = feedth; 715 d.lowest_feedth = feedth;
709 716
@@ -752,6 +759,7 @@ struct b43_lo_calib * b43_calibrate_lo_setting(struct b43_wldev *dev,
752 const struct b43_rfatt *rfatt) 759 const struct b43_rfatt *rfatt)
753{ 760{
754 struct b43_phy *phy = &dev->phy; 761 struct b43_phy *phy = &dev->phy;
762 struct b43_phy_g *gphy = phy->g;
755 struct b43_loctl loctl = { 763 struct b43_loctl loctl = {
756 .i = 0, 764 .i = 0,
757 .q = 0, 765 .q = 0,
@@ -782,11 +790,11 @@ struct b43_lo_calib * b43_calibrate_lo_setting(struct b43_wldev *dev,
782 if (rfatt->with_padmix) 790 if (rfatt->with_padmix)
783 max_rx_gain -= pad_mix_gain; 791 max_rx_gain -= pad_mix_gain;
784 if (has_loopback_gain(phy)) 792 if (has_loopback_gain(phy))
785 max_rx_gain += phy->max_lb_gain; 793 max_rx_gain += gphy->max_lb_gain;
786 lo_measure_gain_values(dev, max_rx_gain, 794 lo_measure_gain_values(dev, max_rx_gain,
787 has_loopback_gain(phy)); 795 has_loopback_gain(phy));
788 796
789 b43_phy_set_baseband_attenuation(dev, bbatt->att); 797 b43_gphy_set_baseband_attenuation(dev, bbatt->att);
790 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain); 798 lo_probe_loctls_statemachine(dev, &loctl, &max_rx_gain);
791 799
792 lo_measure_restore(dev, &saved_regs); 800 lo_measure_restore(dev, &saved_regs);
@@ -820,7 +828,7 @@ struct b43_lo_calib * b43_get_calib_lo_settings(struct b43_wldev *dev,
820 const struct b43_bbatt *bbatt, 828 const struct b43_bbatt *bbatt,
821 const struct b43_rfatt *rfatt) 829 const struct b43_rfatt *rfatt)
822{ 830{
823 struct b43_txpower_lo_control *lo = dev->phy.lo_control; 831 struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
824 struct b43_lo_calib *c; 832 struct b43_lo_calib *c;
825 833
826 c = b43_find_lo_calib(lo, bbatt, rfatt); 834 c = b43_find_lo_calib(lo, bbatt, rfatt);
@@ -839,7 +847,8 @@ struct b43_lo_calib * b43_get_calib_lo_settings(struct b43_wldev *dev,
839void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all) 847void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
840{ 848{
841 struct b43_phy *phy = &dev->phy; 849 struct b43_phy *phy = &dev->phy;
842 struct b43_txpower_lo_control *lo = phy->lo_control; 850 struct b43_phy_g *gphy = phy->g;
851 struct b43_txpower_lo_control *lo = gphy->lo_control;
843 int i; 852 int i;
844 int rf_offset, bb_offset; 853 int rf_offset, bb_offset;
845 const struct b43_rfatt *rfatt; 854 const struct b43_rfatt *rfatt;
@@ -917,14 +926,14 @@ static inline void b43_lo_fixup_rfatt(struct b43_rfatt *rf)
917 926
918void b43_lo_g_adjust(struct b43_wldev *dev) 927void b43_lo_g_adjust(struct b43_wldev *dev)
919{ 928{
920 struct b43_phy *phy = &dev->phy; 929 struct b43_phy_g *gphy = dev->phy.g;
921 struct b43_lo_calib *cal; 930 struct b43_lo_calib *cal;
922 struct b43_rfatt rf; 931 struct b43_rfatt rf;
923 932
924 memcpy(&rf, &phy->rfatt, sizeof(rf)); 933 memcpy(&rf, &gphy->rfatt, sizeof(rf));
925 b43_lo_fixup_rfatt(&rf); 934 b43_lo_fixup_rfatt(&rf);
926 935
927 cal = b43_get_calib_lo_settings(dev, &phy->bbatt, &rf); 936 cal = b43_get_calib_lo_settings(dev, &gphy->bbatt, &rf);
928 if (!cal) 937 if (!cal)
929 return; 938 return;
930 b43_lo_write(dev, &cal->ctl); 939 b43_lo_write(dev, &cal->ctl);
@@ -952,7 +961,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
952void b43_lo_g_maintanance_work(struct b43_wldev *dev) 961void b43_lo_g_maintanance_work(struct b43_wldev *dev)
953{ 962{
954 struct b43_phy *phy = &dev->phy; 963 struct b43_phy *phy = &dev->phy;
955 struct b43_txpower_lo_control *lo = phy->lo_control; 964 struct b43_phy_g *gphy = phy->g;
965 struct b43_txpower_lo_control *lo = gphy->lo_control;
956 unsigned long now; 966 unsigned long now;
957 unsigned long expire; 967 unsigned long expire;
958 struct b43_lo_calib *cal, *tmp; 968 struct b43_lo_calib *cal, *tmp;
@@ -962,7 +972,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
962 if (!lo) 972 if (!lo)
963 return; 973 return;
964 now = jiffies; 974 now = jiffies;
965 hwpctl = b43_has_hardware_pctl(phy); 975 hwpctl = b43_has_hardware_pctl(dev);
966 976
967 if (hwpctl) { 977 if (hwpctl) {
968 /* Read the power vector and update it, if needed. */ 978 /* Read the power vector and update it, if needed. */
@@ -983,8 +993,8 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
983 if (!time_before(cal->calib_time, expire)) 993 if (!time_before(cal->calib_time, expire))
984 continue; 994 continue;
985 /* This item expired. */ 995 /* This item expired. */
986 if (b43_compare_bbatt(&cal->bbatt, &phy->bbatt) && 996 if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
987 b43_compare_rfatt(&cal->rfatt, &phy->rfatt)) { 997 b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
988 B43_WARN_ON(current_item_expired); 998 B43_WARN_ON(current_item_expired);
989 current_item_expired = 1; 999 current_item_expired = 1;
990 } 1000 }
@@ -1002,7 +1012,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
1002 /* Recalibrate currently used LO setting. */ 1012 /* Recalibrate currently used LO setting. */
1003 if (b43_debug(dev, B43_DBG_LO)) 1013 if (b43_debug(dev, B43_DBG_LO))
1004 b43dbg(dev->wl, "LO: Recalibrating current LO setting\n"); 1014 b43dbg(dev->wl, "LO: Recalibrating current LO setting\n");
1005 cal = b43_calibrate_lo_setting(dev, &phy->bbatt, &phy->rfatt); 1015 cal = b43_calibrate_lo_setting(dev, &gphy->bbatt, &gphy->rfatt);
1006 if (cal) { 1016 if (cal) {
1007 list_add(&cal->list, &lo->calib_list); 1017 list_add(&cal->list, &lo->calib_list);
1008 b43_lo_write(dev, &cal->ctl); 1018 b43_lo_write(dev, &cal->ctl);
@@ -1013,7 +1023,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
1013 1023
1014void b43_lo_g_cleanup(struct b43_wldev *dev) 1024void b43_lo_g_cleanup(struct b43_wldev *dev)
1015{ 1025{
1016 struct b43_txpower_lo_control *lo = dev->phy.lo_control; 1026 struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
1017 struct b43_lo_calib *cal, *tmp; 1027 struct b43_lo_calib *cal, *tmp;
1018 1028
1019 if (!lo) 1029 if (!lo)
@@ -1027,9 +1037,7 @@ void b43_lo_g_cleanup(struct b43_wldev *dev)
1027/* LO Initialization */ 1037/* LO Initialization */
1028void b43_lo_g_init(struct b43_wldev *dev) 1038void b43_lo_g_init(struct b43_wldev *dev)
1029{ 1039{
1030 struct b43_phy *phy = &dev->phy; 1040 if (b43_has_hardware_pctl(dev)) {
1031
1032 if (b43_has_hardware_pctl(phy)) {
1033 lo_read_power_vector(dev); 1041 lo_read_power_vector(dev);
1034 b43_gphy_dc_lt_init(dev, 1); 1042 b43_gphy_dc_lt_init(dev, 1);
1035 } 1043 }
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 1da321cabc12..3b27e20eff80 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -1,7 +1,9 @@
1#ifndef B43_LO_H_ 1#ifndef B43_LO_H_
2#define B43_LO_H_ 2#define B43_LO_H_
3 3
4#include "phy.h" 4/* G-PHY Local Oscillator */
5
6#include "phy_g.h"
5 7
6struct b43_wldev; 8struct b43_wldev;
7 9
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 7205a936ec74..3bf74e236abc 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -44,8 +44,9 @@
44#include "b43.h" 44#include "b43.h"
45#include "main.h" 45#include "main.h"
46#include "debugfs.h" 46#include "debugfs.h"
47#include "phy.h" 47#include "phy_common.h"
48#include "nphy.h" 48#include "phy_g.h"
49#include "phy_n.h"
49#include "dma.h" 50#include "dma.h"
50#include "pio.h" 51#include "pio.h"
51#include "sysfs.h" 52#include "sysfs.h"
@@ -814,7 +815,7 @@ void b43_dummy_transmission(struct b43_wldev *dev)
814 break; 815 break;
815 udelay(10); 816 udelay(10);
816 } 817 }
817 for (i = 0x00; i < 0x0A; i++) { 818 for (i = 0x00; i < 0x19; i++) {
818 value = b43_read16(dev, 0x0690); 819 value = b43_read16(dev, 0x0690);
819 if (!(value & 0x0100)) 820 if (!(value & 0x0100))
820 break; 821 break;
@@ -1051,23 +1052,6 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
1051 } 1052 }
1052} 1053}
1053 1054
1054/* Turn the Analog ON/OFF */
1055static void b43_switch_analog(struct b43_wldev *dev, int on)
1056{
1057 switch (dev->phy.type) {
1058 case B43_PHYTYPE_A:
1059 case B43_PHYTYPE_G:
1060 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
1061 break;
1062 case B43_PHYTYPE_N:
1063 b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
1064 on ? 0 : 0x7FFF);
1065 break;
1066 default:
1067 B43_WARN_ON(1);
1068 }
1069}
1070
1071void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) 1055void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
1072{ 1056{
1073 u32 tmslow; 1057 u32 tmslow;
@@ -1090,8 +1074,12 @@ void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
1090 ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ 1074 ssb_read32(dev->dev, SSB_TMSLOW); /* flush */
1091 msleep(1); 1075 msleep(1);
1092 1076
1093 /* Turn Analog ON */ 1077 /* Turn Analog ON, but only if we already know the PHY-type.
1094 b43_switch_analog(dev, 1); 1078 * This protects against very early setup where we don't know the
1079 * PHY-type, yet. wireless_core_reset will be called once again later,
1080 * when we know the PHY-type. */
1081 if (dev->phy.ops)
1082 dev->phy.ops->switch_analog(dev, 1);
1095 1083
1096 macctl = b43_read32(dev, B43_MMIO_MACCTL); 1084 macctl = b43_read32(dev, B43_MMIO_MACCTL);
1097 macctl &= ~B43_MACCTL_GMODE; 1085 macctl &= ~B43_MACCTL_GMODE;
@@ -1174,6 +1162,8 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
1174{ 1162{
1175 /* Top half of Link Quality calculation. */ 1163 /* Top half of Link Quality calculation. */
1176 1164
1165 if (dev->phy.type != B43_PHYTYPE_G)
1166 return;
1177 if (dev->noisecalc.calculation_running) 1167 if (dev->noisecalc.calculation_running)
1178 return; 1168 return;
1179 dev->noisecalc.calculation_running = 1; 1169 dev->noisecalc.calculation_running = 1;
@@ -1184,7 +1174,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
1184 1174
1185static void handle_irq_noise(struct b43_wldev *dev) 1175static void handle_irq_noise(struct b43_wldev *dev)
1186{ 1176{
1187 struct b43_phy *phy = &dev->phy; 1177 struct b43_phy_g *phy = dev->phy.g;
1188 u16 tmp; 1178 u16 tmp;
1189 u8 noise[4]; 1179 u8 noise[4];
1190 u8 i, j; 1180 u8 i, j;
@@ -1192,6 +1182,9 @@ static void handle_irq_noise(struct b43_wldev *dev)
1192 1182
1193 /* Bottom half of Link Quality calculation. */ 1183 /* Bottom half of Link Quality calculation. */
1194 1184
1185 if (dev->phy.type != B43_PHYTYPE_G)
1186 return;
1187
1195 /* Possible race condition: It might be possible that the user 1188 /* Possible race condition: It might be possible that the user
1196 * changed to a different channel in the meantime since we 1189 * changed to a different channel in the meantime since we
1197 * started the calculation. We ignore that fact, since it's 1190 * started the calculation. We ignore that fact, since it's
@@ -1251,13 +1244,13 @@ generate_new:
1251 1244
1252static void handle_irq_tbtt_indication(struct b43_wldev *dev) 1245static void handle_irq_tbtt_indication(struct b43_wldev *dev)
1253{ 1246{
1254 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { 1247 if (b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) {
1255 ///TODO: PS TBTT 1248 ///TODO: PS TBTT
1256 } else { 1249 } else {
1257 if (1 /*FIXME: the last PSpoll frame was sent successfully */ ) 1250 if (1 /*FIXME: the last PSpoll frame was sent successfully */ )
1258 b43_power_saving_ctl_bits(dev, 0); 1251 b43_power_saving_ctl_bits(dev, 0);
1259 } 1252 }
1260 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) 1253 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
1261 dev->dfq_valid = 1; 1254 dev->dfq_valid = 1;
1262} 1255}
1263 1256
@@ -1606,8 +1599,8 @@ static void handle_irq_beacon(struct b43_wldev *dev)
1606 struct b43_wl *wl = dev->wl; 1599 struct b43_wl *wl = dev->wl;
1607 u32 cmd, beacon0_valid, beacon1_valid; 1600 u32 cmd, beacon0_valid, beacon1_valid;
1608 1601
1609 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP) && 1602 if (!b43_is_mode(wl, NL80211_IFTYPE_AP) &&
1610 !b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) 1603 !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
1611 return; 1604 return;
1612 1605
1613 /* This is the bottom half of the asynchronous beacon update. */ 1606 /* This is the bottom half of the asynchronous beacon update. */
@@ -2575,10 +2568,10 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
2575 ctl &= ~B43_MACCTL_BEACPROMISC; 2568 ctl &= ~B43_MACCTL_BEACPROMISC;
2576 ctl |= B43_MACCTL_INFRA; 2569 ctl |= B43_MACCTL_INFRA;
2577 2570
2578 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) || 2571 if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
2579 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) 2572 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
2580 ctl |= B43_MACCTL_AP; 2573 ctl |= B43_MACCTL_AP;
2581 else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) 2574 else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
2582 ctl &= ~B43_MACCTL_INFRA; 2575 ctl &= ~B43_MACCTL_INFRA;
2583 2576
2584 if (wl->filter_flags & FIF_CONTROL) 2577 if (wl->filter_flags & FIF_CONTROL)
@@ -2688,9 +2681,8 @@ static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna)
2688/* This is the opposite of b43_chip_init() */ 2681/* This is the opposite of b43_chip_init() */
2689static void b43_chip_exit(struct b43_wldev *dev) 2682static void b43_chip_exit(struct b43_wldev *dev)
2690{ 2683{
2691 b43_radio_turn_off(dev, 1); 2684 b43_phy_exit(dev);
2692 b43_gpio_cleanup(dev); 2685 b43_gpio_cleanup(dev);
2693 b43_lo_g_cleanup(dev);
2694 /* firmware is released later */ 2686 /* firmware is released later */
2695} 2687}
2696 2688
@@ -2700,7 +2692,7 @@ static void b43_chip_exit(struct b43_wldev *dev)
2700static int b43_chip_init(struct b43_wldev *dev) 2692static int b43_chip_init(struct b43_wldev *dev)
2701{ 2693{
2702 struct b43_phy *phy = &dev->phy; 2694 struct b43_phy *phy = &dev->phy;
2703 int err, tmp; 2695 int err;
2704 u32 value32, macctl; 2696 u32 value32, macctl;
2705 u16 value16; 2697 u16 value16;
2706 2698
@@ -2725,19 +2717,20 @@ static int b43_chip_init(struct b43_wldev *dev)
2725 err = b43_upload_initvals(dev); 2717 err = b43_upload_initvals(dev);
2726 if (err) 2718 if (err)
2727 goto err_gpio_clean; 2719 goto err_gpio_clean;
2728 b43_radio_turn_on(dev);
2729 2720
2730 b43_write16(dev, 0x03E6, 0x0000); 2721 /* Turn the Analog on and initialize the PHY. */
2722 phy->ops->switch_analog(dev, 1);
2731 err = b43_phy_init(dev); 2723 err = b43_phy_init(dev);
2732 if (err) 2724 if (err)
2733 goto err_radio_off; 2725 goto err_gpio_clean;
2734 2726
2735 /* Select initial Interference Mitigation. */ 2727 /* Disable Interference Mitigation. */
2736 tmp = phy->interfmode; 2728 if (phy->ops->interf_mitigation)
2737 phy->interfmode = B43_INTERFMODE_NONE; 2729 phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
2738 b43_radio_set_interference_mitigation(dev, tmp);
2739 2730
2740 b43_set_rx_antenna(dev, B43_ANTENNA_DEFAULT); 2731 /* Select the antennae */
2732 if (phy->ops->set_rx_antenna)
2733 phy->ops->set_rx_antenna(dev, B43_ANTENNA_DEFAULT);
2741 b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT); 2734 b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT);
2742 2735
2743 if (phy->type == B43_PHYTYPE_B) { 2736 if (phy->type == B43_PHYTYPE_B) {
@@ -2790,8 +2783,6 @@ static int b43_chip_init(struct b43_wldev *dev)
2790out: 2783out:
2791 return err; 2784 return err;
2792 2785
2793err_radio_off:
2794 b43_radio_turn_off(dev, 1);
2795err_gpio_clean: 2786err_gpio_clean:
2796 b43_gpio_cleanup(dev); 2787 b43_gpio_cleanup(dev);
2797 return err; 2788 return err;
@@ -2799,25 +2790,13 @@ err_gpio_clean:
2799 2790
2800static void b43_periodic_every60sec(struct b43_wldev *dev) 2791static void b43_periodic_every60sec(struct b43_wldev *dev)
2801{ 2792{
2802 struct b43_phy *phy = &dev->phy; 2793 const struct b43_phy_operations *ops = dev->phy.ops;
2803 2794
2804 if (phy->type != B43_PHYTYPE_G) 2795 if (ops->pwork_60sec)
2805 return; 2796 ops->pwork_60sec(dev);
2806 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI) { 2797
2807 b43_mac_suspend(dev); 2798 /* Force check the TX power emission now. */
2808 b43_calc_nrssi_slope(dev); 2799 b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME);
2809 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) {
2810 u8 old_chan = phy->channel;
2811
2812 /* VCO Calibration */
2813 if (old_chan >= 8)
2814 b43_radio_selectchannel(dev, 1, 0);
2815 else
2816 b43_radio_selectchannel(dev, 13, 0);
2817 b43_radio_selectchannel(dev, old_chan, 0);
2818 }
2819 b43_mac_enable(dev);
2820 }
2821} 2800}
2822 2801
2823static void b43_periodic_every30sec(struct b43_wldev *dev) 2802static void b43_periodic_every30sec(struct b43_wldev *dev)
@@ -2845,32 +2824,8 @@ static void b43_periodic_every15sec(struct b43_wldev *dev)
2845 } 2824 }
2846 } 2825 }
2847 2826
2848 if (phy->type == B43_PHYTYPE_G) { 2827 if (phy->ops->pwork_15sec)
2849 //TODO: update_aci_moving_average 2828 phy->ops->pwork_15sec(dev);
2850 if (phy->aci_enable && phy->aci_wlan_automatic) {
2851 b43_mac_suspend(dev);
2852 if (!phy->aci_enable && 1 /*TODO: not scanning? */ ) {
2853 if (0 /*TODO: bunch of conditions */ ) {
2854 b43_radio_set_interference_mitigation
2855 (dev, B43_INTERFMODE_MANUALWLAN);
2856 }
2857 } else if (1 /*TODO*/) {
2858 /*
2859 if ((aci_average > 1000) && !(b43_radio_aci_scan(dev))) {
2860 b43_radio_set_interference_mitigation(dev,
2861 B43_INTERFMODE_NONE);
2862 }
2863 */
2864 }
2865 b43_mac_enable(dev);
2866 } else if (phy->interfmode == B43_INTERFMODE_NONWLAN &&
2867 phy->rev == 1) {
2868 //TODO: implement rev1 workaround
2869 }
2870 }
2871 b43_phy_xmitpower(dev); //FIXME: unless scanning?
2872 b43_lo_g_maintanance_work(dev);
2873 //TODO for APHY (temperature?)
2874 2829
2875 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); 2830 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
2876 wmb(); 2831 wmb();
@@ -3104,36 +3059,31 @@ static void b43_qos_params_upload(struct b43_wldev *dev,
3104 } 3059 }
3105} 3060}
3106 3061
3107/* Update the QOS parameters in hardware. */ 3062/* Mapping of mac80211 queue numbers to b43 QoS SHM offsets. */
3108static void b43_qos_update(struct b43_wldev *dev) 3063static const u16 b43_qos_shm_offsets[] = {
3064 /* [mac80211-queue-nr] = SHM_OFFSET, */
3065 [0] = B43_QOS_VOICE,
3066 [1] = B43_QOS_VIDEO,
3067 [2] = B43_QOS_BESTEFFORT,
3068 [3] = B43_QOS_BACKGROUND,
3069};
3070
3071/* Update all QOS parameters in hardware. */
3072static void b43_qos_upload_all(struct b43_wldev *dev)
3109{ 3073{
3110 struct b43_wl *wl = dev->wl; 3074 struct b43_wl *wl = dev->wl;
3111 struct b43_qos_params *params; 3075 struct b43_qos_params *params;
3112 unsigned long flags;
3113 unsigned int i; 3076 unsigned int i;
3114 3077
3115 /* Mapping of mac80211 queues to b43 SHM offsets. */ 3078 BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
3116 static const u16 qos_shm_offsets[] = { 3079 ARRAY_SIZE(wl->qos_params));
3117 [0] = B43_QOS_VOICE,
3118 [1] = B43_QOS_VIDEO,
3119 [2] = B43_QOS_BESTEFFORT,
3120 [3] = B43_QOS_BACKGROUND,
3121 };
3122 BUILD_BUG_ON(ARRAY_SIZE(qos_shm_offsets) != ARRAY_SIZE(wl->qos_params));
3123 3080
3124 b43_mac_suspend(dev); 3081 b43_mac_suspend(dev);
3125 spin_lock_irqsave(&wl->irq_lock, flags);
3126
3127 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { 3082 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
3128 params = &(wl->qos_params[i]); 3083 params = &(wl->qos_params[i]);
3129 if (params->need_hw_update) { 3084 b43_qos_params_upload(dev, &(params->p),
3130 b43_qos_params_upload(dev, &(params->p), 3085 b43_qos_shm_offsets[i]);
3131 qos_shm_offsets[i]);
3132 params->need_hw_update = 0;
3133 }
3134 } 3086 }
3135
3136 spin_unlock_irqrestore(&wl->irq_lock, flags);
3137 b43_mac_enable(dev); 3087 b43_mac_enable(dev);
3138} 3088}
3139 3089
@@ -3142,25 +3092,50 @@ static void b43_qos_clear(struct b43_wl *wl)
3142 struct b43_qos_params *params; 3092 struct b43_qos_params *params;
3143 unsigned int i; 3093 unsigned int i;
3144 3094
3095 /* Initialize QoS parameters to sane defaults. */
3096
3097 BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
3098 ARRAY_SIZE(wl->qos_params));
3099
3145 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { 3100 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
3146 params = &(wl->qos_params[i]); 3101 params = &(wl->qos_params[i]);
3147 3102
3148 memset(&(params->p), 0, sizeof(params->p)); 3103 switch (b43_qos_shm_offsets[i]) {
3149 params->p.aifs = -1; 3104 case B43_QOS_VOICE:
3150 params->need_hw_update = 1; 3105 params->p.txop = 0;
3106 params->p.aifs = 2;
3107 params->p.cw_min = 0x0001;
3108 params->p.cw_max = 0x0001;
3109 break;
3110 case B43_QOS_VIDEO:
3111 params->p.txop = 0;
3112 params->p.aifs = 2;
3113 params->p.cw_min = 0x0001;
3114 params->p.cw_max = 0x0001;
3115 break;
3116 case B43_QOS_BESTEFFORT:
3117 params->p.txop = 0;
3118 params->p.aifs = 3;
3119 params->p.cw_min = 0x0001;
3120 params->p.cw_max = 0x03FF;
3121 break;
3122 case B43_QOS_BACKGROUND:
3123 params->p.txop = 0;
3124 params->p.aifs = 7;
3125 params->p.cw_min = 0x0001;
3126 params->p.cw_max = 0x03FF;
3127 break;
3128 default:
3129 B43_WARN_ON(1);
3130 }
3151 } 3131 }
3152} 3132}
3153 3133
3154/* Initialize the core's QOS capabilities */ 3134/* Initialize the core's QOS capabilities */
3155static void b43_qos_init(struct b43_wldev *dev) 3135static void b43_qos_init(struct b43_wldev *dev)
3156{ 3136{
3157 struct b43_wl *wl = dev->wl;
3158 unsigned int i;
3159
3160 /* Upload the current QOS parameters. */ 3137 /* Upload the current QOS parameters. */
3161 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) 3138 b43_qos_upload_all(dev);
3162 wl->qos_params[i].need_hw_update = 1;
3163 b43_qos_update(dev);
3164 3139
3165 /* Enable QOS support. */ 3140 /* Enable QOS support. */
3166 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF); 3141 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF);
@@ -3169,25 +3144,13 @@ static void b43_qos_init(struct b43_wldev *dev)
3169 | B43_MMIO_IFSCTL_USE_EDCF); 3144 | B43_MMIO_IFSCTL_USE_EDCF);
3170} 3145}
3171 3146
3172static void b43_qos_update_work(struct work_struct *work)
3173{
3174 struct b43_wl *wl = container_of(work, struct b43_wl, qos_update_work);
3175 struct b43_wldev *dev;
3176
3177 mutex_lock(&wl->mutex);
3178 dev = wl->current_dev;
3179 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED)))
3180 b43_qos_update(dev);
3181 mutex_unlock(&wl->mutex);
3182}
3183
3184static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue, 3147static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
3185 const struct ieee80211_tx_queue_params *params) 3148 const struct ieee80211_tx_queue_params *params)
3186{ 3149{
3187 struct b43_wl *wl = hw_to_b43_wl(hw); 3150 struct b43_wl *wl = hw_to_b43_wl(hw);
3188 unsigned long flags; 3151 struct b43_wldev *dev;
3189 unsigned int queue = (unsigned int)_queue; 3152 unsigned int queue = (unsigned int)_queue;
3190 struct b43_qos_params *p; 3153 int err = -ENODEV;
3191 3154
3192 if (queue >= ARRAY_SIZE(wl->qos_params)) { 3155 if (queue >= ARRAY_SIZE(wl->qos_params)) {
3193 /* Queue not available or don't support setting 3156 /* Queue not available or don't support setting
@@ -3195,16 +3158,25 @@ static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue,
3195 * confuse mac80211. */ 3158 * confuse mac80211. */
3196 return 0; 3159 return 0;
3197 } 3160 }
3161 BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) !=
3162 ARRAY_SIZE(wl->qos_params));
3198 3163
3199 spin_lock_irqsave(&wl->irq_lock, flags); 3164 mutex_lock(&wl->mutex);
3200 p = &(wl->qos_params[queue]); 3165 dev = wl->current_dev;
3201 memcpy(&(p->p), params, sizeof(p->p)); 3166 if (unlikely(!dev || (b43_status(dev) < B43_STAT_INITIALIZED)))
3202 p->need_hw_update = 1; 3167 goto out_unlock;
3203 spin_unlock_irqrestore(&wl->irq_lock, flags);
3204 3168
3205 queue_work(hw->workqueue, &wl->qos_update_work); 3169 memcpy(&(wl->qos_params[queue].p), params, sizeof(*params));
3170 b43_mac_suspend(dev);
3171 b43_qos_params_upload(dev, &(wl->qos_params[queue].p),
3172 b43_qos_shm_offsets[queue]);
3173 b43_mac_enable(dev);
3174 err = 0;
3206 3175
3207 return 0; 3176out_unlock:
3177 mutex_unlock(&wl->mutex);
3178
3179 return err;
3208} 3180}
3209 3181
3210static int b43_op_get_tx_stats(struct ieee80211_hw *hw, 3182static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
@@ -3401,7 +3373,7 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3401 /* Switch to the requested channel. 3373 /* Switch to the requested channel.
3402 * The firmware takes care of races with the TX handler. */ 3374 * The firmware takes care of races with the TX handler. */
3403 if (conf->channel->hw_value != phy->channel) 3375 if (conf->channel->hw_value != phy->channel)
3404 b43_radio_selectchannel(dev, conf->channel->hw_value, 0); 3376 b43_switch_channel(dev, conf->channel->hw_value);
3405 3377
3406 /* Enable/Disable ShortSlot timing. */ 3378 /* Enable/Disable ShortSlot timing. */
3407 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) != 3379 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) !=
@@ -3417,26 +3389,30 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3417 3389
3418 /* Adjust the desired TX power level. */ 3390 /* Adjust the desired TX power level. */
3419 if (conf->power_level != 0) { 3391 if (conf->power_level != 0) {
3420 if (conf->power_level != phy->power_level) { 3392 spin_lock_irqsave(&wl->irq_lock, flags);
3421 phy->power_level = conf->power_level; 3393 if (conf->power_level != phy->desired_txpower) {
3422 b43_phy_xmitpower(dev); 3394 phy->desired_txpower = conf->power_level;
3395 b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME |
3396 B43_TXPWR_IGNORE_TSSI);
3423 } 3397 }
3398 spin_unlock_irqrestore(&wl->irq_lock, flags);
3424 } 3399 }
3425 3400
3426 /* Antennas for RX and management frame TX. */ 3401 /* Antennas for RX and management frame TX. */
3427 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_tx); 3402 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_tx);
3428 b43_mgmtframe_txantenna(dev, antenna); 3403 b43_mgmtframe_txantenna(dev, antenna);
3429 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx); 3404 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx);
3430 b43_set_rx_antenna(dev, antenna); 3405 if (phy->ops->set_rx_antenna)
3406 phy->ops->set_rx_antenna(dev, antenna);
3431 3407
3432 /* Update templates for AP/mesh mode. */ 3408 /* Update templates for AP/mesh mode. */
3433 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) || 3409 if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
3434 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) 3410 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
3435 b43_set_beacon_int(dev, conf->beacon_int); 3411 b43_set_beacon_int(dev, conf->beacon_int);
3436 3412
3437 if (!!conf->radio_enabled != phy->radio_on) { 3413 if (!!conf->radio_enabled != phy->radio_on) {
3438 if (conf->radio_enabled) { 3414 if (conf->radio_enabled) {
3439 b43_radio_turn_on(dev); 3415 b43_software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
3440 b43info(dev->wl, "Radio turned on by software\n"); 3416 b43info(dev->wl, "Radio turned on by software\n");
3441 if (!dev->radio_hw_enable) { 3417 if (!dev->radio_hw_enable) {
3442 b43info(dev->wl, "The hardware RF-kill button " 3418 b43info(dev->wl, "The hardware RF-kill button "
@@ -3444,7 +3420,7 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3444 "Press the button to turn it on.\n"); 3420 "Press the button to turn it on.\n");
3445 } 3421 }
3446 } else { 3422 } else {
3447 b43_radio_turn_off(dev, 0); 3423 b43_software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
3448 b43info(dev->wl, "Radio turned off by software\n"); 3424 b43info(dev->wl, "Radio turned off by software\n");
3449 } 3425 }
3450 } 3426 }
@@ -3619,14 +3595,14 @@ static int b43_op_config_interface(struct ieee80211_hw *hw,
3619 else 3595 else
3620 memset(wl->bssid, 0, ETH_ALEN); 3596 memset(wl->bssid, 0, ETH_ALEN);
3621 if (b43_status(dev) >= B43_STAT_INITIALIZED) { 3597 if (b43_status(dev) >= B43_STAT_INITIALIZED) {
3622 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP) || 3598 if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
3623 b43_is_mode(wl, IEEE80211_IF_TYPE_MESH_POINT)) { 3599 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) {
3624 B43_WARN_ON(vif->type != wl->if_type); 3600 B43_WARN_ON(vif->type != wl->if_type);
3625 if (conf->changed & IEEE80211_IFCC_SSID) 3601 if (conf->changed & IEEE80211_IFCC_SSID)
3626 b43_set_ssid(dev, conf->ssid, conf->ssid_len); 3602 b43_set_ssid(dev, conf->ssid, conf->ssid_len);
3627 if (conf->changed & IEEE80211_IFCC_BEACON) 3603 if (conf->changed & IEEE80211_IFCC_BEACON)
3628 b43_update_templates(wl); 3604 b43_update_templates(wl);
3629 } else if (b43_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) { 3605 } else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
3630 if (conf->changed & IEEE80211_IFCC_BEACON) 3606 if (conf->changed & IEEE80211_IFCC_BEACON)
3631 b43_update_templates(wl); 3607 b43_update_templates(wl);
3632 } 3608 }
@@ -3818,48 +3794,10 @@ static int b43_phy_versioning(struct b43_wldev *dev)
3818static void setup_struct_phy_for_init(struct b43_wldev *dev, 3794static void setup_struct_phy_for_init(struct b43_wldev *dev,
3819 struct b43_phy *phy) 3795 struct b43_phy *phy)
3820{ 3796{
3821 struct b43_txpower_lo_control *lo;
3822 int i;
3823
3824 memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
3825 memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
3826
3827 phy->aci_enable = 0;
3828 phy->aci_wlan_automatic = 0;
3829 phy->aci_hw_rssi = 0;
3830
3831 phy->radio_off_context.valid = 0;
3832
3833 lo = phy->lo_control;
3834 if (lo) {
3835 memset(lo, 0, sizeof(*(phy->lo_control)));
3836 lo->tx_bias = 0xFF;
3837 INIT_LIST_HEAD(&lo->calib_list);
3838 }
3839 phy->max_lb_gain = 0;
3840 phy->trsw_rx_gain = 0;
3841 phy->txpwr_offset = 0;
3842
3843 /* NRSSI */
3844 phy->nrssislope = 0;
3845 for (i = 0; i < ARRAY_SIZE(phy->nrssi); i++)
3846 phy->nrssi[i] = -1000;
3847 for (i = 0; i < ARRAY_SIZE(phy->nrssi_lt); i++)
3848 phy->nrssi_lt[i] = i;
3849
3850 phy->lofcal = 0xFFFF;
3851 phy->initval = 0xFFFF;
3852
3853 phy->interfmode = B43_INTERFMODE_NONE;
3854 phy->channel = 0xFF;
3855
3856 phy->hardware_power_control = !!modparam_hwpctl; 3797 phy->hardware_power_control = !!modparam_hwpctl;
3857 3798 phy->next_txpwr_check_time = jiffies;
3858 /* PHY TX errors counter. */ 3799 /* PHY TX errors counter. */
3859 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); 3800 atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
3860
3861 /* OFDM-table address caching. */
3862 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN;
3863} 3801}
3864 3802
3865static void setup_struct_wldev_for_init(struct b43_wldev *dev) 3803static void setup_struct_wldev_for_init(struct b43_wldev *dev)
@@ -3965,7 +3903,7 @@ static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
3965 pu_delay = 3700; 3903 pu_delay = 3700;
3966 else 3904 else
3967 pu_delay = 1050; 3905 pu_delay = 1050;
3968 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle) 3906 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
3969 pu_delay = 500; 3907 pu_delay = 500;
3970 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) 3908 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
3971 pu_delay = max(pu_delay, (u16)2400); 3909 pu_delay = max(pu_delay, (u16)2400);
@@ -3979,7 +3917,7 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
3979 u16 pretbtt; 3917 u16 pretbtt;
3980 3918
3981 /* The time value is in microseconds. */ 3919 /* The time value is in microseconds. */
3982 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) { 3920 if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) {
3983 pretbtt = 2; 3921 pretbtt = 2;
3984 } else { 3922 } else {
3985 if (dev->phy.type == B43_PHYTYPE_A) 3923 if (dev->phy.type == B43_PHYTYPE_A)
@@ -3995,7 +3933,6 @@ static void b43_set_pretbtt(struct b43_wldev *dev)
3995/* Locking: wl->mutex */ 3933/* Locking: wl->mutex */
3996static void b43_wireless_core_exit(struct b43_wldev *dev) 3934static void b43_wireless_core_exit(struct b43_wldev *dev)
3997{ 3935{
3998 struct b43_phy *phy = &dev->phy;
3999 u32 macctl; 3936 u32 macctl;
4000 3937
4001 B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED); 3938 B43_WARN_ON(b43_status(dev) > B43_STAT_INITIALIZED);
@@ -4016,12 +3953,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
4016 b43_dma_free(dev); 3953 b43_dma_free(dev);
4017 b43_pio_free(dev); 3954 b43_pio_free(dev);
4018 b43_chip_exit(dev); 3955 b43_chip_exit(dev);
4019 b43_radio_turn_off(dev, 1); 3956 dev->phy.ops->switch_analog(dev, 0);
4020 b43_switch_analog(dev, 0);
4021 if (phy->dyn_tssi_tbl)
4022 kfree(phy->tssi2dbm);
4023 kfree(phy->lo_control);
4024 phy->lo_control = NULL;
4025 if (dev->wl->current_beacon) { 3957 if (dev->wl->current_beacon) {
4026 dev_kfree_skb_any(dev->wl->current_beacon); 3958 dev_kfree_skb_any(dev->wl->current_beacon);
4027 dev->wl->current_beacon = NULL; 3959 dev->wl->current_beacon = NULL;
@@ -4052,29 +3984,23 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4052 b43_wireless_core_reset(dev, tmp); 3984 b43_wireless_core_reset(dev, tmp);
4053 } 3985 }
4054 3986
4055 if ((phy->type == B43_PHYTYPE_B) || (phy->type == B43_PHYTYPE_G)) { 3987 /* Reset all data structures. */
4056 phy->lo_control =
4057 kzalloc(sizeof(*(phy->lo_control)), GFP_KERNEL);
4058 if (!phy->lo_control) {
4059 err = -ENOMEM;
4060 goto err_busdown;
4061 }
4062 }
4063 setup_struct_wldev_for_init(dev); 3988 setup_struct_wldev_for_init(dev);
4064 3989 phy->ops->prepare_structs(dev);
4065 err = b43_phy_init_tssi2dbm_table(dev);
4066 if (err)
4067 goto err_kfree_lo_control;
4068 3990
4069 /* Enable IRQ routing to this device. */ 3991 /* Enable IRQ routing to this device. */
4070 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); 3992 ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev);
4071 3993
4072 b43_imcfglo_timeouts_workaround(dev); 3994 b43_imcfglo_timeouts_workaround(dev);
4073 b43_bluetooth_coext_disable(dev); 3995 b43_bluetooth_coext_disable(dev);
4074 b43_phy_early_init(dev); 3996 if (phy->ops->prepare_hardware) {
3997 err = phy->ops->prepare_hardware(dev);
3998 if (err)
3999 goto err_busdown;
4000 }
4075 err = b43_chip_init(dev); 4001 err = b43_chip_init(dev);
4076 if (err) 4002 if (err)
4077 goto err_kfree_tssitbl; 4003 goto err_busdown;
4078 b43_shm_write16(dev, B43_SHM_SHARED, 4004 b43_shm_write16(dev, B43_SHM_SHARED,
4079 B43_SHM_SH_WLCOREREV, dev->dev->id.revision); 4005 B43_SHM_SH_WLCOREREV, dev->dev->id.revision);
4080 hf = b43_hf_read(dev); 4006 hf = b43_hf_read(dev);
@@ -4140,15 +4066,9 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4140out: 4066out:
4141 return err; 4067 return err;
4142 4068
4143 err_chip_exit: 4069err_chip_exit:
4144 b43_chip_exit(dev); 4070 b43_chip_exit(dev);
4145 err_kfree_tssitbl: 4071err_busdown:
4146 if (phy->dyn_tssi_tbl)
4147 kfree(phy->tssi2dbm);
4148 err_kfree_lo_control:
4149 kfree(phy->lo_control);
4150 phy->lo_control = NULL;
4151 err_busdown:
4152 ssb_bus_may_powerdown(bus); 4072 ssb_bus_may_powerdown(bus);
4153 B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); 4073 B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT);
4154 return err; 4074 return err;
@@ -4164,11 +4084,11 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
4164 4084
4165 /* TODO: allow WDS/AP devices to coexist */ 4085 /* TODO: allow WDS/AP devices to coexist */
4166 4086
4167 if (conf->type != IEEE80211_IF_TYPE_AP && 4087 if (conf->type != NL80211_IFTYPE_AP &&
4168 conf->type != IEEE80211_IF_TYPE_MESH_POINT && 4088 conf->type != NL80211_IFTYPE_MESH_POINT &&
4169 conf->type != IEEE80211_IF_TYPE_STA && 4089 conf->type != NL80211_IFTYPE_STATION &&
4170 conf->type != IEEE80211_IF_TYPE_WDS && 4090 conf->type != NL80211_IFTYPE_WDS &&
4171 conf->type != IEEE80211_IF_TYPE_IBSS) 4091 conf->type != NL80211_IFTYPE_ADHOC)
4172 return -EOPNOTSUPP; 4092 return -EOPNOTSUPP;
4173 4093
4174 mutex_lock(&wl->mutex); 4094 mutex_lock(&wl->mutex);
@@ -4283,7 +4203,6 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4283 struct b43_wldev *dev = wl->current_dev; 4203 struct b43_wldev *dev = wl->current_dev;
4284 4204
4285 b43_rfkill_exit(dev); 4205 b43_rfkill_exit(dev);
4286 cancel_work_sync(&(wl->qos_update_work));
4287 cancel_work_sync(&(wl->beacon_update_trigger)); 4206 cancel_work_sync(&(wl->beacon_update_trigger));
4288 4207
4289 mutex_lock(&wl->mutex); 4208 mutex_lock(&wl->mutex);
@@ -4291,6 +4210,8 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4291 b43_wireless_core_stop(dev); 4210 b43_wireless_core_stop(dev);
4292 b43_wireless_core_exit(dev); 4211 b43_wireless_core_exit(dev);
4293 mutex_unlock(&wl->mutex); 4212 mutex_unlock(&wl->mutex);
4213
4214 cancel_work_sync(&(wl->txpower_adjust_work));
4294} 4215}
4295 4216
4296static int b43_op_set_retry_limit(struct ieee80211_hw *hw, 4217static int b43_op_set_retry_limit(struct ieee80211_hw *hw,
@@ -4313,7 +4234,8 @@ out_unlock:
4313 return err; 4234 return err;
4314} 4235}
4315 4236
4316static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set) 4237static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
4238 struct ieee80211_sta *sta, bool set)
4317{ 4239{
4318 struct b43_wl *wl = hw_to_b43_wl(hw); 4240 struct b43_wl *wl = hw_to_b43_wl(hw);
4319 unsigned long flags; 4241 unsigned long flags;
@@ -4328,7 +4250,7 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set)
4328static void b43_op_sta_notify(struct ieee80211_hw *hw, 4250static void b43_op_sta_notify(struct ieee80211_hw *hw,
4329 struct ieee80211_vif *vif, 4251 struct ieee80211_vif *vif,
4330 enum sta_notify_cmd notify_cmd, 4252 enum sta_notify_cmd notify_cmd,
4331 const u8 *addr) 4253 struct ieee80211_sta *sta)
4332{ 4254{
4333 struct b43_wl *wl = hw_to_b43_wl(hw); 4255 struct b43_wl *wl = hw_to_b43_wl(hw);
4334 4256
@@ -4422,6 +4344,7 @@ static void b43_wireless_core_detach(struct b43_wldev *dev)
4422 /* We release firmware that late to not be required to re-request 4344 /* We release firmware that late to not be required to re-request
4423 * is all the time when we reinit the core. */ 4345 * is all the time when we reinit the core. */
4424 b43_release_firmware(dev); 4346 b43_release_firmware(dev);
4347 b43_phy_free(dev);
4425} 4348}
4426 4349
4427static int b43_wireless_core_attach(struct b43_wldev *dev) 4350static int b43_wireless_core_attach(struct b43_wldev *dev)
@@ -4495,30 +4418,35 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4495 } 4418 }
4496 } 4419 }
4497 4420
4421 err = b43_phy_allocate(dev);
4422 if (err)
4423 goto err_powerdown;
4424
4498 dev->phy.gmode = have_2ghz_phy; 4425 dev->phy.gmode = have_2ghz_phy;
4499 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; 4426 tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
4500 b43_wireless_core_reset(dev, tmp); 4427 b43_wireless_core_reset(dev, tmp);
4501 4428
4502 err = b43_validate_chipaccess(dev); 4429 err = b43_validate_chipaccess(dev);
4503 if (err) 4430 if (err)
4504 goto err_powerdown; 4431 goto err_phy_free;
4505 err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy); 4432 err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy);
4506 if (err) 4433 if (err)
4507 goto err_powerdown; 4434 goto err_phy_free;
4508 4435
4509 /* Now set some default "current_dev" */ 4436 /* Now set some default "current_dev" */
4510 if (!wl->current_dev) 4437 if (!wl->current_dev)
4511 wl->current_dev = dev; 4438 wl->current_dev = dev;
4512 INIT_WORK(&dev->restart_work, b43_chip_reset); 4439 INIT_WORK(&dev->restart_work, b43_chip_reset);
4513 4440
4514 b43_radio_turn_off(dev, 1); 4441 dev->phy.ops->switch_analog(dev, 0);
4515 b43_switch_analog(dev, 0);
4516 ssb_device_disable(dev->dev, 0); 4442 ssb_device_disable(dev->dev, 0);
4517 ssb_bus_may_powerdown(bus); 4443 ssb_bus_may_powerdown(bus);
4518 4444
4519out: 4445out:
4520 return err; 4446 return err;
4521 4447
4448err_phy_free:
4449 b43_phy_free(dev);
4522err_powerdown: 4450err_powerdown:
4523 ssb_bus_may_powerdown(bus); 4451 ssb_bus_may_powerdown(bus);
4524 return err; 4452 return err;
@@ -4615,9 +4543,11 @@ static void b43_sprom_fixup(struct ssb_bus *bus)
4615 pdev = bus->host_pci; 4543 pdev = bus->host_pci;
4616 if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || 4544 if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) ||
4617 IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) || 4545 IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) ||
4546 IS_PDEV(pdev, BROADCOM, 0x4320, HP, 0x12f8) ||
4618 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || 4547 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) ||
4619 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) || 4548 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) ||
4620 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013)) 4549 IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013) ||
4550 IS_PDEV(pdev, BROADCOM, 0x4320, MOTOROLA, 0x7010))
4621 bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; 4551 bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST;
4622 } 4552 }
4623} 4553}
@@ -4650,6 +4580,13 @@ static int b43_wireless_init(struct ssb_device *dev)
4650 IEEE80211_HW_SIGNAL_DBM | 4580 IEEE80211_HW_SIGNAL_DBM |
4651 IEEE80211_HW_NOISE_DBM; 4581 IEEE80211_HW_NOISE_DBM;
4652 4582
4583 hw->wiphy->interface_modes =
4584 BIT(NL80211_IFTYPE_AP) |
4585 BIT(NL80211_IFTYPE_MESH_POINT) |
4586 BIT(NL80211_IFTYPE_STATION) |
4587 BIT(NL80211_IFTYPE_WDS) |
4588 BIT(NL80211_IFTYPE_ADHOC);
4589
4653 hw->queues = b43_modparam_qos ? 4 : 1; 4590 hw->queues = b43_modparam_qos ? 4 : 1;
4654 SET_IEEE80211_DEV(hw, dev->dev); 4591 SET_IEEE80211_DEV(hw, dev->dev);
4655 if (is_valid_ether_addr(sprom->et1mac)) 4592 if (is_valid_ether_addr(sprom->et1mac))
@@ -4667,8 +4604,8 @@ static int b43_wireless_init(struct ssb_device *dev)
4667 spin_lock_init(&wl->shm_lock); 4604 spin_lock_init(&wl->shm_lock);
4668 mutex_init(&wl->mutex); 4605 mutex_init(&wl->mutex);
4669 INIT_LIST_HEAD(&wl->devlist); 4606 INIT_LIST_HEAD(&wl->devlist);
4670 INIT_WORK(&wl->qos_update_work, b43_qos_update_work);
4671 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); 4607 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
4608 INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
4672 4609
4673 ssb_set_devtypedata(dev, wl); 4610 ssb_set_devtypedata(dev, wl);
4674 b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); 4611 b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id);
diff --git a/drivers/net/wireless/b43/phy.h b/drivers/net/wireless/b43/phy.h
deleted file mode 100644
index 4aab10903529..000000000000
--- a/drivers/net/wireless/b43/phy.h
+++ /dev/null
@@ -1,340 +0,0 @@
1#ifndef B43_PHY_H_
2#define B43_PHY_H_
3
4#include <linux/types.h>
5
6struct b43_wldev;
7struct b43_phy;
8
9/*** PHY Registers ***/
10
11/* Routing */
12#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
13#define B43_PHYROUTE_BASE 0x0000 /* Base registers */
14#define B43_PHYROUTE_OFDM_GPHY 0x0400 /* OFDM register routing for G-PHYs */
15#define B43_PHYROUTE_EXT_GPHY 0x0800 /* Extended G-PHY registers */
16#define B43_PHYROUTE_N_BMODE 0x0C00 /* N-PHY BMODE registers */
17
18/* CCK (B-PHY) registers. */
19#define B43_PHY_CCK(reg) ((reg) | B43_PHYROUTE_BASE)
20/* N-PHY registers. */
21#define B43_PHY_N(reg) ((reg) | B43_PHYROUTE_BASE)
22/* N-PHY BMODE registers. */
23#define B43_PHY_N_BMODE(reg) ((reg) | B43_PHYROUTE_N_BMODE)
24/* OFDM (A-PHY) registers. */
25#define B43_PHY_OFDM(reg) ((reg) | B43_PHYROUTE_OFDM_GPHY)
26/* Extended G-PHY registers. */
27#define B43_PHY_EXTG(reg) ((reg) | B43_PHYROUTE_EXT_GPHY)
28
29/* OFDM (A) PHY Registers */
30#define B43_PHY_VERSION_OFDM B43_PHY_OFDM(0x00) /* Versioning register for A-PHY */
31#define B43_PHY_BBANDCFG B43_PHY_OFDM(0x01) /* Baseband config */
32#define B43_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */
33#define B43_PHY_BBANDCFG_RXANT_SHIFT 7
34#define B43_PHY_PWRDOWN B43_PHY_OFDM(0x03) /* Powerdown */
35#define B43_PHY_CRSTHRES1_R1 B43_PHY_OFDM(0x06) /* CRS Threshold 1 (phy.rev 1 only) */
36#define B43_PHY_LNAHPFCTL B43_PHY_OFDM(0x1C) /* LNA/HPF control */
37#define B43_PHY_LPFGAINCTL B43_PHY_OFDM(0x20) /* LPF Gain control */
38#define B43_PHY_ADIVRELATED B43_PHY_OFDM(0x27) /* FIXME rename */
39#define B43_PHY_CRS0 B43_PHY_OFDM(0x29)
40#define B43_PHY_CRS0_EN 0x4000
41#define B43_PHY_PEAK_COUNT B43_PHY_OFDM(0x30)
42#define B43_PHY_ANTDWELL B43_PHY_OFDM(0x2B) /* Antenna dwell */
43#define B43_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */
44#define B43_PHY_ENCORE B43_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */
45#define B43_PHY_ENCORE_EN 0x0200 /* Encore enable */
46#define B43_PHY_LMS B43_PHY_OFDM(0x55)
47#define B43_PHY_OFDM61 B43_PHY_OFDM(0x61) /* FIXME rename */
48#define B43_PHY_OFDM61_10 0x0010 /* FIXME rename */
49#define B43_PHY_IQBAL B43_PHY_OFDM(0x69) /* I/Q balance */
50#define B43_PHY_BBTXDC_BIAS B43_PHY_OFDM(0x6B) /* Baseband TX DC bias */
51#define B43_PHY_OTABLECTL B43_PHY_OFDM(0x72) /* OFDM table control (see below) */
52#define B43_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */
53#define B43_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */
54#define B43_PHY_OTABLENR_SHIFT 10
55#define B43_PHY_OTABLEI B43_PHY_OFDM(0x73) /* OFDM table data I */
56#define B43_PHY_OTABLEQ B43_PHY_OFDM(0x74) /* OFDM table data Q */
57#define B43_PHY_HPWR_TSSICTL B43_PHY_OFDM(0x78) /* Hardware power TSSI control */
58#define B43_PHY_ADCCTL B43_PHY_OFDM(0x7A) /* ADC control */
59#define B43_PHY_IDLE_TSSI B43_PHY_OFDM(0x7B)
60#define B43_PHY_A_TEMP_SENSE B43_PHY_OFDM(0x7C) /* A PHY temperature sense */
61#define B43_PHY_NRSSITHRES B43_PHY_OFDM(0x8A) /* NRSSI threshold */
62#define B43_PHY_ANTWRSETT B43_PHY_OFDM(0x8C) /* Antenna WR settle */
63#define B43_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */
64#define B43_PHY_CLIPPWRDOWNT B43_PHY_OFDM(0x93) /* Clip powerdown threshold */
65#define B43_PHY_OFDM9B B43_PHY_OFDM(0x9B) /* FIXME rename */
66#define B43_PHY_N1P1GAIN B43_PHY_OFDM(0xA0)
67#define B43_PHY_P1P2GAIN B43_PHY_OFDM(0xA1)
68#define B43_PHY_N1N2GAIN B43_PHY_OFDM(0xA2)
69#define B43_PHY_CLIPTHRES B43_PHY_OFDM(0xA3)
70#define B43_PHY_CLIPN1P2THRES B43_PHY_OFDM(0xA4)
71#define B43_PHY_CCKSHIFTBITS_WA B43_PHY_OFDM(0xA5) /* CCK shiftbits workaround, FIXME rename */
72#define B43_PHY_CCKSHIFTBITS B43_PHY_OFDM(0xA7) /* FIXME rename */
73#define B43_PHY_DIVSRCHIDX B43_PHY_OFDM(0xA8) /* Divider search gain/index */
74#define B43_PHY_CLIPP2THRES B43_PHY_OFDM(0xA9)
75#define B43_PHY_CLIPP3THRES B43_PHY_OFDM(0xAA)
76#define B43_PHY_DIVP1P2GAIN B43_PHY_OFDM(0xAB)
77#define B43_PHY_DIVSRCHGAINBACK B43_PHY_OFDM(0xAD) /* Divider search gain back */
78#define B43_PHY_DIVSRCHGAINCHNG B43_PHY_OFDM(0xAE) /* Divider search gain change */
79#define B43_PHY_CRSTHRES1 B43_PHY_OFDM(0xC0) /* CRS Threshold 1 (phy.rev >= 2 only) */
80#define B43_PHY_CRSTHRES2 B43_PHY_OFDM(0xC1) /* CRS Threshold 2 (phy.rev >= 2 only) */
81#define B43_PHY_TSSIP_LTBASE B43_PHY_OFDM(0x380) /* TSSI power lookup table base */
82#define B43_PHY_DC_LTBASE B43_PHY_OFDM(0x3A0) /* DC lookup table base */
83#define B43_PHY_GAIN_LTBASE B43_PHY_OFDM(0x3C0) /* Gain lookup table base */
84
85/* CCK (B) PHY Registers */
86#define B43_PHY_VERSION_CCK B43_PHY_CCK(0x00) /* Versioning register for B-PHY */
87#define B43_PHY_CCKBBANDCFG B43_PHY_CCK(0x01) /* Contains antenna 0/1 control bit */
88#define B43_PHY_PGACTL B43_PHY_CCK(0x15) /* PGA control */
89#define B43_PHY_PGACTL_LPF 0x1000 /* Low pass filter (?) */
90#define B43_PHY_PGACTL_LOWBANDW 0x0040 /* Low bandwidth flag */
91#define B43_PHY_PGACTL_UNKNOWN 0xEFA0
92#define B43_PHY_FBCTL1 B43_PHY_CCK(0x18) /* Frequency bandwidth control 1 */
93#define B43_PHY_ITSSI B43_PHY_CCK(0x29) /* Idle TSSI */
94#define B43_PHY_LO_LEAKAGE B43_PHY_CCK(0x2D) /* Measured LO leakage */
95#define B43_PHY_ENERGY B43_PHY_CCK(0x33) /* Energy */
96#define B43_PHY_SYNCCTL B43_PHY_CCK(0x35)
97#define B43_PHY_FBCTL2 B43_PHY_CCK(0x38) /* Frequency bandwidth control 2 */
98#define B43_PHY_DACCTL B43_PHY_CCK(0x60) /* DAC control */
99#define B43_PHY_RCCALOVER B43_PHY_CCK(0x78) /* RC calibration override */
100
101/* Extended G-PHY Registers */
102#define B43_PHY_CLASSCTL B43_PHY_EXTG(0x02) /* Classify control */
103#define B43_PHY_GTABCTL B43_PHY_EXTG(0x03) /* G-PHY table control (see below) */
104#define B43_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */
105#define B43_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */
106#define B43_PHY_GTABNR_SHIFT 10
107#define B43_PHY_GTABDATA B43_PHY_EXTG(0x04) /* G-PHY table data */
108#define B43_PHY_LO_MASK B43_PHY_EXTG(0x0F) /* Local Oscillator control mask */
109#define B43_PHY_LO_CTL B43_PHY_EXTG(0x10) /* Local Oscillator control */
110#define B43_PHY_RFOVER B43_PHY_EXTG(0x11) /* RF override */
111#define B43_PHY_RFOVERVAL B43_PHY_EXTG(0x12) /* RF override value */
112#define B43_PHY_RFOVERVAL_EXTLNA 0x8000
113#define B43_PHY_RFOVERVAL_LNA 0x7000
114#define B43_PHY_RFOVERVAL_LNA_SHIFT 12
115#define B43_PHY_RFOVERVAL_PGA 0x0F00
116#define B43_PHY_RFOVERVAL_PGA_SHIFT 8
117#define B43_PHY_RFOVERVAL_UNK 0x0010 /* Unknown, always set. */
118#define B43_PHY_RFOVERVAL_TRSWRX 0x00E0
119#define B43_PHY_RFOVERVAL_BW 0x0003 /* Bandwidth flags */
120#define B43_PHY_RFOVERVAL_BW_LPF 0x0001 /* Low Pass Filter */
121#define B43_PHY_RFOVERVAL_BW_LBW 0x0002 /* Low Bandwidth (when set), high when unset */
122#define B43_PHY_ANALOGOVER B43_PHY_EXTG(0x14) /* Analog override */
123#define B43_PHY_ANALOGOVERVAL B43_PHY_EXTG(0x15) /* Analog override value */
124
125/*** OFDM table numbers ***/
126#define B43_OFDMTAB(number, offset) (((number) << B43_PHY_OTABLENR_SHIFT) | (offset))
127#define B43_OFDMTAB_AGC1 B43_OFDMTAB(0x00, 0)
128#define B43_OFDMTAB_GAIN0 B43_OFDMTAB(0x00, 0)
129#define B43_OFDMTAB_GAINX B43_OFDMTAB(0x01, 0) //TODO rename
130#define B43_OFDMTAB_GAIN1 B43_OFDMTAB(0x01, 4)
131#define B43_OFDMTAB_AGC3 B43_OFDMTAB(0x02, 0)
132#define B43_OFDMTAB_GAIN2 B43_OFDMTAB(0x02, 3)
133#define B43_OFDMTAB_LNAHPFGAIN1 B43_OFDMTAB(0x03, 0)
134#define B43_OFDMTAB_WRSSI B43_OFDMTAB(0x04, 0)
135#define B43_OFDMTAB_LNAHPFGAIN2 B43_OFDMTAB(0x04, 0)
136#define B43_OFDMTAB_NOISESCALE B43_OFDMTAB(0x05, 0)
137#define B43_OFDMTAB_AGC2 B43_OFDMTAB(0x06, 0)
138#define B43_OFDMTAB_ROTOR B43_OFDMTAB(0x08, 0)
139#define B43_OFDMTAB_ADVRETARD B43_OFDMTAB(0x09, 0)
140#define B43_OFDMTAB_DAC B43_OFDMTAB(0x0C, 0)
141#define B43_OFDMTAB_DC B43_OFDMTAB(0x0E, 7)
142#define B43_OFDMTAB_PWRDYN2 B43_OFDMTAB(0x0E, 12)
143#define B43_OFDMTAB_LNAGAIN B43_OFDMTAB(0x0E, 13)
144#define B43_OFDMTAB_UNKNOWN_0F B43_OFDMTAB(0x0F, 0) //TODO rename
145#define B43_OFDMTAB_UNKNOWN_APHY B43_OFDMTAB(0x0F, 7) //TODO rename
146#define B43_OFDMTAB_LPFGAIN B43_OFDMTAB(0x0F, 12)
147#define B43_OFDMTAB_RSSI B43_OFDMTAB(0x10, 0)
148#define B43_OFDMTAB_UNKNOWN_11 B43_OFDMTAB(0x11, 4) //TODO rename
149#define B43_OFDMTAB_AGC1_R1 B43_OFDMTAB(0x13, 0)
150#define B43_OFDMTAB_GAINX_R1 B43_OFDMTAB(0x14, 0) //TODO remove!
151#define B43_OFDMTAB_MINSIGSQ B43_OFDMTAB(0x14, 0)
152#define B43_OFDMTAB_AGC3_R1 B43_OFDMTAB(0x15, 0)
153#define B43_OFDMTAB_WRSSI_R1 B43_OFDMTAB(0x15, 4)
154#define B43_OFDMTAB_TSSI B43_OFDMTAB(0x15, 0)
155#define B43_OFDMTAB_DACRFPABB B43_OFDMTAB(0x16, 0)
156#define B43_OFDMTAB_DACOFF B43_OFDMTAB(0x17, 0)
157#define B43_OFDMTAB_DCBIAS B43_OFDMTAB(0x18, 0)
158
159u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset);
160void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
161 u16 offset, u16 value);
162u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
163void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
164 u16 offset, u32 value);
165
166/*** G-PHY table numbers */
167#define B43_GTAB(number, offset) (((number) << B43_PHY_GTABNR_SHIFT) | (offset))
168#define B43_GTAB_NRSSI B43_GTAB(0x00, 0)
169#define B43_GTAB_TRFEMW B43_GTAB(0x0C, 0x120)
170#define B43_GTAB_ORIGTR B43_GTAB(0x2E, 0x298)
171
172u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset); //TODO implement
173void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value); //TODO implement
174
175#define B43_DEFAULT_CHANNEL_A 36
176#define B43_DEFAULT_CHANNEL_BG 6
177
178enum {
179 B43_ANTENNA0, /* Antenna 0 */
180 B43_ANTENNA1, /* Antenna 0 */
181 B43_ANTENNA_AUTO1, /* Automatic, starting with antenna 1 */
182 B43_ANTENNA_AUTO0, /* Automatic, starting with antenna 0 */
183 B43_ANTENNA2,
184 B43_ANTENNA3 = 8,
185
186 B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0,
187 B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO,
188};
189
190enum {
191 B43_INTERFMODE_NONE,
192 B43_INTERFMODE_NONWLAN,
193 B43_INTERFMODE_MANUALWLAN,
194 B43_INTERFMODE_AUTOWLAN,
195};
196
197/* Masks for the different PHY versioning registers. */
198#define B43_PHYVER_ANALOG 0xF000
199#define B43_PHYVER_ANALOG_SHIFT 12
200#define B43_PHYVER_TYPE 0x0F00
201#define B43_PHYVER_TYPE_SHIFT 8
202#define B43_PHYVER_VERSION 0x00FF
203
204void b43_phy_lock(struct b43_wldev *dev);
205void b43_phy_unlock(struct b43_wldev *dev);
206
207
208/* Read a value from a PHY register */
209u16 b43_phy_read(struct b43_wldev *dev, u16 offset);
210/* Write a value to a PHY register */
211void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val);
212/* Mask a PHY register with a mask */
213void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask);
214/* OR a PHY register with a bitmap */
215void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set);
216/* Mask and OR a PHY register with a mask and bitmap */
217void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
218
219
220int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev);
221
222void b43_phy_early_init(struct b43_wldev *dev);
223int b43_phy_init(struct b43_wldev *dev);
224
225void b43_set_rx_antenna(struct b43_wldev *dev, int antenna);
226
227void b43_phy_xmitpower(struct b43_wldev *dev);
228
229/* Returns the boolean whether the board has HardwarePowerControl */
230bool b43_has_hardware_pctl(struct b43_phy *phy);
231/* Returns the boolean whether "TX Magnification" is enabled. */
232#define has_tx_magnification(phy) \
233 (((phy)->rev >= 2) && \
234 ((phy)->radio_ver == 0x2050) && \
235 ((phy)->radio_rev == 8))
236/* Card uses the loopback gain stuff */
237#define has_loopback_gain(phy) \
238 (((phy)->rev > 1) || ((phy)->gmode))
239
240/* Radio Attenuation (RF Attenuation) */
241struct b43_rfatt {
242 u8 att; /* Attenuation value */
243 bool with_padmix; /* Flag, PAD Mixer enabled. */
244};
245struct b43_rfatt_list {
246 /* Attenuation values list */
247 const struct b43_rfatt *list;
248 u8 len;
249 /* Minimum/Maximum attenuation values */
250 u8 min_val;
251 u8 max_val;
252};
253
254/* Returns true, if the values are the same. */
255static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
256 const struct b43_rfatt *b)
257{
258 return ((a->att == b->att) &&
259 (a->with_padmix == b->with_padmix));
260}
261
262/* Baseband Attenuation */
263struct b43_bbatt {
264 u8 att; /* Attenuation value */
265};
266struct b43_bbatt_list {
267 /* Attenuation values list */
268 const struct b43_bbatt *list;
269 u8 len;
270 /* Minimum/Maximum attenuation values */
271 u8 min_val;
272 u8 max_val;
273};
274
275/* Returns true, if the values are the same. */
276static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
277 const struct b43_bbatt *b)
278{
279 return (a->att == b->att);
280}
281
282/* tx_control bits. */
283#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */
284#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */
285#define B43_TXCTL_TXMIX 0x10 /* TX Mixer Gain */
286
287/* Write BasebandAttenuation value to the device. */
288void b43_phy_set_baseband_attenuation(struct b43_wldev *dev,
289 u16 baseband_attenuation);
290
291extern const u8 b43_radio_channel_codes_bg[];
292
293void b43_radio_lock(struct b43_wldev *dev);
294void b43_radio_unlock(struct b43_wldev *dev);
295
296
297/* Read a value from a 16bit radio register */
298u16 b43_radio_read16(struct b43_wldev *dev, u16 offset);
299/* Write a value to a 16bit radio register */
300void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val);
301/* Mask a 16bit radio register with a mask */
302void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask);
303/* OR a 16bit radio register with a bitmap */
304void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
305/* Mask and OR a PHY register with a mask and bitmap */
306void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
307
308
309u16 b43_radio_init2050(struct b43_wldev *dev);
310void b43_radio_init2060(struct b43_wldev *dev);
311
312void b43_radio_turn_on(struct b43_wldev *dev);
313void b43_radio_turn_off(struct b43_wldev *dev, bool force);
314
315int b43_radio_selectchannel(struct b43_wldev *dev, u8 channel,
316 int synthetic_pu_workaround);
317
318u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel);
319u8 b43_radio_aci_scan(struct b43_wldev *dev);
320
321int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode);
322
323void b43_calc_nrssi_slope(struct b43_wldev *dev);
324void b43_calc_nrssi_threshold(struct b43_wldev *dev);
325s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset);
326void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val);
327void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val);
328void b43_nrssi_mem_update(struct b43_wldev *dev);
329
330void b43_radio_set_tx_iq(struct b43_wldev *dev);
331u16 b43_radio_calibrationvalue(struct b43_wldev *dev);
332
333void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
334 int *_bbatt, int *_rfatt);
335
336void b43_set_txpower_g(struct b43_wldev *dev,
337 const struct b43_bbatt *bbatt,
338 const struct b43_rfatt *rfatt, u8 tx_control);
339
340#endif /* B43_PHY_H_ */
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
new file mode 100644
index 000000000000..0f1a84c9de61
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -0,0 +1,643 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11a PHY driver
5
6 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
7 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
8 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
9 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
10 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; see the file COPYING. If not, write to
24 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
25 Boston, MA 02110-1301, USA.
26
27*/
28
29#include "b43.h"
30#include "phy_a.h"
31#include "phy_common.h"
32#include "wa.h"
33#include "tables.h"
34#include "main.h"
35
36
37/* Get the freq, as it has to be written to the device. */
38static inline u16 channel2freq_a(u8 channel)
39{
40 B43_WARN_ON(channel > 200);
41
42 return (5000 + 5 * channel);
43}
44
45static inline u16 freq_r3A_value(u16 frequency)
46{
47 u16 value;
48
49 if (frequency < 5091)
50 value = 0x0040;
51 else if (frequency < 5321)
52 value = 0x0000;
53 else if (frequency < 5806)
54 value = 0x0080;
55 else
56 value = 0x0040;
57
58 return value;
59}
60
61#if 0
62/* This function converts a TSSI value to dBm in Q5.2 */
63static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
64{
65 struct b43_phy *phy = &dev->phy;
66 struct b43_phy_a *aphy = phy->a;
67 s8 dbm = 0;
68 s32 tmp;
69
70 tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi);
71 tmp += 0x80;
72 tmp = clamp_val(tmp, 0x00, 0xFF);
73 dbm = aphy->tssi2dbm[tmp];
74 //TODO: There's a FIXME on the specs
75
76 return dbm;
77}
78#endif
79
80void b43_radio_set_tx_iq(struct b43_wldev *dev)
81{
82 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
83 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
84 u16 tmp = b43_radio_read16(dev, 0x001E);
85 int i, j;
86
87 for (i = 0; i < 5; i++) {
88 for (j = 0; j < 5; j++) {
89 if (tmp == (data_high[i] << 4 | data_low[j])) {
90 b43_phy_write(dev, 0x0069,
91 (i - j) << 8 | 0x00C0);
92 return;
93 }
94 }
95 }
96}
97
98static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
99{
100 u16 freq, r8, tmp;
101
102 freq = channel2freq_a(channel);
103
104 r8 = b43_radio_read16(dev, 0x0008);
105 b43_write16(dev, 0x03F0, freq);
106 b43_radio_write16(dev, 0x0008, r8);
107
108 //TODO: write max channel TX power? to Radio 0x2D
109 tmp = b43_radio_read16(dev, 0x002E);
110 tmp &= 0x0080;
111 //TODO: OR tmp with the Power out estimation for this channel?
112 b43_radio_write16(dev, 0x002E, tmp);
113
114 if (freq >= 4920 && freq <= 5500) {
115 /*
116 * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
117 * = (freq * 0.025862069
118 */
119 r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */
120 }
121 b43_radio_write16(dev, 0x0007, (r8 << 4) | r8);
122 b43_radio_write16(dev, 0x0020, (r8 << 4) | r8);
123 b43_radio_write16(dev, 0x0021, (r8 << 4) | r8);
124 b43_radio_write16(dev, 0x0022, (b43_radio_read16(dev, 0x0022)
125 & 0x000F) | (r8 << 4));
126 b43_radio_write16(dev, 0x002A, (r8 << 4));
127 b43_radio_write16(dev, 0x002B, (r8 << 4));
128 b43_radio_write16(dev, 0x0008, (b43_radio_read16(dev, 0x0008)
129 & 0x00F0) | (r8 << 4));
130 b43_radio_write16(dev, 0x0029, (b43_radio_read16(dev, 0x0029)
131 & 0xFF0F) | 0x00B0);
132 b43_radio_write16(dev, 0x0035, 0x00AA);
133 b43_radio_write16(dev, 0x0036, 0x0085);
134 b43_radio_write16(dev, 0x003A, (b43_radio_read16(dev, 0x003A)
135 & 0xFF20) |
136 freq_r3A_value(freq));
137 b43_radio_write16(dev, 0x003D,
138 b43_radio_read16(dev, 0x003D) & 0x00FF);
139 b43_radio_write16(dev, 0x0081, (b43_radio_read16(dev, 0x0081)
140 & 0xFF7F) | 0x0080);
141 b43_radio_write16(dev, 0x0035,
142 b43_radio_read16(dev, 0x0035) & 0xFFEF);
143 b43_radio_write16(dev, 0x0035, (b43_radio_read16(dev, 0x0035)
144 & 0xFFEF) | 0x0010);
145 b43_radio_set_tx_iq(dev);
146 //TODO: TSSI2dbm workaround
147//FIXME b43_phy_xmitpower(dev);
148}
149
150void b43_radio_init2060(struct b43_wldev *dev)
151{
152 b43_radio_write16(dev, 0x0004, 0x00C0);
153 b43_radio_write16(dev, 0x0005, 0x0008);
154 b43_radio_write16(dev, 0x0009, 0x0040);
155 b43_radio_write16(dev, 0x0005, 0x00AA);
156 b43_radio_write16(dev, 0x0032, 0x008F);
157 b43_radio_write16(dev, 0x0006, 0x008F);
158 b43_radio_write16(dev, 0x0034, 0x008F);
159 b43_radio_write16(dev, 0x002C, 0x0007);
160 b43_radio_write16(dev, 0x0082, 0x0080);
161 b43_radio_write16(dev, 0x0080, 0x0000);
162 b43_radio_write16(dev, 0x003F, 0x00DA);
163 b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
164 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010);
165 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
166 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020);
167 msleep(1); /* delay 400usec */
168
169 b43_radio_write16(dev, 0x0081,
170 (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010);
171 msleep(1); /* delay 400usec */
172
173 b43_radio_write16(dev, 0x0005,
174 (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008);
175 b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010);
176 b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
177 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040);
178 b43_radio_write16(dev, 0x0081,
179 (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040);
180 b43_radio_write16(dev, 0x0005,
181 (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
182 b43_phy_write(dev, 0x0063, 0xDDC6);
183 b43_phy_write(dev, 0x0069, 0x07BE);
184 b43_phy_write(dev, 0x006A, 0x0000);
185
186 aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev));
187
188 msleep(1);
189}
190
191static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
192{
193 int i;
194
195 if (dev->phy.rev < 3) {
196 if (enable)
197 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
198 b43_ofdmtab_write16(dev,
199 B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
200 b43_ofdmtab_write16(dev,
201 B43_OFDMTAB_WRSSI, i, 0xFFF8);
202 }
203 else
204 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
205 b43_ofdmtab_write16(dev,
206 B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
207 b43_ofdmtab_write16(dev,
208 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
209 }
210 } else {
211 if (enable)
212 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
213 b43_ofdmtab_write16(dev,
214 B43_OFDMTAB_WRSSI, i, 0x0820);
215 else
216 for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
217 b43_ofdmtab_write16(dev,
218 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
219 }
220}
221
222static void b43_phy_ww(struct b43_wldev *dev)
223{
224 u16 b, curr_s, best_s = 0xFFFF;
225 int i;
226
227 b43_phy_write(dev, B43_PHY_CRS0,
228 b43_phy_read(dev, B43_PHY_CRS0) & ~B43_PHY_CRS0_EN);
229 b43_phy_write(dev, B43_PHY_OFDM(0x1B),
230 b43_phy_read(dev, B43_PHY_OFDM(0x1B)) | 0x1000);
231 b43_phy_write(dev, B43_PHY_OFDM(0x82),
232 (b43_phy_read(dev, B43_PHY_OFDM(0x82)) & 0xF0FF) | 0x0300);
233 b43_radio_write16(dev, 0x0009,
234 b43_radio_read16(dev, 0x0009) | 0x0080);
235 b43_radio_write16(dev, 0x0012,
236 (b43_radio_read16(dev, 0x0012) & 0xFFFC) | 0x0002);
237 b43_wa_initgains(dev);
238 b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
239 b = b43_phy_read(dev, B43_PHY_PWRDOWN);
240 b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
241 b43_radio_write16(dev, 0x0004,
242 b43_radio_read16(dev, 0x0004) | 0x0004);
243 for (i = 0x10; i <= 0x20; i++) {
244 b43_radio_write16(dev, 0x0013, i);
245 curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
246 if (!curr_s) {
247 best_s = 0x0000;
248 break;
249 } else if (curr_s >= 0x0080)
250 curr_s = 0x0100 - curr_s;
251 if (curr_s < best_s)
252 best_s = curr_s;
253 }
254 b43_phy_write(dev, B43_PHY_PWRDOWN, b);
255 b43_radio_write16(dev, 0x0004,
256 b43_radio_read16(dev, 0x0004) & 0xFFFB);
257 b43_radio_write16(dev, 0x0013, best_s);
258 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
259 b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
260 b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
261 b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
262 b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
263 b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
264 b43_phy_write(dev, B43_PHY_OFDM(0xBB),
265 (b43_phy_read(dev, B43_PHY_OFDM(0xBB)) & 0xF000) | 0x0053);
266 b43_phy_write(dev, B43_PHY_OFDM61,
267 (b43_phy_read(dev, B43_PHY_OFDM61) & 0xFE1F) | 0x0120);
268 b43_phy_write(dev, B43_PHY_OFDM(0x13),
269 (b43_phy_read(dev, B43_PHY_OFDM(0x13)) & 0x0FFF) | 0x3000);
270 b43_phy_write(dev, B43_PHY_OFDM(0x14),
271 (b43_phy_read(dev, B43_PHY_OFDM(0x14)) & 0x0FFF) | 0x3000);
272 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
273 for (i = 0; i < 6; i++)
274 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
275 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
276 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
277 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
278 b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
279 b43_phy_write(dev, B43_PHY_CRS0,
280 b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
281}
282
283static void hardware_pctl_init_aphy(struct b43_wldev *dev)
284{
285 //TODO
286}
287
288void b43_phy_inita(struct b43_wldev *dev)
289{
290 struct ssb_bus *bus = dev->dev->bus;
291 struct b43_phy *phy = &dev->phy;
292
293 /* This lowlevel A-PHY init is also called from G-PHY init.
294 * So we must not access phy->a, if called from G-PHY code.
295 */
296 B43_WARN_ON((phy->type != B43_PHYTYPE_A) &&
297 (phy->type != B43_PHYTYPE_G));
298
299 might_sleep();
300
301 if (phy->rev >= 6) {
302 if (phy->type == B43_PHYTYPE_A)
303 b43_phy_write(dev, B43_PHY_OFDM(0x1B),
304 b43_phy_read(dev, B43_PHY_OFDM(0x1B)) & ~0x1000);
305 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
306 b43_phy_write(dev, B43_PHY_ENCORE,
307 b43_phy_read(dev, B43_PHY_ENCORE) | 0x0010);
308 else
309 b43_phy_write(dev, B43_PHY_ENCORE,
310 b43_phy_read(dev, B43_PHY_ENCORE) & ~0x1010);
311 }
312
313 b43_wa_all(dev);
314
315 if (phy->type == B43_PHYTYPE_A) {
316 if (phy->gmode && (phy->rev < 3))
317 b43_phy_write(dev, 0x0034,
318 b43_phy_read(dev, 0x0034) | 0x0001);
319 b43_phy_rssiagc(dev, 0);
320
321 b43_phy_write(dev, B43_PHY_CRS0,
322 b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
323
324 b43_radio_init2060(dev);
325
326 if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
327 ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
328 (bus->boardinfo.type == SSB_BOARD_BU4309))) {
329 ; //TODO: A PHY LO
330 }
331
332 if (phy->rev >= 3)
333 b43_phy_ww(dev);
334
335 hardware_pctl_init_aphy(dev);
336
337 //TODO: radar detection
338 }
339
340 if ((phy->type == B43_PHYTYPE_G) &&
341 (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
342 b43_phy_write(dev, B43_PHY_OFDM(0x6E),
343 (b43_phy_read(dev, B43_PHY_OFDM(0x6E))
344 & 0xE000) | 0x3CF);
345 }
346}
347
348/* Initialise the TSSI->dBm lookup table */
349static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
350{
351 struct b43_phy *phy = &dev->phy;
352 struct b43_phy_a *aphy = phy->a;
353 s16 pab0, pab1, pab2;
354
355 pab0 = (s16) (dev->dev->bus->sprom.pa1b0);
356 pab1 = (s16) (dev->dev->bus->sprom.pa1b1);
357 pab2 = (s16) (dev->dev->bus->sprom.pa1b2);
358
359 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
360 pab0 != -1 && pab1 != -1 && pab2 != -1) {
361 /* The pabX values are set in SPROM. Use them. */
362 if ((s8) dev->dev->bus->sprom.itssi_a != 0 &&
363 (s8) dev->dev->bus->sprom.itssi_a != -1)
364 aphy->tgt_idle_tssi =
365 (s8) (dev->dev->bus->sprom.itssi_a);
366 else
367 aphy->tgt_idle_tssi = 62;
368 aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
369 pab1, pab2);
370 if (!aphy->tssi2dbm)
371 return -ENOMEM;
372 } else {
373 /* pabX values not set in SPROM,
374 * but APHY needs a generated table. */
375 aphy->tssi2dbm = NULL;
376 b43err(dev->wl, "Could not generate tssi2dBm "
377 "table (wrong SPROM info)!\n");
378 return -ENODEV;
379 }
380
381 return 0;
382}
383
384static int b43_aphy_op_allocate(struct b43_wldev *dev)
385{
386 struct b43_phy_a *aphy;
387 int err;
388
389 aphy = kzalloc(sizeof(*aphy), GFP_KERNEL);
390 if (!aphy)
391 return -ENOMEM;
392 dev->phy.a = aphy;
393
394 err = b43_aphy_init_tssi2dbm_table(dev);
395 if (err)
396 goto err_free_aphy;
397
398 return 0;
399
400err_free_aphy:
401 kfree(aphy);
402 dev->phy.a = NULL;
403
404 return err;
405}
406
407static void b43_aphy_op_prepare_structs(struct b43_wldev *dev)
408{
409 struct b43_phy *phy = &dev->phy;
410 struct b43_phy_a *aphy = phy->a;
411 const void *tssi2dbm;
412 int tgt_idle_tssi;
413
414 /* tssi2dbm table is constant, so it is initialized at alloc time.
415 * Save a copy of the pointer. */
416 tssi2dbm = aphy->tssi2dbm;
417 tgt_idle_tssi = aphy->tgt_idle_tssi;
418
419 /* Zero out the whole PHY structure. */
420 memset(aphy, 0, sizeof(*aphy));
421
422 aphy->tssi2dbm = tssi2dbm;
423 aphy->tgt_idle_tssi = tgt_idle_tssi;
424
425 //TODO init struct b43_phy_a
426
427}
428
429static void b43_aphy_op_free(struct b43_wldev *dev)
430{
431 struct b43_phy *phy = &dev->phy;
432 struct b43_phy_a *aphy = phy->a;
433
434 kfree(aphy->tssi2dbm);
435 aphy->tssi2dbm = NULL;
436
437 kfree(aphy);
438 dev->phy.a = NULL;
439}
440
441static int b43_aphy_op_init(struct b43_wldev *dev)
442{
443 b43_phy_inita(dev);
444
445 return 0;
446}
447
448static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset)
449{
450 /* OFDM registers are base-registers for the A-PHY. */
451 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
452 offset &= ~B43_PHYROUTE;
453 offset |= B43_PHYROUTE_BASE;
454 }
455
456#if B43_DEBUG
457 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
458 /* Ext-G registers are only available on G-PHYs */
459 b43err(dev->wl, "Invalid EXT-G PHY access at "
460 "0x%04X on A-PHY\n", offset);
461 dump_stack();
462 }
463 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
464 /* N-BMODE registers are only available on N-PHYs */
465 b43err(dev->wl, "Invalid N-BMODE PHY access at "
466 "0x%04X on A-PHY\n", offset);
467 dump_stack();
468 }
469#endif /* B43_DEBUG */
470
471 return offset;
472}
473
474static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg)
475{
476 reg = adjust_phyreg(dev, reg);
477 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
478 return b43_read16(dev, B43_MMIO_PHY_DATA);
479}
480
481static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
482{
483 reg = adjust_phyreg(dev, reg);
484 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
485 b43_write16(dev, B43_MMIO_PHY_DATA, value);
486}
487
488static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg)
489{
490 /* Register 1 is a 32-bit register. */
491 B43_WARN_ON(reg == 1);
492 /* A-PHY needs 0x40 for read access */
493 reg |= 0x40;
494
495 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
496 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
497}
498
499static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
500{
501 /* Register 1 is a 32-bit register. */
502 B43_WARN_ON(reg == 1);
503
504 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
505 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
506}
507
508static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev)
509{
510 return (dev->phy.rev >= 5);
511}
512
513static void b43_aphy_op_software_rfkill(struct b43_wldev *dev,
514 enum rfkill_state state)
515{
516 struct b43_phy *phy = &dev->phy;
517
518 if (state == RFKILL_STATE_UNBLOCKED) {
519 if (phy->radio_on)
520 return;
521 b43_radio_write16(dev, 0x0004, 0x00C0);
522 b43_radio_write16(dev, 0x0005, 0x0008);
523 b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7);
524 b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7);
525 b43_radio_init2060(dev);
526 } else {
527 b43_radio_write16(dev, 0x0004, 0x00FF);
528 b43_radio_write16(dev, 0x0005, 0x00FB);
529 b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008);
530 b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008);
531 }
532}
533
534static int b43_aphy_op_switch_channel(struct b43_wldev *dev,
535 unsigned int new_channel)
536{
537 if (new_channel > 200)
538 return -EINVAL;
539 aphy_channel_switch(dev, new_channel);
540
541 return 0;
542}
543
544static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev)
545{
546 return 36; /* Default to channel 36 */
547}
548
549static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
550{//TODO
551 struct b43_phy *phy = &dev->phy;
552 u64 hf;
553 u16 tmp;
554 int autodiv = 0;
555
556 if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
557 autodiv = 1;
558
559 hf = b43_hf_read(dev);
560 hf &= ~B43_HF_ANTDIVHELP;
561 b43_hf_write(dev, hf);
562
563 tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
564 tmp &= ~B43_PHY_BBANDCFG_RXANT;
565 tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
566 << B43_PHY_BBANDCFG_RXANT_SHIFT;
567 b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);
568
569 if (autodiv) {
570 tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
571 if (antenna == B43_ANTENNA_AUTO0)
572 tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
573 else
574 tmp |= B43_PHY_ANTDWELL_AUTODIV1;
575 b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
576 }
577 if (phy->rev < 3) {
578 tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
579 tmp = (tmp & 0xFF00) | 0x24;
580 b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
581 } else {
582 tmp = b43_phy_read(dev, B43_PHY_OFDM61);
583 tmp |= 0x10;
584 b43_phy_write(dev, B43_PHY_OFDM61, tmp);
585 if (phy->analog == 3) {
586 b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
587 0x1D);
588 b43_phy_write(dev, B43_PHY_ADIVRELATED,
589 8);
590 } else {
591 b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
592 0x3A);
593 tmp =
594 b43_phy_read(dev,
595 B43_PHY_ADIVRELATED);
596 tmp = (tmp & 0xFF00) | 8;
597 b43_phy_write(dev, B43_PHY_ADIVRELATED,
598 tmp);
599 }
600 }
601
602 hf |= B43_HF_ANTDIVHELP;
603 b43_hf_write(dev, hf);
604}
605
606static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev)
607{//TODO
608}
609
610static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev,
611 bool ignore_tssi)
612{//TODO
613 return B43_TXPWR_RES_DONE;
614}
615
616static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev)
617{//TODO
618}
619
620static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev)
621{//TODO
622}
623
624const struct b43_phy_operations b43_phyops_a = {
625 .allocate = b43_aphy_op_allocate,
626 .free = b43_aphy_op_free,
627 .prepare_structs = b43_aphy_op_prepare_structs,
628 .init = b43_aphy_op_init,
629 .phy_read = b43_aphy_op_read,
630 .phy_write = b43_aphy_op_write,
631 .radio_read = b43_aphy_op_radio_read,
632 .radio_write = b43_aphy_op_radio_write,
633 .supports_hwpctl = b43_aphy_op_supports_hwpctl,
634 .software_rfkill = b43_aphy_op_software_rfkill,
635 .switch_analog = b43_phyop_switch_analog_generic,
636 .switch_channel = b43_aphy_op_switch_channel,
637 .get_default_chan = b43_aphy_op_get_default_chan,
638 .set_rx_antenna = b43_aphy_op_set_rx_antenna,
639 .recalc_txpower = b43_aphy_op_recalc_txpower,
640 .adjust_txpower = b43_aphy_op_adjust_txpower,
641 .pwork_15sec = b43_aphy_op_pwork_15sec,
642 .pwork_60sec = b43_aphy_op_pwork_60sec,
643};
diff --git a/drivers/net/wireless/b43/phy_a.h b/drivers/net/wireless/b43/phy_a.h
new file mode 100644
index 000000000000..5cfaab7b16ee
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_a.h
@@ -0,0 +1,130 @@
1#ifndef LINUX_B43_PHY_A_H_
2#define LINUX_B43_PHY_A_H_
3
4#include "phy_common.h"
5
6
7/* OFDM (A) PHY Registers */
8#define B43_PHY_VERSION_OFDM B43_PHY_OFDM(0x00) /* Versioning register for A-PHY */
9#define B43_PHY_BBANDCFG B43_PHY_OFDM(0x01) /* Baseband config */
10#define B43_PHY_BBANDCFG_RXANT 0x180 /* RX Antenna selection */
11#define B43_PHY_BBANDCFG_RXANT_SHIFT 7
12#define B43_PHY_PWRDOWN B43_PHY_OFDM(0x03) /* Powerdown */
13#define B43_PHY_CRSTHRES1_R1 B43_PHY_OFDM(0x06) /* CRS Threshold 1 (phy.rev 1 only) */
14#define B43_PHY_LNAHPFCTL B43_PHY_OFDM(0x1C) /* LNA/HPF control */
15#define B43_PHY_LPFGAINCTL B43_PHY_OFDM(0x20) /* LPF Gain control */
16#define B43_PHY_ADIVRELATED B43_PHY_OFDM(0x27) /* FIXME rename */
17#define B43_PHY_CRS0 B43_PHY_OFDM(0x29)
18#define B43_PHY_CRS0_EN 0x4000
19#define B43_PHY_PEAK_COUNT B43_PHY_OFDM(0x30)
20#define B43_PHY_ANTDWELL B43_PHY_OFDM(0x2B) /* Antenna dwell */
21#define B43_PHY_ANTDWELL_AUTODIV1 0x0100 /* Automatic RX diversity start antenna */
22#define B43_PHY_ENCORE B43_PHY_OFDM(0x49) /* "Encore" (RangeMax / BroadRange) */
23#define B43_PHY_ENCORE_EN 0x0200 /* Encore enable */
24#define B43_PHY_LMS B43_PHY_OFDM(0x55)
25#define B43_PHY_OFDM61 B43_PHY_OFDM(0x61) /* FIXME rename */
26#define B43_PHY_OFDM61_10 0x0010 /* FIXME rename */
27#define B43_PHY_IQBAL B43_PHY_OFDM(0x69) /* I/Q balance */
28#define B43_PHY_BBTXDC_BIAS B43_PHY_OFDM(0x6B) /* Baseband TX DC bias */
29#define B43_PHY_OTABLECTL B43_PHY_OFDM(0x72) /* OFDM table control (see below) */
30#define B43_PHY_OTABLEOFF 0x03FF /* OFDM table offset (see below) */
31#define B43_PHY_OTABLENR 0xFC00 /* OFDM table number (see below) */
32#define B43_PHY_OTABLENR_SHIFT 10
33#define B43_PHY_OTABLEI B43_PHY_OFDM(0x73) /* OFDM table data I */
34#define B43_PHY_OTABLEQ B43_PHY_OFDM(0x74) /* OFDM table data Q */
35#define B43_PHY_HPWR_TSSICTL B43_PHY_OFDM(0x78) /* Hardware power TSSI control */
36#define B43_PHY_ADCCTL B43_PHY_OFDM(0x7A) /* ADC control */
37#define B43_PHY_IDLE_TSSI B43_PHY_OFDM(0x7B)
38#define B43_PHY_A_TEMP_SENSE B43_PHY_OFDM(0x7C) /* A PHY temperature sense */
39#define B43_PHY_NRSSITHRES B43_PHY_OFDM(0x8A) /* NRSSI threshold */
40#define B43_PHY_ANTWRSETT B43_PHY_OFDM(0x8C) /* Antenna WR settle */
41#define B43_PHY_ANTWRSETT_ARXDIV 0x2000 /* Automatic RX diversity enabled */
42#define B43_PHY_CLIPPWRDOWNT B43_PHY_OFDM(0x93) /* Clip powerdown threshold */
43#define B43_PHY_OFDM9B B43_PHY_OFDM(0x9B) /* FIXME rename */
44#define B43_PHY_N1P1GAIN B43_PHY_OFDM(0xA0)
45#define B43_PHY_P1P2GAIN B43_PHY_OFDM(0xA1)
46#define B43_PHY_N1N2GAIN B43_PHY_OFDM(0xA2)
47#define B43_PHY_CLIPTHRES B43_PHY_OFDM(0xA3)
48#define B43_PHY_CLIPN1P2THRES B43_PHY_OFDM(0xA4)
49#define B43_PHY_CCKSHIFTBITS_WA B43_PHY_OFDM(0xA5) /* CCK shiftbits workaround, FIXME rename */
50#define B43_PHY_CCKSHIFTBITS B43_PHY_OFDM(0xA7) /* FIXME rename */
51#define B43_PHY_DIVSRCHIDX B43_PHY_OFDM(0xA8) /* Divider search gain/index */
52#define B43_PHY_CLIPP2THRES B43_PHY_OFDM(0xA9)
53#define B43_PHY_CLIPP3THRES B43_PHY_OFDM(0xAA)
54#define B43_PHY_DIVP1P2GAIN B43_PHY_OFDM(0xAB)
55#define B43_PHY_DIVSRCHGAINBACK B43_PHY_OFDM(0xAD) /* Divider search gain back */
56#define B43_PHY_DIVSRCHGAINCHNG B43_PHY_OFDM(0xAE) /* Divider search gain change */
57#define B43_PHY_CRSTHRES1 B43_PHY_OFDM(0xC0) /* CRS Threshold 1 (phy.rev >= 2 only) */
58#define B43_PHY_CRSTHRES2 B43_PHY_OFDM(0xC1) /* CRS Threshold 2 (phy.rev >= 2 only) */
59#define B43_PHY_TSSIP_LTBASE B43_PHY_OFDM(0x380) /* TSSI power lookup table base */
60#define B43_PHY_DC_LTBASE B43_PHY_OFDM(0x3A0) /* DC lookup table base */
61#define B43_PHY_GAIN_LTBASE B43_PHY_OFDM(0x3C0) /* Gain lookup table base */
62
63/*** OFDM table numbers ***/
64#define B43_OFDMTAB(number, offset) (((number) << B43_PHY_OTABLENR_SHIFT) | (offset))
65#define B43_OFDMTAB_AGC1 B43_OFDMTAB(0x00, 0)
66#define B43_OFDMTAB_GAIN0 B43_OFDMTAB(0x00, 0)
67#define B43_OFDMTAB_GAINX B43_OFDMTAB(0x01, 0) //TODO rename
68#define B43_OFDMTAB_GAIN1 B43_OFDMTAB(0x01, 4)
69#define B43_OFDMTAB_AGC3 B43_OFDMTAB(0x02, 0)
70#define B43_OFDMTAB_GAIN2 B43_OFDMTAB(0x02, 3)
71#define B43_OFDMTAB_LNAHPFGAIN1 B43_OFDMTAB(0x03, 0)
72#define B43_OFDMTAB_WRSSI B43_OFDMTAB(0x04, 0)
73#define B43_OFDMTAB_LNAHPFGAIN2 B43_OFDMTAB(0x04, 0)
74#define B43_OFDMTAB_NOISESCALE B43_OFDMTAB(0x05, 0)
75#define B43_OFDMTAB_AGC2 B43_OFDMTAB(0x06, 0)
76#define B43_OFDMTAB_ROTOR B43_OFDMTAB(0x08, 0)
77#define B43_OFDMTAB_ADVRETARD B43_OFDMTAB(0x09, 0)
78#define B43_OFDMTAB_DAC B43_OFDMTAB(0x0C, 0)
79#define B43_OFDMTAB_DC B43_OFDMTAB(0x0E, 7)
80#define B43_OFDMTAB_PWRDYN2 B43_OFDMTAB(0x0E, 12)
81#define B43_OFDMTAB_LNAGAIN B43_OFDMTAB(0x0E, 13)
82#define B43_OFDMTAB_UNKNOWN_0F B43_OFDMTAB(0x0F, 0) //TODO rename
83#define B43_OFDMTAB_UNKNOWN_APHY B43_OFDMTAB(0x0F, 7) //TODO rename
84#define B43_OFDMTAB_LPFGAIN B43_OFDMTAB(0x0F, 12)
85#define B43_OFDMTAB_RSSI B43_OFDMTAB(0x10, 0)
86#define B43_OFDMTAB_UNKNOWN_11 B43_OFDMTAB(0x11, 4) //TODO rename
87#define B43_OFDMTAB_AGC1_R1 B43_OFDMTAB(0x13, 0)
88#define B43_OFDMTAB_GAINX_R1 B43_OFDMTAB(0x14, 0) //TODO remove!
89#define B43_OFDMTAB_MINSIGSQ B43_OFDMTAB(0x14, 0)
90#define B43_OFDMTAB_AGC3_R1 B43_OFDMTAB(0x15, 0)
91#define B43_OFDMTAB_WRSSI_R1 B43_OFDMTAB(0x15, 4)
92#define B43_OFDMTAB_TSSI B43_OFDMTAB(0x15, 0)
93#define B43_OFDMTAB_DACRFPABB B43_OFDMTAB(0x16, 0)
94#define B43_OFDMTAB_DACOFF B43_OFDMTAB(0x17, 0)
95#define B43_OFDMTAB_DCBIAS B43_OFDMTAB(0x18, 0)
96
97u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset);
98void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
99 u16 offset, u16 value);
100u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset);
101void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
102 u16 offset, u32 value);
103
104
/* A-PHY specific per-device state.
 * Instantiated by the A-PHY "allocate" operation. */
struct b43_phy_a {
	/* Pointer to the table used to convert a
	 * TSSI value to dBm-Q5.2 */
	const s8 *tssi2dbm;
	/* Target idle TSSI */
	int tgt_idle_tssi;
	/* Current idle TSSI */
	int cur_idle_tssi;//FIXME value currently not set

	/* A-PHY TX Power control value. */
	u16 txpwr_offset;

	//TODO lots of missing stuff
};
119
120/**
121 * b43_phy_inita - Lowlevel A-PHY init routine.
122 * This is _only_ used by the G-PHY code.
123 */
124void b43_phy_inita(struct b43_wldev *dev);
125
126
127struct b43_phy_operations;
128extern const struct b43_phy_operations b43_phyops_a;
129
130#endif /* LINUX_B43_PHY_A_H_ */
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
new file mode 100644
index 000000000000..af37abccccb3
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -0,0 +1,381 @@
1/*
2
3 Broadcom B43 wireless driver
4 Common PHY routines
5
6 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
7 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
8 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
9 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
10 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; see the file COPYING. If not, write to
 24 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
25 Boston, MA 02110-1301, USA.
26
27*/
28
29#include "phy_common.h"
30#include "phy_g.h"
31#include "phy_a.h"
32#include "phy_n.h"
33#include "phy_lp.h"
34#include "b43.h"
35#include "main.h"
36
37
38int b43_phy_allocate(struct b43_wldev *dev)
39{
40 struct b43_phy *phy = &(dev->phy);
41 int err;
42
43 phy->ops = NULL;
44
45 switch (phy->type) {
46 case B43_PHYTYPE_A:
47 phy->ops = &b43_phyops_a;
48 break;
49 case B43_PHYTYPE_G:
50 phy->ops = &b43_phyops_g;
51 break;
52 case B43_PHYTYPE_N:
53#ifdef CONFIG_B43_NPHY
54 phy->ops = &b43_phyops_n;
55#endif
56 break;
57 case B43_PHYTYPE_LP:
58#ifdef CONFIG_B43_PHY_LP
59 phy->ops = &b43_phyops_lp;
60#endif
61 break;
62 }
63 if (B43_WARN_ON(!phy->ops))
64 return -ENODEV;
65
66 err = phy->ops->allocate(dev);
67 if (err)
68 phy->ops = NULL;
69
70 return err;
71}
72
73void b43_phy_free(struct b43_wldev *dev)
74{
75 dev->phy.ops->free(dev);
76 dev->phy.ops = NULL;
77}
78
/* Initialize the PHY: unblock the radio, run the PHY driver's init
 * hook and switch hardware + firmware (SHM) to the default channel.
 * On failure the PHY is shut down again and the radio re-blocked. */
int b43_phy_init(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	const struct b43_phy_operations *ops = phy->ops;
	int err;

	phy->channel = ops->get_default_chan(dev);

	/* The radio must be ON before the PHY can be initialized. */
	ops->software_rfkill(dev, RFKILL_STATE_UNBLOCKED);
	err = ops->init(dev);
	if (err) {
		b43err(dev->wl, "PHY init failed\n");
		goto err_block_rf;
	}
	/* Make sure to switch hardware and firmware (SHM) to
	 * the default channel. */
	err = b43_switch_channel(dev, ops->get_default_chan(dev));
	if (err) {
		b43err(dev->wl, "PHY init: Channel switch to default failed\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	/* The exit callback is optional. */
	if (ops->exit)
		ops->exit(dev);
err_block_rf:
	ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);

	return err;
}
111
112void b43_phy_exit(struct b43_wldev *dev)
113{
114 const struct b43_phy_operations *ops = dev->phy.ops;
115
116 ops->software_rfkill(dev, RFKILL_STATE_SOFT_BLOCKED);
117 if (ops->exit)
118 ops->exit(dev);
119}
120
121bool b43_has_hardware_pctl(struct b43_wldev *dev)
122{
123 if (!dev->phy.hardware_power_control)
124 return 0;
125 if (!dev->phy.ops->supports_hwpctl)
126 return 0;
127 return dev->phy.ops->supports_hwpctl(dev);
128}
129
/* Lock firmware radio register access.
 * Sets the RADIOLOCK bit in MACCTL, then commits the write and waits
 * for the device to finish any in-flight radio register access. */
void b43_radio_lock(struct b43_wldev *dev)
{
	u32 macctl;

	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	/* Taking the lock twice is a bug. */
	B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK);
	macctl |= B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
	/* Commit the write and wait for the device
	 * to exit any radio register access. */
	b43_read32(dev, B43_MMIO_MACCTL);
	udelay(10);
}
143
/* Unlock firmware radio register access.
 * Counterpart of b43_radio_lock(): commits any pending register write
 * and clears the RADIOLOCK bit in MACCTL. */
void b43_radio_unlock(struct b43_wldev *dev)
{
	u32 macctl;

	/* Commit any write */
	b43_read16(dev, B43_MMIO_PHY_VER);
	/* unlock */
	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	/* Unlocking without holding the lock is a bug. */
	B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK));
	macctl &= ~B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
}
156
/* Lock firmware PHY register access.
 * Keeps the device awake so PHY registers can be accessed. */
void b43_phy_lock(struct b43_wldev *dev)
{
#if B43_DEBUG
	B43_WARN_ON(dev->phy.phy_locked);
	dev->phy.phy_locked = 1;
#endif
	/* This mechanism is only used on core revisions >= 3. */
	B43_WARN_ON(dev->dev->id.revision < 3);

	/* In AP mode the device never sleeps, so no wakeup is needed. */
	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
		b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
}
168
/* Unlock firmware PHY register access.
 * Counterpart of b43_phy_lock(): restores the power-saving state. */
void b43_phy_unlock(struct b43_wldev *dev)
{
#if B43_DEBUG
	B43_WARN_ON(!dev->phy.phy_locked);
	dev->phy.phy_locked = 0;
#endif
	/* This mechanism is only used on core revisions >= 3. */
	B43_WARN_ON(dev->dev->id.revision < 3);

	/* In AP mode the device never sleeps; nothing to restore. */
	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
		b43_power_saving_ctl_bits(dev, 0);
}
180
/* 16bit radio register read, dispatched to the PHY-type backend. */
u16 b43_radio_read(struct b43_wldev *dev, u16 reg)
{
	return dev->phy.ops->radio_read(dev, reg);
}
185
/* 16bit radio register write, dispatched to the PHY-type backend. */
void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	dev->phy.ops->radio_write(dev, reg, value);
}
190
191void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask)
192{
193 b43_radio_write16(dev, offset,
194 b43_radio_read16(dev, offset) & mask);
195}
196
197void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set)
198{
199 b43_radio_write16(dev, offset,
200 b43_radio_read16(dev, offset) | set);
201}
202
203void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
204{
205 b43_radio_write16(dev, offset,
206 (b43_radio_read16(dev, offset) & mask) | set);
207}
208
/* 16bit PHY register read, dispatched to the PHY-type backend. */
u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
{
	return dev->phy.ops->phy_read(dev, reg);
}
213
/* 16bit PHY register write, dispatched to the PHY-type backend. */
void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	dev->phy.ops->phy_write(dev, reg, value);
}
218
219void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask)
220{
221 b43_phy_write(dev, offset,
222 b43_phy_read(dev, offset) & mask);
223}
224
225void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set)
226{
227 b43_phy_write(dev, offset,
228 b43_phy_read(dev, offset) | set);
229}
230
231void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
232{
233 b43_phy_write(dev, offset,
234 (b43_phy_read(dev, offset) & mask) | set);
235}
236
237int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
238{
239 struct b43_phy *phy = &(dev->phy);
240 u16 channelcookie, savedcookie;
241 int err;
242
243 if (new_channel == B43_DEFAULT_CHANNEL)
244 new_channel = phy->ops->get_default_chan(dev);
245
246 /* First we set the channel radio code to prevent the
247 * firmware from sending ghost packets.
248 */
249 channelcookie = new_channel;
250 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
251 channelcookie |= 0x100;
252 //FIXME set 40Mhz flag if required
253 savedcookie = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN);
254 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_CHAN, channelcookie);
255
256 /* Now try to switch the PHY hardware channel. */
257 err = phy->ops->switch_channel(dev, new_channel);
258 if (err)
259 goto err_restore_cookie;
260
261 dev->phy.channel = new_channel;
262 /* Wait for the radio to tune to the channel and stabilize. */
263 msleep(8);
264
265 return 0;
266
267err_restore_cookie:
268 b43_shm_write16(dev, B43_SHM_SHARED,
269 B43_SHM_SH_CHAN, savedcookie);
270
271 return err;
272}
273
274void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state)
275{
276 struct b43_phy *phy = &dev->phy;
277
278 if (state == RFKILL_STATE_HARD_BLOCKED) {
279 /* We cannot hardware-block the device */
280 state = RFKILL_STATE_SOFT_BLOCKED;
281 }
282
283 phy->ops->software_rfkill(dev, state);
284 phy->radio_on = (state == RFKILL_STATE_UNBLOCKED);
285}
286
/**
 * b43_phy_txpower_adjust_work - TX power workqueue.
 *
 * Workqueue for updating the TX power parameters in hardware.
 * Scheduled from b43_phy_txpower_check(); runs under wl->mutex.
 */
void b43_phy_txpower_adjust_work(struct work_struct *work)
{
	struct b43_wl *wl = container_of(work, struct b43_wl,
					 txpower_adjust_work);
	struct b43_wldev *dev;

	mutex_lock(&wl->mutex);
	dev = wl->current_dev;

	/* The device may have stopped since the work was queued;
	 * only adjust while it is still running. */
	if (likely(dev && (b43_status(dev) >= B43_STAT_STARTED)))
		dev->phy.ops->adjust_txpower(dev);

	mutex_unlock(&wl->mutex);
}
306
/* Called with wl->irq_lock locked */
void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags)
{
	struct b43_phy *phy = &dev->phy;
	unsigned long now = jiffies;
	enum b43_txpwr_result result;

	if (!(flags & B43_TXPWR_IGNORE_TIME)) {
		/* Check if it's time for a TXpower check. */
		if (time_before(now, phy->next_txpwr_check_time))
			return; /* Not yet */
	}
	/* The next check will be needed in two seconds, or later. */
	phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2));

	if ((dev->dev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
	    (dev->dev->bus->boardinfo.type == SSB_BOARD_BU4306))
		return; /* No software txpower adjustment needed */

	result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI));
	if (result == B43_TXPWR_RES_DONE)
		return; /* We are done. */
	/* recalc_txpower may only report DONE or NEED_ADJUST, and
	 * NEED_ADJUST requires an adjust_txpower callback. */
	B43_WARN_ON(result != B43_TXPWR_RES_NEED_ADJUST);
	B43_WARN_ON(phy->ops->adjust_txpower == NULL);

	/* We must adjust the transmission power in hardware.
	 * Schedule b43_phy_txpower_adjust_work(). */
	queue_work(dev->wl->hw->workqueue, &dev->wl->txpower_adjust_work);
}
336
337int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset)
338{
339 const bool is_ofdm = (shm_offset != B43_SHM_SH_TSSI_CCK);
340 unsigned int a, b, c, d;
341 unsigned int average;
342 u32 tmp;
343
344 tmp = b43_shm_read32(dev, B43_SHM_SHARED, shm_offset);
345 a = tmp & 0xFF;
346 b = (tmp >> 8) & 0xFF;
347 c = (tmp >> 16) & 0xFF;
348 d = (tmp >> 24) & 0xFF;
349 if (a == 0 || a == B43_TSSI_MAX ||
350 b == 0 || b == B43_TSSI_MAX ||
351 c == 0 || c == B43_TSSI_MAX ||
352 d == 0 || d == B43_TSSI_MAX)
353 return -ENOENT;
354 /* The values are OK. Clear them. */
355 tmp = B43_TSSI_MAX | (B43_TSSI_MAX << 8) |
356 (B43_TSSI_MAX << 16) | (B43_TSSI_MAX << 24);
357 b43_shm_write32(dev, B43_SHM_SHARED, shm_offset, tmp);
358
359 if (is_ofdm) {
360 a = (a + 32) & 0x3F;
361 b = (b + 32) & 0x3F;
362 c = (c + 32) & 0x3F;
363 d = (d + 32) & 0x3F;
364 }
365
366 /* Get the average of the values with 0.5 added to each value. */
367 average = (a + b + c + d + 2) / 4;
368 if (is_ofdm) {
369 /* Adjust for CCK-boost */
370 if (b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO)
371 & B43_HF_CCKBOOST)
372 average = (average >= 13) ? (average - 13) : 0;
373 }
374
375 return average;
376}
377
378void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
379{
380 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
381}
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
new file mode 100644
index 000000000000..c9f5430d1d7d
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -0,0 +1,413 @@
1#ifndef LINUX_B43_PHY_COMMON_H_
2#define LINUX_B43_PHY_COMMON_H_
3
4#include <linux/rfkill.h>
5
6struct b43_wldev;
7
8
9/* PHY register routing bits */
10#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
11#define B43_PHYROUTE_BASE 0x0000 /* Base registers */
12#define B43_PHYROUTE_OFDM_GPHY 0x0400 /* OFDM register routing for G-PHYs */
13#define B43_PHYROUTE_EXT_GPHY 0x0800 /* Extended G-PHY registers */
14#define B43_PHYROUTE_N_BMODE 0x0C00 /* N-PHY BMODE registers */
15
16/* CCK (B-PHY) registers. */
17#define B43_PHY_CCK(reg) ((reg) | B43_PHYROUTE_BASE)
18/* N-PHY registers. */
19#define B43_PHY_N(reg) ((reg) | B43_PHYROUTE_BASE)
20/* N-PHY BMODE registers. */
21#define B43_PHY_N_BMODE(reg) ((reg) | B43_PHYROUTE_N_BMODE)
22/* OFDM (A-PHY) registers. */
23#define B43_PHY_OFDM(reg) ((reg) | B43_PHYROUTE_OFDM_GPHY)
24/* Extended G-PHY registers. */
25#define B43_PHY_EXTG(reg) ((reg) | B43_PHYROUTE_EXT_GPHY)
26
27
28/* Masks for the PHY versioning registers. */
29#define B43_PHYVER_ANALOG 0xF000
30#define B43_PHYVER_ANALOG_SHIFT 12
31#define B43_PHYVER_TYPE 0x0F00
32#define B43_PHYVER_TYPE_SHIFT 8
33#define B43_PHYVER_VERSION 0x00FF
34
35/**
36 * enum b43_interference_mitigation - Interference Mitigation mode
37 *
38 * @B43_INTERFMODE_NONE: Disabled
39 * @B43_INTERFMODE_NONWLAN: Non-WLAN Interference Mitigation
40 * @B43_INTERFMODE_MANUALWLAN: WLAN Interference Mitigation
41 * @B43_INTERFMODE_AUTOWLAN: Automatic WLAN Interference Mitigation
42 */
43enum b43_interference_mitigation {
44 B43_INTERFMODE_NONE,
45 B43_INTERFMODE_NONWLAN,
46 B43_INTERFMODE_MANUALWLAN,
47 B43_INTERFMODE_AUTOWLAN,
48};
49
/* Antenna identifiers */
enum {
	B43_ANTENNA0,		/* Antenna 0 */
	B43_ANTENNA1,		/* Antenna 1 */
	B43_ANTENNA_AUTO1,	/* Automatic, starting with antenna 1 */
	B43_ANTENNA_AUTO0,	/* Automatic, starting with antenna 0 */
	B43_ANTENNA2,
	B43_ANTENNA3 = 8,

	B43_ANTENNA_AUTO = B43_ANTENNA_AUTO0,
	B43_ANTENNA_DEFAULT = B43_ANTENNA_AUTO,
};
62
63/**
64 * enum b43_txpwr_result - Return value for the recalc_txpower PHY op.
65 *
66 * @B43_TXPWR_RES_NEED_ADJUST: Values changed. Hardware adjustment is needed.
67 * @B43_TXPWR_RES_DONE: No more work to do. Everything is done.
68 */
69enum b43_txpwr_result {
70 B43_TXPWR_RES_NEED_ADJUST,
71 B43_TXPWR_RES_DONE,
72};
73
74/**
75 * struct b43_phy_operations - Function pointers for PHY ops.
76 *
77 * @allocate: Allocate and initialise the PHY data structures.
78 * Must not be NULL.
79 * @free: Destroy and free the PHY data structures.
80 * Must not be NULL.
81 *
82 * @prepare_structs: Prepare the PHY data structures.
83 * The data structures allocated in @allocate are
84 * initialized here.
85 * Must not be NULL.
86 * @prepare_hardware: Prepare the PHY. This is called before b43_chip_init to
 87 do some early PHY hardware init.
88 * Can be NULL, if not required.
89 * @init: Initialize the PHY.
90 * Must not be NULL.
91 * @exit: Shutdown the PHY.
92 * Can be NULL, if not required.
93 *
94 * @phy_read: Read from a PHY register.
95 * Must not be NULL.
96 * @phy_write: Write to a PHY register.
97 * Must not be NULL.
98 * @radio_read: Read from a Radio register.
99 * Must not be NULL.
100 * @radio_write: Write to a Radio register.
101 * Must not be NULL.
102 *
103 * @supports_hwpctl: Returns a boolean whether Hardware Power Control
104 * is supported or not.
105 * If NULL, hwpctl is assumed to be never supported.
106 * @software_rfkill: Turn the radio ON or OFF.
107 * Possible state values are
108 * RFKILL_STATE_SOFT_BLOCKED or
109 * RFKILL_STATE_UNBLOCKED
110 * Must not be NULL.
111 * @switch_analog: Turn the Analog on/off.
112 * Must not be NULL.
113 * @switch_channel: Switch the radio to another channel.
114 * Must not be NULL.
115 * @get_default_chan: Just returns the default channel number.
116 * Must not be NULL.
117 * @set_rx_antenna: Set the antenna used for RX.
118 * Can be NULL, if not supported.
119 * @interf_mitigation: Switch the Interference Mitigation mode.
120 * Can be NULL, if not supported.
121 *
122 * @recalc_txpower: Recalculate the transmission power parameters.
123 * This callback has to recalculate the TX power settings,
124 * but does not need to write them to the hardware, yet.
125 * Returns enum b43_txpwr_result to indicate whether the hardware
126 * needs to be adjusted.
 127 * If B43_TXPWR_RES_NEED_ADJUST is returned, @adjust_txpower
128 * will be called later.
129 * If the parameter "ignore_tssi" is true, the TSSI values should
130 * be ignored and a recalculation of the power settings should be
131 * done even if the TSSI values did not change.
132 * This callback is called with wl->irq_lock held and must not sleep.
133 * Must not be NULL.
134 * @adjust_txpower: Write the previously calculated TX power settings
135 * (from @recalc_txpower) to the hardware.
136 * This function may sleep.
137 * Can be NULL, if (and ONLY if) @recalc_txpower _always_
138 * returns B43_TXPWR_RES_DONE.
139 *
140 * @pwork_15sec: Periodic work. Called every 15 seconds.
141 * Can be NULL, if not required.
142 * @pwork_60sec: Periodic work. Called every 60 seconds.
143 * Can be NULL, if not required.
144 */
145struct b43_phy_operations {
146 /* Initialisation */
147 int (*allocate)(struct b43_wldev *dev);
148 void (*free)(struct b43_wldev *dev);
149 void (*prepare_structs)(struct b43_wldev *dev);
150 int (*prepare_hardware)(struct b43_wldev *dev);
151 int (*init)(struct b43_wldev *dev);
152 void (*exit)(struct b43_wldev *dev);
153
154 /* Register access */
155 u16 (*phy_read)(struct b43_wldev *dev, u16 reg);
156 void (*phy_write)(struct b43_wldev *dev, u16 reg, u16 value);
157 u16 (*radio_read)(struct b43_wldev *dev, u16 reg);
158 void (*radio_write)(struct b43_wldev *dev, u16 reg, u16 value);
159
160 /* Radio */
161 bool (*supports_hwpctl)(struct b43_wldev *dev);
162 void (*software_rfkill)(struct b43_wldev *dev, enum rfkill_state state);
163 void (*switch_analog)(struct b43_wldev *dev, bool on);
164 int (*switch_channel)(struct b43_wldev *dev, unsigned int new_channel);
165 unsigned int (*get_default_chan)(struct b43_wldev *dev);
166 void (*set_rx_antenna)(struct b43_wldev *dev, int antenna);
167 int (*interf_mitigation)(struct b43_wldev *dev,
168 enum b43_interference_mitigation new_mode);
169
170 /* Transmission power adjustment */
171 enum b43_txpwr_result (*recalc_txpower)(struct b43_wldev *dev,
172 bool ignore_tssi);
173 void (*adjust_txpower)(struct b43_wldev *dev);
174
175 /* Misc */
176 void (*pwork_15sec)(struct b43_wldev *dev);
177 void (*pwork_60sec)(struct b43_wldev *dev);
178};
179
180struct b43_phy_a;
181struct b43_phy_g;
182struct b43_phy_n;
183struct b43_phy_lp;
184
185struct b43_phy {
186 /* Hardware operation callbacks. */
187 const struct b43_phy_operations *ops;
188
189 /* Most hardware context information is stored in the standard-
190 * specific data structures pointed to by the pointers below.
191 * Only one of them is valid (the currently enabled PHY). */
192#ifdef CONFIG_B43_DEBUG
193 /* No union for debug build to force NULL derefs in buggy code. */
194 struct {
195#else
196 union {
197#endif
198 /* A-PHY specific information */
199 struct b43_phy_a *a;
200 /* G-PHY specific information */
201 struct b43_phy_g *g;
202 /* N-PHY specific information */
203 struct b43_phy_n *n;
204 /* LP-PHY specific information */
205 struct b43_phy_lp *lp;
206 };
207
208 /* Band support flags. */
209 bool supports_2ghz;
210 bool supports_5ghz;
211
212 /* GMODE bit enabled? */
213 bool gmode;
214
215 /* Analog Type */
216 u8 analog;
217 /* B43_PHYTYPE_ */
218 u8 type;
219 /* PHY revision number. */
220 u8 rev;
221
222 /* Radio versioning */
223 u16 radio_manuf; /* Radio manufacturer */
224 u16 radio_ver; /* Radio version */
225 u8 radio_rev; /* Radio revision */
226
227 /* Software state of the radio */
228 bool radio_on;
229
230 /* Desired TX power level (in dBm).
231 * This is set by the user and adjusted in b43_phy_xmitpower(). */
232 int desired_txpower;
233
234 /* Hardware Power Control enabled? */
235 bool hardware_power_control;
236
237 /* The time (in absolute jiffies) when the next TX power output
238 * check is needed. */
239 unsigned long next_txpwr_check_time;
240
241 /* current channel */
242 unsigned int channel;
243
244 /* PHY TX errors counter. */
245 atomic_t txerr_cnt;
246
247#ifdef CONFIG_B43_DEBUG
248 /* PHY registers locked by b43_phy_lock()? */
249 bool phy_locked;
250#endif /* B43_DEBUG */
251};
252
253
254/**
255 * b43_phy_allocate - Allocate PHY structs
256 * Allocate the PHY data structures, based on the current dev->phy.type
257 */
258int b43_phy_allocate(struct b43_wldev *dev);
259
260/**
261 * b43_phy_free - Free PHY structs
262 */
263void b43_phy_free(struct b43_wldev *dev);
264
265/**
266 * b43_phy_init - Initialise the PHY
267 */
268int b43_phy_init(struct b43_wldev *dev);
269
270/**
271 * b43_phy_exit - Cleanup PHY
272 */
273void b43_phy_exit(struct b43_wldev *dev);
274
275/**
276 * b43_has_hardware_pctl - Hardware Power Control supported?
277 * Returns a boolean, whether hardware power control is supported.
278 */
279bool b43_has_hardware_pctl(struct b43_wldev *dev);
280
281/**
282 * b43_phy_read - 16bit PHY register read access
283 */
284u16 b43_phy_read(struct b43_wldev *dev, u16 reg);
285
286/**
287 * b43_phy_write - 16bit PHY register write access
288 */
289void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value);
290
291/**
292 * b43_phy_mask - Mask a PHY register with a mask
293 */
294void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask);
295
296/**
297 * b43_phy_set - OR a PHY register with a bitmap
298 */
299void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set);
300
301/**
302 * b43_phy_maskset - Mask and OR a PHY register with a mask and bitmap
303 */
304void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
305
306/**
307 * b43_radio_read - 16bit Radio register read access
308 */
309u16 b43_radio_read(struct b43_wldev *dev, u16 reg);
310#define b43_radio_read16 b43_radio_read /* DEPRECATED */
311
312/**
313 * b43_radio_write - 16bit Radio register write access
314 */
315void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value);
316#define b43_radio_write16 b43_radio_write /* DEPRECATED */
317
318/**
319 * b43_radio_mask - Mask a 16bit radio register with a mask
320 */
321void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask);
322
323/**
324 * b43_radio_set - OR a 16bit radio register with a bitmap
325 */
326void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set);
327
328/**
329 * b43_radio_maskset - Mask and OR a radio register with a mask and bitmap
330 */
331void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set);
332
333/**
334 * b43_radio_lock - Lock firmware radio register access
335 */
336void b43_radio_lock(struct b43_wldev *dev);
337
338/**
339 * b43_radio_unlock - Unlock firmware radio register access
340 */
341void b43_radio_unlock(struct b43_wldev *dev);
342
343/**
344 * b43_phy_lock - Lock firmware PHY register access
345 */
346void b43_phy_lock(struct b43_wldev *dev);
347
348/**
349 * b43_phy_unlock - Unlock firmware PHY register access
350 */
351void b43_phy_unlock(struct b43_wldev *dev);
352
353/**
354 * b43_switch_channel - Switch to another channel
355 */
356int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel);
357/**
358 * B43_DEFAULT_CHANNEL - Switch to the default channel.
359 */
360#define B43_DEFAULT_CHANNEL UINT_MAX
361
362/**
363 * b43_software_rfkill - Turn the radio ON or OFF in software.
364 */
365void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state);
366
367/**
368 * b43_phy_txpower_check - Check TX power output.
369 *
370 * Compare the current TX power output to the desired power emission
371 * and schedule an adjustment in case it mismatches.
372 * Requires wl->irq_lock locked.
373 *
374 * @flags: OR'ed enum b43_phy_txpower_check_flags flags.
375 * See the docs below.
376 */
377void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags);
378/**
379 * enum b43_phy_txpower_check_flags - Flags for b43_phy_txpower_check()
380 *
381 * @B43_TXPWR_IGNORE_TIME: Ignore the schedule time and force-redo
382 * the check now.
383 * @B43_TXPWR_IGNORE_TSSI: Redo the recalculation, even if the average
384 * TSSI did not change.
385 */
386enum b43_phy_txpower_check_flags {
387 B43_TXPWR_IGNORE_TIME = (1 << 0),
388 B43_TXPWR_IGNORE_TSSI = (1 << 1),
389};
390
391struct work_struct;
392void b43_phy_txpower_adjust_work(struct work_struct *work);
393
394/**
395 * b43_phy_shm_tssi_read - Read the average of the last 4 TSSI from SHM.
396 *
397 * @shm_offset: The SHM address to read the values from.
398 *
399 * Returns the average of the 4 TSSI values, or a negative error code.
400 */
401int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
402
403/**
404 * b43_phy_switch_analog_generic - Generic PHY operation for switching the Analog.
405 *
406 * It does the switching based on the PHY0 core register.
407 * Do _not_ call this directly. Only use it as a switch_analog callback
408 * for struct b43_phy_operations.
409 */
410void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
411
412
413#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy_g.c
index 305d4cd6fd03..232181f6333c 100644
--- a/drivers/net/wireless/b43/phy.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1,10 +1,11 @@
1/* 1/*
2 2
3 Broadcom B43 wireless driver 3 Broadcom B43 wireless driver
4 IEEE 802.11g PHY driver
4 5
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, 6 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> 7 Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it>
7 Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de> 8 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
8 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org> 9 Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org>
9 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch> 10 Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10 11
@@ -25,38 +26,14 @@
25 26
26*/ 27*/
27 28
28#include <linux/delay.h>
29#include <linux/io.h>
30#include <linux/types.h>
31#include <linux/bitrev.h>
32
33#include "b43.h" 29#include "b43.h"
34#include "phy.h" 30#include "phy_g.h"
35#include "nphy.h" 31#include "phy_common.h"
36#include "main.h"
37#include "tables.h"
38#include "lo.h" 32#include "lo.h"
39#include "wa.h" 33#include "main.h"
40 34
41 35#include <linux/bitrev.h>
42static const s8 b43_tssi2dbm_b_table[] = { 36
43 0x4D, 0x4C, 0x4B, 0x4A,
44 0x4A, 0x49, 0x48, 0x47,
45 0x47, 0x46, 0x45, 0x45,
46 0x44, 0x43, 0x42, 0x42,
47 0x41, 0x40, 0x3F, 0x3E,
48 0x3D, 0x3C, 0x3B, 0x3A,
49 0x39, 0x38, 0x37, 0x36,
50 0x35, 0x34, 0x32, 0x31,
51 0x30, 0x2F, 0x2D, 0x2C,
52 0x2B, 0x29, 0x28, 0x26,
53 0x25, 0x23, 0x21, 0x1F,
54 0x1D, 0x1A, 0x17, 0x14,
55 0x10, 0x0C, 0x06, 0x00,
56 -7, -7, -7, -7,
57 -7, -7, -7, -7,
58 -7, -7, -7, -7,
59};
60 37
61static const s8 b43_tssi2dbm_g_table[] = { 38static const s8 b43_tssi2dbm_g_table[] = {
62 77, 77, 77, 76, 39 77, 77, 77, 76,
@@ -84,8 +61,20 @@ const u8 b43_radio_channel_codes_bg[] = {
84 72, 84, 61 72, 84,
85}; 62};
86 63
64
65static void b43_calc_nrssi_threshold(struct b43_wldev *dev);
66
67
87#define bitrev4(tmp) (bitrev8(tmp) >> 4) 68#define bitrev4(tmp) (bitrev8(tmp) >> 4)
88static void b43_phy_initg(struct b43_wldev *dev); 69
70
71/* Get the freq, as it has to be written to the device. */
72static inline u16 channel2freq_bg(u8 channel)
73{
74 B43_WARN_ON(!(channel >= 1 && channel <= 14));
75
76 return b43_radio_channel_codes_bg[channel - 1];
77}
89 78
90static void generate_rfatt_list(struct b43_wldev *dev, 79static void generate_rfatt_list(struct b43_wldev *dev,
91 struct b43_rfatt_list *list) 80 struct b43_rfatt_list *list)
@@ -130,7 +119,7 @@ static void generate_rfatt_list(struct b43_wldev *dev,
130 {.att = 9,.with_padmix = 1,}, 119 {.att = 9,.with_padmix = 1,},
131 }; 120 };
132 121
133 if (!b43_has_hardware_pctl(phy)) { 122 if (!b43_has_hardware_pctl(dev)) {
134 /* Software pctl */ 123 /* Software pctl */
135 list->list = rfatt_0; 124 list->list = rfatt_0;
136 list->len = ARRAY_SIZE(rfatt_0); 125 list->len = ARRAY_SIZE(rfatt_0);
@@ -174,140 +163,55 @@ static void generate_bbatt_list(struct b43_wldev *dev,
174 list->max_val = 8; 163 list->max_val = 8;
175} 164}
176 165
177bool b43_has_hardware_pctl(struct b43_phy *phy)
178{
179 if (!phy->hardware_power_control)
180 return 0;
181 switch (phy->type) {
182 case B43_PHYTYPE_A:
183 if (phy->rev >= 5)
184 return 1;
185 break;
186 case B43_PHYTYPE_G:
187 if (phy->rev >= 6)
188 return 1;
189 break;
190 default:
191 B43_WARN_ON(1);
192 }
193 return 0;
194}
195
196static void b43_shm_clear_tssi(struct b43_wldev *dev) 166static void b43_shm_clear_tssi(struct b43_wldev *dev)
197{ 167{
198 struct b43_phy *phy = &dev->phy; 168 b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F);
199 169 b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F);
200 switch (phy->type) { 170 b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F);
201 case B43_PHYTYPE_A: 171 b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F);
202 b43_shm_write16(dev, B43_SHM_SHARED, 0x0068, 0x7F7F);
203 b43_shm_write16(dev, B43_SHM_SHARED, 0x006a, 0x7F7F);
204 break;
205 case B43_PHYTYPE_B:
206 case B43_PHYTYPE_G:
207 b43_shm_write16(dev, B43_SHM_SHARED, 0x0058, 0x7F7F);
208 b43_shm_write16(dev, B43_SHM_SHARED, 0x005a, 0x7F7F);
209 b43_shm_write16(dev, B43_SHM_SHARED, 0x0070, 0x7F7F);
210 b43_shm_write16(dev, B43_SHM_SHARED, 0x0072, 0x7F7F);
211 break;
212 }
213}
214
215/* Lock the PHY registers against concurrent access from the microcode.
216 * This lock is nonrecursive. */
217void b43_phy_lock(struct b43_wldev *dev)
218{
219#if B43_DEBUG
220 B43_WARN_ON(dev->phy.phy_locked);
221 dev->phy.phy_locked = 1;
222#endif
223 B43_WARN_ON(dev->dev->id.revision < 3);
224
225 if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP))
226 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
227} 172}
228 173
229void b43_phy_unlock(struct b43_wldev *dev) 174/* Synthetic PU workaround */
175static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel)
230{ 176{
231#if B43_DEBUG 177 struct b43_phy *phy = &dev->phy;
232 B43_WARN_ON(!dev->phy.phy_locked);
233 dev->phy.phy_locked = 0;
234#endif
235 B43_WARN_ON(dev->dev->id.revision < 3);
236 178
237 if (!b43_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) 179 might_sleep();
238 b43_power_saving_ctl_bits(dev, 0);
239}
240 180
241/* Different PHYs require different register routing flags. 181 if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) {
242 * This adjusts (and does sanity checks on) the routing flags. 182 /* We do not need the workaround. */
243 */ 183 return;
244static inline u16 adjust_phyreg_for_phytype(struct b43_phy *phy,
245 u16 offset, struct b43_wldev *dev)
246{
247 if (phy->type == B43_PHYTYPE_A) {
248 /* OFDM registers are base-registers for the A-PHY. */
249 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
250 offset &= ~B43_PHYROUTE;
251 offset |= B43_PHYROUTE_BASE;
252 }
253 } 184 }
254 185
255#if B43_DEBUG 186 if (channel <= 10) {
256 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { 187 b43_write16(dev, B43_MMIO_CHANNEL,
257 /* Ext-G registers are only available on G-PHYs */ 188 channel2freq_bg(channel + 4));
258 if (phy->type != B43_PHYTYPE_G) { 189 } else {
259 b43err(dev->wl, "Invalid EXT-G PHY access at " 190 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1));
260 "0x%04X on PHY type %u\n", offset, phy->type);
261 dump_stack();
262 }
263 }
264 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
265 /* N-BMODE registers are only available on N-PHYs */
266 if (phy->type != B43_PHYTYPE_N) {
267 b43err(dev->wl, "Invalid N-BMODE PHY access at "
268 "0x%04X on PHY type %u\n", offset, phy->type);
269 dump_stack();
270 }
271 } 191 }
272#endif /* B43_DEBUG */ 192 msleep(1);
273 193 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
274 return offset;
275}
276
277u16 b43_phy_read(struct b43_wldev * dev, u16 offset)
278{
279 struct b43_phy *phy = &dev->phy;
280
281 offset = adjust_phyreg_for_phytype(phy, offset, dev);
282 b43_write16(dev, B43_MMIO_PHY_CONTROL, offset);
283 return b43_read16(dev, B43_MMIO_PHY_DATA);
284} 194}
285 195
286void b43_phy_write(struct b43_wldev *dev, u16 offset, u16 val) 196/* Set the baseband attenuation value on chip. */
197void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev,
198 u16 baseband_attenuation)
287{ 199{
288 struct b43_phy *phy = &dev->phy; 200 struct b43_phy *phy = &dev->phy;
289 201
290 offset = adjust_phyreg_for_phytype(phy, offset, dev); 202 if (phy->analog == 0) {
291 b43_write16(dev, B43_MMIO_PHY_CONTROL, offset); 203 b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0)
292 b43_write16(dev, B43_MMIO_PHY_DATA, val); 204 & 0xFFF0) |
293} 205 baseband_attenuation);
294 206 } else if (phy->analog > 1) {
295void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask) 207 b43_phy_write(dev, B43_PHY_DACCTL,
296{ 208 (b43_phy_read(dev, B43_PHY_DACCTL)
297 b43_phy_write(dev, offset, 209 & 0xFFC3) | (baseband_attenuation << 2));
298 b43_phy_read(dev, offset) & mask); 210 } else {
299} 211 b43_phy_write(dev, B43_PHY_DACCTL,
300 212 (b43_phy_read(dev, B43_PHY_DACCTL)
301void b43_phy_set(struct b43_wldev *dev, u16 offset, u16 set) 213 & 0xFF87) | (baseband_attenuation << 3));
302{ 214 }
303 b43_phy_write(dev, offset,
304 b43_phy_read(dev, offset) | set);
305}
306
307void b43_phy_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
308{
309 b43_phy_write(dev, offset,
310 (b43_phy_read(dev, offset) & mask) | set);
311} 215}
312 216
313/* Adjust the transmission power output (G-PHY) */ 217/* Adjust the transmission power output (G-PHY) */
@@ -316,7 +220,8 @@ void b43_set_txpower_g(struct b43_wldev *dev,
316 const struct b43_rfatt *rfatt, u8 tx_control) 220 const struct b43_rfatt *rfatt, u8 tx_control)
317{ 221{
318 struct b43_phy *phy = &dev->phy; 222 struct b43_phy *phy = &dev->phy;
319 struct b43_txpower_lo_control *lo = phy->lo_control; 223 struct b43_phy_g *gphy = phy->g;
224 struct b43_txpower_lo_control *lo = gphy->lo_control;
320 u16 bb, rf; 225 u16 bb, rf;
321 u16 tx_bias, tx_magn; 226 u16 tx_bias, tx_magn;
322 227
@@ -327,11 +232,12 @@ void b43_set_txpower_g(struct b43_wldev *dev,
327 if (unlikely(tx_bias == 0xFF)) 232 if (unlikely(tx_bias == 0xFF))
328 tx_bias = 0; 233 tx_bias = 0;
329 234
330 /* Save the values for later */ 235 /* Save the values for later. Use memmove, because it's valid
331 phy->tx_control = tx_control; 236 * to pass &gphy->rfatt as rfatt pointer argument. Same for bbatt. */
332 memcpy(&phy->rfatt, rfatt, sizeof(*rfatt)); 237 gphy->tx_control = tx_control;
333 phy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX); 238 memmove(&gphy->rfatt, rfatt, sizeof(*rfatt));
334 memcpy(&phy->bbatt, bbatt, sizeof(*bbatt)); 239 gphy->rfatt.with_padmix = !!(tx_control & B43_TXCTL_TXMIX);
240 memmove(&gphy->bbatt, bbatt, sizeof(*bbatt));
335 241
336 if (b43_debug(dev, B43_DBG_XMITPOWER)) { 242 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
337 b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), " 243 b43dbg(dev->wl, "Tuning TX-power to bbatt(%u), "
@@ -340,7 +246,7 @@ void b43_set_txpower_g(struct b43_wldev *dev,
340 bb, rf, tx_control, tx_bias, tx_magn); 246 bb, rf, tx_control, tx_bias, tx_magn);
341 } 247 }
342 248
343 b43_phy_set_baseband_attenuation(dev, bb); 249 b43_gphy_set_baseband_attenuation(dev, bb);
344 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf); 250 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RFATT, rf);
345 if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) { 251 if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
346 b43_radio_write16(dev, 0x43, 252 b43_radio_write16(dev, 0x43,
@@ -358,179 +264,23 @@ void b43_set_txpower_g(struct b43_wldev *dev,
358 b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52) 264 b43_radio_write16(dev, 0x52, (b43_radio_read16(dev, 0x52)
359 & 0xFFF0) | (tx_bias & 0x000F)); 265 & 0xFFF0) | (tx_bias & 0x000F));
360 } 266 }
361 if (phy->type == B43_PHYTYPE_G) 267 b43_lo_g_adjust(dev);
362 b43_lo_g_adjust(dev);
363}
364
365static void default_baseband_attenuation(struct b43_wldev *dev,
366 struct b43_bbatt *bb)
367{
368 struct b43_phy *phy = &dev->phy;
369
370 if (phy->radio_ver == 0x2050 && phy->radio_rev < 6)
371 bb->att = 0;
372 else
373 bb->att = 2;
374}
375
376static void default_radio_attenuation(struct b43_wldev *dev,
377 struct b43_rfatt *rf)
378{
379 struct ssb_bus *bus = dev->dev->bus;
380 struct b43_phy *phy = &dev->phy;
381
382 rf->with_padmix = 0;
383
384 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
385 bus->boardinfo.type == SSB_BOARD_BCM4309G) {
386 if (bus->boardinfo.rev < 0x43) {
387 rf->att = 2;
388 return;
389 } else if (bus->boardinfo.rev < 0x51) {
390 rf->att = 3;
391 return;
392 }
393 }
394
395 if (phy->type == B43_PHYTYPE_A) {
396 rf->att = 0x60;
397 return;
398 }
399
400 switch (phy->radio_ver) {
401 case 0x2053:
402 switch (phy->radio_rev) {
403 case 1:
404 rf->att = 6;
405 return;
406 }
407 break;
408 case 0x2050:
409 switch (phy->radio_rev) {
410 case 0:
411 rf->att = 5;
412 return;
413 case 1:
414 if (phy->type == B43_PHYTYPE_G) {
415 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
416 && bus->boardinfo.type == SSB_BOARD_BCM4309G
417 && bus->boardinfo.rev >= 30)
418 rf->att = 3;
419 else if (bus->boardinfo.vendor ==
420 SSB_BOARDVENDOR_BCM
421 && bus->boardinfo.type ==
422 SSB_BOARD_BU4306)
423 rf->att = 3;
424 else
425 rf->att = 1;
426 } else {
427 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
428 && bus->boardinfo.type == SSB_BOARD_BCM4309G
429 && bus->boardinfo.rev >= 30)
430 rf->att = 7;
431 else
432 rf->att = 6;
433 }
434 return;
435 case 2:
436 if (phy->type == B43_PHYTYPE_G) {
437 if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
438 && bus->boardinfo.type == SSB_BOARD_BCM4309G
439 && bus->boardinfo.rev >= 30)
440 rf->att = 3;
441 else if (bus->boardinfo.vendor ==
442 SSB_BOARDVENDOR_BCM
443 && bus->boardinfo.type ==
444 SSB_BOARD_BU4306)
445 rf->att = 5;
446 else if (bus->chip_id == 0x4320)
447 rf->att = 4;
448 else
449 rf->att = 3;
450 } else
451 rf->att = 6;
452 return;
453 case 3:
454 rf->att = 5;
455 return;
456 case 4:
457 case 5:
458 rf->att = 1;
459 return;
460 case 6:
461 case 7:
462 rf->att = 5;
463 return;
464 case 8:
465 rf->att = 0xA;
466 rf->with_padmix = 1;
467 return;
468 case 9:
469 default:
470 rf->att = 5;
471 return;
472 }
473 }
474 rf->att = 5;
475}
476
477static u16 default_tx_control(struct b43_wldev *dev)
478{
479 struct b43_phy *phy = &dev->phy;
480
481 if (phy->radio_ver != 0x2050)
482 return 0;
483 if (phy->radio_rev == 1)
484 return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX;
485 if (phy->radio_rev < 6)
486 return B43_TXCTL_PA2DB;
487 if (phy->radio_rev == 8)
488 return B43_TXCTL_TXMIX;
489 return 0;
490}
491
492/* This func is called "PHY calibrate" in the specs... */
493void b43_phy_early_init(struct b43_wldev *dev)
494{
495 struct b43_phy *phy = &dev->phy;
496 struct b43_txpower_lo_control *lo = phy->lo_control;
497
498 default_baseband_attenuation(dev, &phy->bbatt);
499 default_radio_attenuation(dev, &phy->rfatt);
500 phy->tx_control = (default_tx_control(dev) << 4);
501
502 /* Commit previous writes */
503 b43_read32(dev, B43_MMIO_MACCTL);
504
505 if (phy->type == B43_PHYTYPE_B || phy->type == B43_PHYTYPE_G) {
506 generate_rfatt_list(dev, &lo->rfatt_list);
507 generate_bbatt_list(dev, &lo->bbatt_list);
508 }
509 if (phy->type == B43_PHYTYPE_G && phy->rev == 1) {
510 /* Workaround: Temporarly disable gmode through the early init
511 * phase, as the gmode stuff is not needed for phy rev 1 */
512 phy->gmode = 0;
513 b43_wireless_core_reset(dev, 0);
514 b43_phy_initg(dev);
515 phy->gmode = 1;
516 b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
517 }
518} 268}
519 269
520/* GPHY_TSSI_Power_Lookup_Table_Init */ 270/* GPHY_TSSI_Power_Lookup_Table_Init */
521static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev) 271static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev)
522{ 272{
523 struct b43_phy *phy = &dev->phy; 273 struct b43_phy_g *gphy = dev->phy.g;
524 int i; 274 int i;
525 u16 value; 275 u16 value;
526 276
527 for (i = 0; i < 32; i++) 277 for (i = 0; i < 32; i++)
528 b43_ofdmtab_write16(dev, 0x3C20, i, phy->tssi2dbm[i]); 278 b43_ofdmtab_write16(dev, 0x3C20, i, gphy->tssi2dbm[i]);
529 for (i = 32; i < 64; i++) 279 for (i = 32; i < 64; i++)
530 b43_ofdmtab_write16(dev, 0x3C00, i - 32, phy->tssi2dbm[i]); 280 b43_ofdmtab_write16(dev, 0x3C00, i - 32, gphy->tssi2dbm[i]);
531 for (i = 0; i < 64; i += 2) { 281 for (i = 0; i < 64; i += 2) {
532 value = (u16) phy->tssi2dbm[i]; 282 value = (u16) gphy->tssi2dbm[i];
533 value |= ((u16) phy->tssi2dbm[i + 1]) << 8; 283 value |= ((u16) gphy->tssi2dbm[i + 1]) << 8;
534 b43_phy_write(dev, 0x380 + (i / 2), value); 284 b43_phy_write(dev, 0x380 + (i / 2), value);
535 } 285 }
536} 286}
@@ -539,7 +289,8 @@ static void b43_gphy_tssi_power_lt_init(struct b43_wldev *dev)
539static void b43_gphy_gain_lt_init(struct b43_wldev *dev) 289static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
540{ 290{
541 struct b43_phy *phy = &dev->phy; 291 struct b43_phy *phy = &dev->phy;
542 struct b43_txpower_lo_control *lo = phy->lo_control; 292 struct b43_phy_g *gphy = phy->g;
293 struct b43_txpower_lo_control *lo = gphy->lo_control;
543 u16 nr_written = 0; 294 u16 nr_written = 0;
544 u16 tmp; 295 u16 tmp;
545 u8 rf, bb; 296 u8 rf, bb;
@@ -561,1509 +312,6 @@ static void b43_gphy_gain_lt_init(struct b43_wldev *dev)
561 } 312 }
562} 313}
563 314
564static void hardware_pctl_init_aphy(struct b43_wldev *dev)
565{
566 //TODO
567}
568
569static void hardware_pctl_init_gphy(struct b43_wldev *dev)
570{
571 struct b43_phy *phy = &dev->phy;
572
573 b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0)
574 | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
575 b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00)
576 | (phy->tgt_idle_tssi - phy->cur_idle_tssi));
577 b43_gphy_tssi_power_lt_init(dev);
578 b43_gphy_gain_lt_init(dev);
579 b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF);
580 b43_phy_write(dev, 0x0014, 0x0000);
581
582 B43_WARN_ON(phy->rev < 6);
583 b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
584 | 0x0800);
585 b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
586 & 0xFEFF);
587 b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
588 & 0xFFBF);
589
590 b43_gphy_dc_lt_init(dev, 1);
591}
592
593/* HardwarePowerControl init for A and G PHY */
594static void b43_hardware_pctl_init(struct b43_wldev *dev)
595{
596 struct b43_phy *phy = &dev->phy;
597
598 if (!b43_has_hardware_pctl(phy)) {
599 /* No hardware power control */
600 b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL);
601 return;
602 }
603 /* Init the hwpctl related hardware */
604 switch (phy->type) {
605 case B43_PHYTYPE_A:
606 hardware_pctl_init_aphy(dev);
607 break;
608 case B43_PHYTYPE_G:
609 hardware_pctl_init_gphy(dev);
610 break;
611 default:
612 B43_WARN_ON(1);
613 }
614 /* Enable hardware pctl in firmware. */
615 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
616}
617
618static void b43_hardware_pctl_early_init(struct b43_wldev *dev)
619{
620 struct b43_phy *phy = &dev->phy;
621
622 if (!b43_has_hardware_pctl(phy)) {
623 b43_phy_write(dev, 0x047A, 0xC111);
624 return;
625 }
626
627 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF);
628 b43_phy_write(dev, 0x002F, 0x0202);
629 b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002);
630 b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000);
631 if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
632 b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
633 & 0xFF0F) | 0x0010);
634 b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
635 | 0x8000);
636 b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
637 & 0xFFC0) | 0x0010);
638 b43_phy_write(dev, 0x002E, 0xC07F);
639 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
640 | 0x0400);
641 } else {
642 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
643 | 0x0200);
644 b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
645 | 0x0400);
646 b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
647 & 0x7FFF);
648 b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F)
649 & 0xFFFE);
650 b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
651 & 0xFFC0) | 0x0010);
652 b43_phy_write(dev, 0x002E, 0xC07F);
653 b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
654 & 0xFF0F) | 0x0010);
655 }
656}
657
658/* Intialize B/G PHY power control
659 * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
660 */
661static void b43_phy_init_pctl(struct b43_wldev *dev)
662{
663 struct ssb_bus *bus = dev->dev->bus;
664 struct b43_phy *phy = &dev->phy;
665 struct b43_rfatt old_rfatt;
666 struct b43_bbatt old_bbatt;
667 u8 old_tx_control = 0;
668
669 if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
670 (bus->boardinfo.type == SSB_BOARD_BU4306))
671 return;
672
673 b43_phy_write(dev, 0x0028, 0x8018);
674
675 /* This does something with the Analog... */
676 b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0)
677 & 0xFFDF);
678
679 if (phy->type == B43_PHYTYPE_G && !phy->gmode)
680 return;
681 b43_hardware_pctl_early_init(dev);
682 if (phy->cur_idle_tssi == 0) {
683 if (phy->radio_ver == 0x2050 && phy->analog == 0) {
684 b43_radio_write16(dev, 0x0076,
685 (b43_radio_read16(dev, 0x0076)
686 & 0x00F7) | 0x0084);
687 } else {
688 struct b43_rfatt rfatt;
689 struct b43_bbatt bbatt;
690
691 memcpy(&old_rfatt, &phy->rfatt, sizeof(old_rfatt));
692 memcpy(&old_bbatt, &phy->bbatt, sizeof(old_bbatt));
693 old_tx_control = phy->tx_control;
694
695 bbatt.att = 11;
696 if (phy->radio_rev == 8) {
697 rfatt.att = 15;
698 rfatt.with_padmix = 1;
699 } else {
700 rfatt.att = 9;
701 rfatt.with_padmix = 0;
702 }
703 b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
704 }
705 b43_dummy_transmission(dev);
706 phy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI);
707 if (B43_DEBUG) {
708 /* Current-Idle-TSSI sanity check. */
709 if (abs(phy->cur_idle_tssi - phy->tgt_idle_tssi) >= 20) {
710 b43dbg(dev->wl,
711 "!WARNING! Idle-TSSI phy->cur_idle_tssi "
712 "measuring failed. (cur=%d, tgt=%d). Disabling TX power "
713 "adjustment.\n", phy->cur_idle_tssi,
714 phy->tgt_idle_tssi);
715 phy->cur_idle_tssi = 0;
716 }
717 }
718 if (phy->radio_ver == 0x2050 && phy->analog == 0) {
719 b43_radio_write16(dev, 0x0076,
720 b43_radio_read16(dev, 0x0076)
721 & 0xFF7B);
722 } else {
723 b43_set_txpower_g(dev, &old_bbatt,
724 &old_rfatt, old_tx_control);
725 }
726 }
727 b43_hardware_pctl_init(dev);
728 b43_shm_clear_tssi(dev);
729}
730
731static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable)
732{
733 int i;
734
735 if (dev->phy.rev < 3) {
736 if (enable)
737 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
738 b43_ofdmtab_write16(dev,
739 B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8);
740 b43_ofdmtab_write16(dev,
741 B43_OFDMTAB_WRSSI, i, 0xFFF8);
742 }
743 else
744 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) {
745 b43_ofdmtab_write16(dev,
746 B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]);
747 b43_ofdmtab_write16(dev,
748 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]);
749 }
750 } else {
751 if (enable)
752 for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++)
753 b43_ofdmtab_write16(dev,
754 B43_OFDMTAB_WRSSI, i, 0x0820);
755 else
756 for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++)
757 b43_ofdmtab_write16(dev,
758 B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]);
759 }
760}
761
762static void b43_phy_ww(struct b43_wldev *dev)
763{
764 u16 b, curr_s, best_s = 0xFFFF;
765 int i;
766
767 b43_phy_write(dev, B43_PHY_CRS0,
768 b43_phy_read(dev, B43_PHY_CRS0) & ~B43_PHY_CRS0_EN);
769 b43_phy_write(dev, B43_PHY_OFDM(0x1B),
770 b43_phy_read(dev, B43_PHY_OFDM(0x1B)) | 0x1000);
771 b43_phy_write(dev, B43_PHY_OFDM(0x82),
772 (b43_phy_read(dev, B43_PHY_OFDM(0x82)) & 0xF0FF) | 0x0300);
773 b43_radio_write16(dev, 0x0009,
774 b43_radio_read16(dev, 0x0009) | 0x0080);
775 b43_radio_write16(dev, 0x0012,
776 (b43_radio_read16(dev, 0x0012) & 0xFFFC) | 0x0002);
777 b43_wa_initgains(dev);
778 b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5);
779 b = b43_phy_read(dev, B43_PHY_PWRDOWN);
780 b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005);
781 b43_radio_write16(dev, 0x0004,
782 b43_radio_read16(dev, 0x0004) | 0x0004);
783 for (i = 0x10; i <= 0x20; i++) {
784 b43_radio_write16(dev, 0x0013, i);
785 curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF;
786 if (!curr_s) {
787 best_s = 0x0000;
788 break;
789 } else if (curr_s >= 0x0080)
790 curr_s = 0x0100 - curr_s;
791 if (curr_s < best_s)
792 best_s = curr_s;
793 }
794 b43_phy_write(dev, B43_PHY_PWRDOWN, b);
795 b43_radio_write16(dev, 0x0004,
796 b43_radio_read16(dev, 0x0004) & 0xFFFB);
797 b43_radio_write16(dev, 0x0013, best_s);
798 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC);
799 b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80);
800 b43_phy_write(dev, B43_PHY_OFDM(0xB6), 0x1C00);
801 b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0);
802 b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0);
803 b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF);
804 b43_phy_write(dev, B43_PHY_OFDM(0xBB),
805 (b43_phy_read(dev, B43_PHY_OFDM(0xBB)) & 0xF000) | 0x0053);
806 b43_phy_write(dev, B43_PHY_OFDM61,
807 (b43_phy_read(dev, B43_PHY_OFDM61) & 0xFE1F) | 0x0120);
808 b43_phy_write(dev, B43_PHY_OFDM(0x13),
809 (b43_phy_read(dev, B43_PHY_OFDM(0x13)) & 0x0FFF) | 0x3000);
810 b43_phy_write(dev, B43_PHY_OFDM(0x14),
811 (b43_phy_read(dev, B43_PHY_OFDM(0x14)) & 0x0FFF) | 0x3000);
812 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017);
813 for (i = 0; i < 6; i++)
814 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F);
815 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E);
816 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011);
817 b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013);
818 b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030);
819 b43_phy_write(dev, B43_PHY_CRS0,
820 b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
821}
822
823/* Initialize APHY. This is also called for the GPHY in some cases. */
824static void b43_phy_inita(struct b43_wldev *dev)
825{
826 struct ssb_bus *bus = dev->dev->bus;
827 struct b43_phy *phy = &dev->phy;
828
829 might_sleep();
830
831 if (phy->rev >= 6) {
832 if (phy->type == B43_PHYTYPE_A)
833 b43_phy_write(dev, B43_PHY_OFDM(0x1B),
834 b43_phy_read(dev, B43_PHY_OFDM(0x1B)) & ~0x1000);
835 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
836 b43_phy_write(dev, B43_PHY_ENCORE,
837 b43_phy_read(dev, B43_PHY_ENCORE) | 0x0010);
838 else
839 b43_phy_write(dev, B43_PHY_ENCORE,
840 b43_phy_read(dev, B43_PHY_ENCORE) & ~0x1010);
841 }
842
843 b43_wa_all(dev);
844
845 if (phy->type == B43_PHYTYPE_A) {
846 if (phy->gmode && (phy->rev < 3))
847 b43_phy_write(dev, 0x0034,
848 b43_phy_read(dev, 0x0034) | 0x0001);
849 b43_phy_rssiagc(dev, 0);
850
851 b43_phy_write(dev, B43_PHY_CRS0,
852 b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);
853
854 b43_radio_init2060(dev);
855
856 if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
857 ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
858 (bus->boardinfo.type == SSB_BOARD_BU4309))) {
859 ; //TODO: A PHY LO
860 }
861
862 if (phy->rev >= 3)
863 b43_phy_ww(dev);
864
865 hardware_pctl_init_aphy(dev);
866
867 //TODO: radar detection
868 }
869
870 if ((phy->type == B43_PHYTYPE_G) &&
871 (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
872 b43_phy_write(dev, B43_PHY_OFDM(0x6E),
873 (b43_phy_read(dev, B43_PHY_OFDM(0x6E))
874 & 0xE000) | 0x3CF);
875 }
876}
877
878static void b43_phy_initb5(struct b43_wldev *dev)
879{
880 struct ssb_bus *bus = dev->dev->bus;
881 struct b43_phy *phy = &dev->phy;
882 u16 offset, value;
883 u8 old_channel;
884
885 if (phy->analog == 1) {
886 b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A)
887 | 0x0050);
888 }
889 if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
890 (bus->boardinfo.type != SSB_BOARD_BU4306)) {
891 value = 0x2120;
892 for (offset = 0x00A8; offset < 0x00C7; offset++) {
893 b43_phy_write(dev, offset, value);
894 value += 0x202;
895 }
896 }
897 b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF)
898 | 0x0700);
899 if (phy->radio_ver == 0x2050)
900 b43_phy_write(dev, 0x0038, 0x0667);
901
902 if (phy->gmode || phy->rev >= 2) {
903 if (phy->radio_ver == 0x2050) {
904 b43_radio_write16(dev, 0x007A,
905 b43_radio_read16(dev, 0x007A)
906 | 0x0020);
907 b43_radio_write16(dev, 0x0051,
908 b43_radio_read16(dev, 0x0051)
909 | 0x0004);
910 }
911 b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000);
912
913 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
914 b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
915
916 b43_phy_write(dev, 0x001C, 0x186A);
917
918 b43_phy_write(dev, 0x0013,
919 (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900);
920 b43_phy_write(dev, 0x0035,
921 (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064);
922 b43_phy_write(dev, 0x005D,
923 (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A);
924 }
925
926 if (dev->bad_frames_preempt) {
927 b43_phy_write(dev, B43_PHY_RADIO_BITFIELD,
928 b43_phy_read(dev,
929 B43_PHY_RADIO_BITFIELD) | (1 << 11));
930 }
931
932 if (phy->analog == 1) {
933 b43_phy_write(dev, 0x0026, 0xCE00);
934 b43_phy_write(dev, 0x0021, 0x3763);
935 b43_phy_write(dev, 0x0022, 0x1BC3);
936 b43_phy_write(dev, 0x0023, 0x06F9);
937 b43_phy_write(dev, 0x0024, 0x037E);
938 } else
939 b43_phy_write(dev, 0x0026, 0xCC00);
940 b43_phy_write(dev, 0x0030, 0x00C6);
941 b43_write16(dev, 0x03EC, 0x3F22);
942
943 if (phy->analog == 1)
944 b43_phy_write(dev, 0x0020, 0x3E1C);
945 else
946 b43_phy_write(dev, 0x0020, 0x301C);
947
948 if (phy->analog == 0)
949 b43_write16(dev, 0x03E4, 0x3000);
950
951 old_channel = phy->channel;
952 /* Force to channel 7, even if not supported. */
953 b43_radio_selectchannel(dev, 7, 0);
954
955 if (phy->radio_ver != 0x2050) {
956 b43_radio_write16(dev, 0x0075, 0x0080);
957 b43_radio_write16(dev, 0x0079, 0x0081);
958 }
959
960 b43_radio_write16(dev, 0x0050, 0x0020);
961 b43_radio_write16(dev, 0x0050, 0x0023);
962
963 if (phy->radio_ver == 0x2050) {
964 b43_radio_write16(dev, 0x0050, 0x0020);
965 b43_radio_write16(dev, 0x005A, 0x0070);
966 }
967
968 b43_radio_write16(dev, 0x005B, 0x007B);
969 b43_radio_write16(dev, 0x005C, 0x00B0);
970
971 b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007);
972
973 b43_radio_selectchannel(dev, old_channel, 0);
974
975 b43_phy_write(dev, 0x0014, 0x0080);
976 b43_phy_write(dev, 0x0032, 0x00CA);
977 b43_phy_write(dev, 0x002A, 0x88A3);
978
979 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
980
981 if (phy->radio_ver == 0x2050)
982 b43_radio_write16(dev, 0x005D, 0x000D);
983
984 b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
985}
986
987static void b43_phy_initb6(struct b43_wldev *dev)
988{
989 struct b43_phy *phy = &dev->phy;
990 u16 offset, val;
991 u8 old_channel;
992
993 b43_phy_write(dev, 0x003E, 0x817A);
994 b43_radio_write16(dev, 0x007A,
995 (b43_radio_read16(dev, 0x007A) | 0x0058));
996 if (phy->radio_rev == 4 || phy->radio_rev == 5) {
997 b43_radio_write16(dev, 0x51, 0x37);
998 b43_radio_write16(dev, 0x52, 0x70);
999 b43_radio_write16(dev, 0x53, 0xB3);
1000 b43_radio_write16(dev, 0x54, 0x9B);
1001 b43_radio_write16(dev, 0x5A, 0x88);
1002 b43_radio_write16(dev, 0x5B, 0x88);
1003 b43_radio_write16(dev, 0x5D, 0x88);
1004 b43_radio_write16(dev, 0x5E, 0x88);
1005 b43_radio_write16(dev, 0x7D, 0x88);
1006 b43_hf_write(dev, b43_hf_read(dev)
1007 | B43_HF_TSSIRPSMW);
1008 }
1009 B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7); /* We had code for these revs here... */
1010 if (phy->radio_rev == 8) {
1011 b43_radio_write16(dev, 0x51, 0);
1012 b43_radio_write16(dev, 0x52, 0x40);
1013 b43_radio_write16(dev, 0x53, 0xB7);
1014 b43_radio_write16(dev, 0x54, 0x98);
1015 b43_radio_write16(dev, 0x5A, 0x88);
1016 b43_radio_write16(dev, 0x5B, 0x6B);
1017 b43_radio_write16(dev, 0x5C, 0x0F);
1018 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
1019 b43_radio_write16(dev, 0x5D, 0xFA);
1020 b43_radio_write16(dev, 0x5E, 0xD8);
1021 } else {
1022 b43_radio_write16(dev, 0x5D, 0xF5);
1023 b43_radio_write16(dev, 0x5E, 0xB8);
1024 }
1025 b43_radio_write16(dev, 0x0073, 0x0003);
1026 b43_radio_write16(dev, 0x007D, 0x00A8);
1027 b43_radio_write16(dev, 0x007C, 0x0001);
1028 b43_radio_write16(dev, 0x007E, 0x0008);
1029 }
1030 val = 0x1E1F;
1031 for (offset = 0x0088; offset < 0x0098; offset++) {
1032 b43_phy_write(dev, offset, val);
1033 val -= 0x0202;
1034 }
1035 val = 0x3E3F;
1036 for (offset = 0x0098; offset < 0x00A8; offset++) {
1037 b43_phy_write(dev, offset, val);
1038 val -= 0x0202;
1039 }
1040 val = 0x2120;
1041 for (offset = 0x00A8; offset < 0x00C8; offset++) {
1042 b43_phy_write(dev, offset, (val & 0x3F3F));
1043 val += 0x0202;
1044 }
1045 if (phy->type == B43_PHYTYPE_G) {
1046 b43_radio_write16(dev, 0x007A,
1047 b43_radio_read16(dev, 0x007A) | 0x0020);
1048 b43_radio_write16(dev, 0x0051,
1049 b43_radio_read16(dev, 0x0051) | 0x0004);
1050 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
1051 b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
1052 b43_phy_write(dev, 0x5B, 0);
1053 b43_phy_write(dev, 0x5C, 0);
1054 }
1055
1056 old_channel = phy->channel;
1057 if (old_channel >= 8)
1058 b43_radio_selectchannel(dev, 1, 0);
1059 else
1060 b43_radio_selectchannel(dev, 13, 0);
1061
1062 b43_radio_write16(dev, 0x0050, 0x0020);
1063 b43_radio_write16(dev, 0x0050, 0x0023);
1064 udelay(40);
1065 if (phy->radio_rev < 6 || phy->radio_rev == 8) {
1066 b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C)
1067 | 0x0002));
1068 b43_radio_write16(dev, 0x50, 0x20);
1069 }
1070 if (phy->radio_rev <= 2) {
1071 b43_radio_write16(dev, 0x7C, 0x20);
1072 b43_radio_write16(dev, 0x5A, 0x70);
1073 b43_radio_write16(dev, 0x5B, 0x7B);
1074 b43_radio_write16(dev, 0x5C, 0xB0);
1075 }
1076 b43_radio_write16(dev, 0x007A,
1077 (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007);
1078
1079 b43_radio_selectchannel(dev, old_channel, 0);
1080
1081 b43_phy_write(dev, 0x0014, 0x0200);
1082 if (phy->radio_rev >= 6)
1083 b43_phy_write(dev, 0x2A, 0x88C2);
1084 else
1085 b43_phy_write(dev, 0x2A, 0x8AC0);
1086 b43_phy_write(dev, 0x0038, 0x0668);
1087 b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt, phy->tx_control);
1088 if (phy->radio_rev <= 5) {
1089 b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D)
1090 & 0xFF80) | 0x0003);
1091 }
1092 if (phy->radio_rev <= 2)
1093 b43_radio_write16(dev, 0x005D, 0x000D);
1094
1095 if (phy->analog == 4) {
1096 b43_write16(dev, 0x3E4, 9);
1097 b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61)
1098 & 0x0FFF);
1099 } else {
1100 b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
1101 | 0x0004);
1102 }
1103 if (phy->type == B43_PHYTYPE_B)
1104 B43_WARN_ON(1);
1105 else if (phy->type == B43_PHYTYPE_G)
1106 b43_write16(dev, 0x03E6, 0x0);
1107}
1108
/* Measure the loopback gain of the radio/PHY TX->RX chain.
 * Every PHY and radio register touched here is saved up front and
 * restored before returning. The results are written to
 * phy->max_lb_gain and phy->trsw_rx_gain. */
static void b43_calc_loopback_gain(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	u16 backup_phy[16] = { 0 };
	u16 backup_radio[3];
	u16 backup_bband;
	u16 i, j, loop_i_max;
	u16 trsw_rx;
	u16 loop1_outer_done, loop1_inner_done;

	/* Save all registers that are clobbered below. */
	backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0);
	backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
	backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER);
	backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER);
		backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL);
	}
	backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A));
	backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59));
	backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58));
	backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A));
	backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03));
	backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK);
	backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL);
	backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B));
	backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL);
	backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);
	backup_bband = phy->bbatt.att;
	backup_radio[0] = b43_radio_read16(dev, 0x52);
	backup_radio[1] = b43_radio_read16(dev, 0x43);
	backup_radio[2] = b43_radio_read16(dev, 0x7A);

	/* Set up the RF/analog override registers for the measurement. */
	b43_phy_write(dev, B43_PHY_CRS0,
		      b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF);
	b43_phy_write(dev, B43_PHY_CCKBBANDCFG,
		      b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFE);
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFD);
	}
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
		       & 0xFFCF) | 0x10);

	b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780);
	b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810);
	b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D);

	b43_phy_write(dev, B43_PHY_CCK(0x0A),
		      b43_phy_read(dev, B43_PHY_CCK(0x0A)) | 0x2000);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFB);
	}
	b43_phy_write(dev, B43_PHY_CCK(0x03),
		      (b43_phy_read(dev, B43_PHY_CCK(0x03))
		       & 0xFF9F) | 0x40);

	/* Radio attenuator setup depends on the radio revision. */
	if (phy->radio_rev == 8) {
		b43_radio_write16(dev, 0x43, 0x000F);
	} else {
		b43_radio_write16(dev, 0x52, 0);
		b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
					      & 0xFFF0) | 0x9);
	}
	b43_phy_set_baseband_attenuation(dev, 11);

	if (phy->rev >= 3)
		b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020);
	else
		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020);
	b43_phy_write(dev, B43_PHY_LO_CTL, 0);

	b43_phy_write(dev, B43_PHY_CCK(0x2B),
		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
		       & 0xFFC0) | 0x01);
	b43_phy_write(dev, B43_PHY_CCK(0x2B),
		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
		       & 0xC0FF) | 0x800);

	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF);

	/* Boards with an external LNA (PHY rev >= 7) need extra overrides. */
	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
		if (phy->rev >= 7) {
			b43_phy_write(dev, B43_PHY_RFOVER,
				      b43_phy_read(dev, B43_PHY_RFOVER)
				      | 0x0800);
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      b43_phy_read(dev, B43_PHY_RFOVERVAL)
				      | 0x8000);
		}
	}
	b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A)
			  & 0x00F7);

	/* Loop 1: step radio attenuation (i) and the PGA field in
	 * RFOVERVAL (j) until the measured LO leakage reaches 0xDFC. */
	j = 0;
	loop_i_max = (phy->radio_rev == 8) ? 15 : 9;
	for (i = 0; i < loop_i_max; i++) {
		for (j = 0; j < 16; j++) {
			b43_radio_write16(dev, 0x43, i);
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
				       & 0xF0FF) | (j << 8));
			b43_phy_write(dev, B43_PHY_PGACTL,
				      (b43_phy_read(dev, B43_PHY_PGACTL)
				       & 0x0FFF) | 0xA000);
			b43_phy_write(dev, B43_PHY_PGACTL,
				      b43_phy_read(dev, B43_PHY_PGACTL)
				      | 0xF000);
			udelay(20);
			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
				goto exit_loop1;
		}
	}
      exit_loop1:
	loop1_outer_done = i;
	loop1_inner_done = j;
	/* Loop 2: with the TR switch forced, walk the PGA field again to
	 * derive the TRSW RX gain. */
	if (j >= 8) {
		b43_phy_write(dev, B43_PHY_RFOVERVAL,
			      b43_phy_read(dev, B43_PHY_RFOVERVAL)
			      | 0x30);
		trsw_rx = 0x1B;
		for (j = j - 8; j < 16; j++) {
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
				       & 0xF0FF) | (j << 8));
			b43_phy_write(dev, B43_PHY_PGACTL,
				      (b43_phy_read(dev, B43_PHY_PGACTL)
				       & 0x0FFF) | 0xA000);
			b43_phy_write(dev, B43_PHY_PGACTL,
				      b43_phy_read(dev, B43_PHY_PGACTL)
				      | 0xF000);
			udelay(20);
			trsw_rx -= 3;
			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
				goto exit_loop2;
		}
	} else
		trsw_rx = 0x18;
      exit_loop2:

	/* Restore all saved registers (reverse of the save order where
	 * the hardware requires it). */
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]);
	}
	b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]);
	b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]);
	b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]);
	b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]);
	b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]);
	b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]);
	b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]);
	b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]);
	b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]);

	b43_phy_set_baseband_attenuation(dev, backup_bband);

	b43_radio_write16(dev, 0x52, backup_radio[0]);
	b43_radio_write16(dev, 0x43, backup_radio[1]);
	b43_radio_write16(dev, 0x7A, backup_radio[2]);

	/* RFOVER is restored with bits 0/1 set briefly before the final value. */
	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003);
	udelay(10);
	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]);
	b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]);
	b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]);
	b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]);

	/* Derive the gains from how far the loops got. */
	phy->max_lb_gain =
	    ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11;
	phy->trsw_rx_gain = trsw_rx * 2;
}
1310
/* Initialize a G-PHY.
 * Runs the B5/B6 baseband init first, then the A-PHY (OFDM) init where
 * applicable, followed by revision/radio specific register setup,
 * loopback-gain calibration, LO calibration and NRSSI handling. */
static void b43_phy_initg(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	u16 tmp;

	/* Baseband init depends on the PHY revision. */
	if (phy->rev == 1)
		b43_phy_initb5(dev);
	else
		b43_phy_initb6(dev);

	if (phy->rev >= 2 || phy->gmode)
		b43_phy_inita(dev);

	if (phy->rev >= 2) {
		b43_phy_write(dev, B43_PHY_ANALOGOVER, 0);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0);
	}
	if (phy->rev == 2) {
		b43_phy_write(dev, B43_PHY_RFOVER, 0);
		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
	}
	if (phy->rev > 5) {
		b43_phy_write(dev, B43_PHY_RFOVER, 0x400);
		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
	}
	/* Workarounds for OFDM PHY versions 3 and 5. */
	if (phy->gmode || phy->rev >= 2) {
		tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM);
		tmp &= B43_PHYVER_VERSION;
		if (tmp == 3 || tmp == 5) {
			b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816);
			b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006);
		}
		if (tmp == 5) {
			b43_phy_write(dev, B43_PHY_OFDM(0xCC),
				      (b43_phy_read(dev, B43_PHY_OFDM(0xCC))
				       & 0x00FF) | 0x1F00);
		}
	}
	/* NOTE(review): this condition is logically equivalent to
	 * (phy->gmode || phy->rev >= 2) — kept as-is. */
	if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2)
		b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78);
	if (phy->radio_rev == 8) {
		b43_phy_write(dev, B43_PHY_EXTG(0x01),
			      b43_phy_read(dev, B43_PHY_EXTG(0x01))
			      | 0x80);
		b43_phy_write(dev, B43_PHY_OFDM(0x3E),
			      b43_phy_read(dev, B43_PHY_OFDM(0x3E))
			      | 0x4);
	}
	if (has_loopback_gain(phy))
		b43_calc_loopback_gain(dev);

	/* Cache the radio init value so subsequent inits can skip the
	 * expensive b43_radio_init2050() run. */
	if (phy->radio_rev != 8) {
		if (phy->initval == 0xFFFF)
			phy->initval = b43_radio_init2050(dev);
		else
			b43_radio_write16(dev, 0x0078, phy->initval);
	}
	b43_lo_g_init(dev);
	/* Program TX bias (and TX magnification, if available) into
	 * radio register 0x52. */
	if (has_tx_magnification(phy)) {
		b43_radio_write16(dev, 0x52,
				  (b43_radio_read16(dev, 0x52) & 0xFF00)
				  | phy->lo_control->tx_bias | phy->
				  lo_control->tx_magn);
	} else {
		b43_radio_write16(dev, 0x52,
				  (b43_radio_read16(dev, 0x52) & 0xFFF0)
				  | phy->lo_control->tx_bias);
	}
	if (phy->rev >= 6) {
		b43_phy_write(dev, B43_PHY_CCK(0x36),
			      (b43_phy_read(dev, B43_PHY_CCK(0x36))
			       & 0x0FFF) | (phy->lo_control->
					    tx_bias << 12));
	}
	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
	else
		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
	if (phy->rev < 2)
		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
	else
		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
	if (phy->gmode || phy->rev >= 2) {
		b43_lo_g_adjust(dev);
		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
	}

	/* NRSSI handling: boards without the RSSI boardflag get the
	 * lookup table reset; others get slope/threshold calibration. */
	if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
		/* The specs state to update the NRSSI LT with
		 * the value 0x7FFFFFFF here. I think that is some weird
		 * compiler optimization in the original driver.
		 * Essentially, what we do here is resetting all NRSSI LT
		 * entries to -32 (see the clamp_val() in nrssi_hw_update())
		 */
		b43_nrssi_hw_update(dev, 0xFFFF);	//FIXME?
		b43_calc_nrssi_threshold(dev);
	} else if (phy->gmode || phy->rev >= 2) {
		/* nrssi[] == -1000 marks "not yet calibrated". */
		if (phy->nrssi[0] == -1000) {
			B43_WARN_ON(phy->nrssi[1] != -1000);
			b43_calc_nrssi_slope(dev);
		} else
			b43_calc_nrssi_threshold(dev);
	}
	if (phy->radio_rev == 8)
		b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230);
	b43_phy_init_pctl(dev);
	/* FIXME: The spec says in the following if, the 0 should be replaced
	   'if OFDM may not be used in the current locale'
	   but OFDM is legal everywhere */
	if ((dev->dev->bus->chip_id == 0x4306
	     && dev->dev->bus->chip_package == 2) || 0) {
		b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0)
			      & 0xBFFF);
		b43_phy_write(dev, B43_PHY_OFDM(0xC3),
			      b43_phy_read(dev, B43_PHY_OFDM(0xC3))
			      & 0x7FFF);
	}
}
1429
1430/* Set the baseband attenuation value on chip. */
1431void b43_phy_set_baseband_attenuation(struct b43_wldev *dev,
1432 u16 baseband_attenuation)
1433{
1434 struct b43_phy *phy = &dev->phy;
1435
1436 if (phy->analog == 0) {
1437 b43_write16(dev, B43_MMIO_PHY0, (b43_read16(dev, B43_MMIO_PHY0)
1438 & 0xFFF0) |
1439 baseband_attenuation);
1440 } else if (phy->analog > 1) {
1441 b43_phy_write(dev, B43_PHY_DACCTL,
1442 (b43_phy_read(dev, B43_PHY_DACCTL)
1443 & 0xFFC3) | (baseband_attenuation << 2));
1444 } else {
1445 b43_phy_write(dev, B43_PHY_DACCTL,
1446 (b43_phy_read(dev, B43_PHY_DACCTL)
1447 & 0xFF87) | (baseband_attenuation << 3));
1448 }
1449}
1450
1451/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
1452 * This function converts a TSSI value to dBm in Q5.2
1453 */
1454static s8 b43_phy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
1455{
1456 struct b43_phy *phy = &dev->phy;
1457 s8 dbm = 0;
1458 s32 tmp;
1459
1460 tmp = (phy->tgt_idle_tssi - phy->cur_idle_tssi + tssi);
1461
1462 switch (phy->type) {
1463 case B43_PHYTYPE_A:
1464 tmp += 0x80;
1465 tmp = clamp_val(tmp, 0x00, 0xFF);
1466 dbm = phy->tssi2dbm[tmp];
1467 //TODO: There's a FIXME on the specs
1468 break;
1469 case B43_PHYTYPE_B:
1470 case B43_PHYTYPE_G:
1471 tmp = clamp_val(tmp, 0x00, 0x3F);
1472 dbm = phy->tssi2dbm[tmp];
1473 break;
1474 default:
1475 B43_WARN_ON(1);
1476 }
1477
1478 return dbm;
1479}
1480
/* Bring a baseband/RF attenuation pair into the ranges permitted by
 * the LO-control tables, trading one value against the other.
 * One RF attenuation step is worth four baseband steps, so each trade
 * moves the pair by (rf +/- 1, bb -/+ 4). If no legal trade remains,
 * the values are finally hard-clamped into their ranges. */
void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
				     int *_bbatt, int *_rfatt)
{
	int rfatt = *_rfatt;
	int bbatt = *_bbatt;
	struct b43_txpower_lo_control *lo = dev->phy.lo_control;

	/* Get baseband and radio attenuation values into their permitted ranges.
	 * Radio attenuation affects power level 4 times as much as baseband. */

	/* Range constants */
	const int rf_min = lo->rfatt_list.min_val;
	const int rf_max = lo->rfatt_list.max_val;
	const int bb_min = lo->bbatt_list.min_val;
	const int bb_max = lo->bbatt_list.max_val;

	while (1) {
		/* Bail out if the overshoot cannot be compensated by the
		 * other value without pushing that one out of range, too. */
		if (rfatt > rf_max && bbatt > bb_max - 4)
			break;	/* Can not get it into ranges */
		if (rfatt < rf_min && bbatt < bb_min + 4)
			break;	/* Can not get it into ranges */
		if (bbatt > bb_max && rfatt > rf_max - 1)
			break;	/* Can not get it into ranges */
		if (bbatt < bb_min && rfatt < rf_min + 1)
			break;	/* Can not get it into ranges */

		/* Trade 4 baseband steps for 1 RF step (or vice versa)
		 * in whichever direction reduces the violation. */
		if (bbatt > bb_max) {
			bbatt -= 4;
			rfatt += 1;
			continue;
		}
		if (bbatt < bb_min) {
			bbatt += 4;
			rfatt -= 1;
			continue;
		}
		if (rfatt > rf_max) {
			rfatt -= 1;
			bbatt += 4;
			continue;
		}
		if (rfatt < rf_min) {
			rfatt += 1;
			bbatt -= 4;
			continue;
		}
		/* Both values are in range. */
		break;
	}

	*_rfatt = clamp_val(rfatt, rf_min, rf_max);
	*_bbatt = clamp_val(bbatt, bb_min, bb_max);
}
1533
/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower
 * Periodically recalculate and program the TX power: read the TSSI
 * feedback from shared memory, estimate the current output power,
 * compare it to the desired power and adjust the RF/baseband
 * attenuation (and the TX control bits for 2050 rev 2 radios). */
void b43_phy_xmitpower(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;

	/* cur_idle_tssi == 0 means power control is not initialized yet. */
	if (phy->cur_idle_tssi == 0)
		return;
	/* The BU4306 bringup board does not do TX power control. */
	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
	    (bus->boardinfo.type == SSB_BOARD_BU4306))
		return;
#ifdef CONFIG_B43_DEBUG
	if (phy->manual_txpower_control)
		return;
#endif

	switch (phy->type) {
	case B43_PHYTYPE_A:{

			//TODO: Nothing for A PHYs yet :-/

			break;
		}
	case B43_PHYTYPE_B:
	case B43_PHYTYPE_G:{
			u16 tmp;
			s8 v0, v1, v2, v3;
			s8 average;
			int max_pwr;
			int desired_pwr, estimated_pwr, pwr_adjust;
			int rfatt_delta, bbatt_delta;
			int rfatt, bbatt;
			u8 tx_control;

			/* Fetch the four TSSI samples from SHM. */
			tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x0058);
			v0 = (s8) (tmp & 0x00FF);
			v1 = (s8) ((tmp & 0xFF00) >> 8);
			tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x005A);
			v2 = (s8) (tmp & 0x00FF);
			v3 = (s8) ((tmp & 0xFF00) >> 8);
			tmp = 0;

			/* 0x7F means "no sample"; fall back to the OFDM
			 * TSSI values at SHM 0x70/0x72. */
			if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F
			    || v3 == 0x7F) {
				tmp =
				    b43_shm_read16(dev, B43_SHM_SHARED, 0x0070);
				v0 = (s8) (tmp & 0x00FF);
				v1 = (s8) ((tmp & 0xFF00) >> 8);
				tmp =
				    b43_shm_read16(dev, B43_SHM_SHARED, 0x0072);
				v2 = (s8) (tmp & 0x00FF);
				v3 = (s8) ((tmp & 0xFF00) >> 8);
				if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F
				    || v3 == 0x7F)
					return;
				v0 = (v0 + 0x20) & 0x3F;
				v1 = (v1 + 0x20) & 0x3F;
				v2 = (v2 + 0x20) & 0x3F;
				v3 = (v3 + 0x20) & 0x3F;
				/* tmp == 1 marks "OFDM samples used". */
				tmp = 1;
			}
			b43_shm_clear_tssi(dev);

			/* Round-to-nearest average of the four samples. */
			average = (v0 + v1 + v2 + v3 + 2) / 4;

			if (tmp
			    && (b43_shm_read16(dev, B43_SHM_SHARED, 0x005E) &
				0x8))
				average -= 13;

			estimated_pwr =
			    b43_phy_estimate_power_out(dev, average);

			/* Maximum power from SPROM (Q5.2 dBm). */
			max_pwr = dev->dev->bus->sprom.maxpwr_bg;
			if ((dev->dev->bus->sprom.boardflags_lo
			     & B43_BFL_PACTRL) && (phy->type == B43_PHYTYPE_G))
				max_pwr -= 0x3;
			if (unlikely(max_pwr <= 0)) {
				b43warn(dev->wl,
					"Invalid max-TX-power value in SPROM.\n");
				max_pwr = 60;	/* fake it */
				dev->dev->bus->sprom.maxpwr_bg = max_pwr;
			}

			/*TODO:
			   max_pwr = min(REG - dev->dev->bus->sprom.antennagain_bgphy - 0x6, max_pwr)
			   where REG is the max power as per the regulatory domain
			 */

			/* Get desired power (in Q5.2) */
			desired_pwr = INT_TO_Q52(phy->power_level);
			/* And limit it. max_pwr already is Q5.2 */
			desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
			if (b43_debug(dev, B43_DBG_XMITPOWER)) {
				b43dbg(dev->wl,
				       "Current TX power output: " Q52_FMT
				       " dBm, " "Desired TX power output: "
				       Q52_FMT " dBm\n", Q52_ARG(estimated_pwr),
				       Q52_ARG(desired_pwr));
			}

			/* Calculate the adjustment delta. */
			pwr_adjust = desired_pwr - estimated_pwr;

			/* RF attenuation delta. */
			rfatt_delta = ((pwr_adjust + 7) / 8);
			/* Lower attenuation => Bigger power output. Negate it. */
			rfatt_delta = -rfatt_delta;

			/* Baseband attenuation delta. */
			bbatt_delta = pwr_adjust / 2;
			/* Lower attenuation => Bigger power output. Negate it. */
			bbatt_delta = -bbatt_delta;
			/* RF att affects power level 4 times as much as
			 * Baseband attennuation. Subtract it. */
			bbatt_delta -= 4 * rfatt_delta;

			/* So do we finally need to adjust something? */
			if ((rfatt_delta == 0) && (bbatt_delta == 0))
				return;

			/* Calculate the new attenuation values. */
			bbatt = phy->bbatt.att;
			bbatt += bbatt_delta;
			rfatt = phy->rfatt.att;
			rfatt += rfatt_delta;

			b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
			tx_control = phy->tx_control;
			/* 2050 radio rev 2: toggle the PA/TX-mixer control
			 * bits at the attenuation extremes. */
			if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
				if (rfatt <= 1) {
					if (tx_control == 0) {
						tx_control =
						    B43_TXCTL_PA2DB |
						    B43_TXCTL_TXMIX;
						rfatt += 2;
						bbatt += 2;
					} else if (dev->dev->bus->sprom.
						   boardflags_lo &
						   B43_BFL_PACTRL) {
						bbatt += 4 * (rfatt - 2);
						rfatt = 2;
					}
				} else if (rfatt > 4 && tx_control) {
					tx_control = 0;
					if (bbatt < 3) {
						rfatt -= 3;
						bbatt += 2;
					} else {
						rfatt -= 2;
						bbatt -= 2;
					}
				}
			}
			/* Save the control values */
			phy->tx_control = tx_control;
			b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
			phy->rfatt.att = rfatt;
			phy->bbatt.att = bbatt;

			/* Adjust the hardware */
			b43_phy_lock(dev);
			b43_radio_lock(dev);
			b43_set_txpower_g(dev, &phy->bbatt, &phy->rfatt,
					  phy->tx_control);
			b43_radio_unlock(dev);
			b43_phy_unlock(dev);
			break;
		}
	case B43_PHYTYPE_N:
		b43_nphy_xmitpower(dev);
		break;
	default:
		B43_WARN_ON(1);
	}
}
1710
/* Fixed-point helper division: round-to-nearest for non-negative
 * numerators, plain C truncation (toward zero) for negative ones. */
static inline s32 b43_tssi2dbm_ad(s32 num, s32 den)
{
	return (num < 0) ? (num / den) : ((num + den / 2) / den);
}
1718
/* Compute one entry of the dynamic TSSI-to-dBm table from the SPROM
 * PA coefficients pab0..pab2, using a fixed-point iteration on f.
 * Returns 0 on success, -EINVAL if the iteration does not converge
 * within 16 steps. */
static inline
    s8 b43_tssi2dbm_entry(s8 entry[], u8 index, s16 pab0, s16 pab1, s16 pab2)
{
	s32 m1, m2, f = 256, q, delta;
	s8 i = 0;

	m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
	m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
	/* Iterate f until consecutive values differ by less than 2. */
	do {
		if (i > 15)
			return -EINVAL;	/* did not converge */
		q = b43_tssi2dbm_ad(f * 4096 -
				    b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
		delta = abs(q - f);
		f = q;
		i++;
	} while (delta >= 2);
	/* NOTE(review): the upper clamp of 128 does not fit in s8
	 * (wraps to -128 on assignment) — looks suspicious; confirm
	 * against the specs whether 127 was intended. */
	entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
	return 0;
}
1739
/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table
 * Set up phy->tssi2dbm and phy->tgt_idle_tssi: either point at a
 * static table, or generate a 64-entry dynamic table from the SPROM
 * PA coefficients (phy->dyn_tssi_tbl is set so it gets freed later).
 * Returns 0 on success, -ENOMEM or -ENODEV on failure. */
int b43_phy_init_tssi2dbm_table(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	s16 pab0, pab1, pab2;
	u8 idx;
	s8 *dyn_tssi2dbm;

	/* Pick the PA coefficient set for the PHY band. */
	if (phy->type == B43_PHYTYPE_A) {
		pab0 = (s16) (dev->dev->bus->sprom.pa1b0);
		pab1 = (s16) (dev->dev->bus->sprom.pa1b1);
		pab2 = (s16) (dev->dev->bus->sprom.pa1b2);
	} else {
		pab0 = (s16) (dev->dev->bus->sprom.pa0b0);
		pab1 = (s16) (dev->dev->bus->sprom.pa0b1);
		pab2 = (s16) (dev->dev->bus->sprom.pa0b2);
	}

	/* BCM4301 with a non-2050 radio always uses the static B table. */
	if ((dev->dev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)) {
		phy->tgt_idle_tssi = 0x34;
		phy->tssi2dbm = b43_tssi2dbm_b_table;
		return 0;
	}

	/* 0 and -1 (0xFFFF) both mean "unset" in SPROM. */
	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
		/* The pabX values are set in SPROM. Use them. */
		if (phy->type == B43_PHYTYPE_A) {
			if ((s8) dev->dev->bus->sprom.itssi_a != 0 &&
			    (s8) dev->dev->bus->sprom.itssi_a != -1)
				phy->tgt_idle_tssi =
				    (s8) (dev->dev->bus->sprom.itssi_a);
			else
				phy->tgt_idle_tssi = 62;
		} else {
			if ((s8) dev->dev->bus->sprom.itssi_bg != 0 &&
			    (s8) dev->dev->bus->sprom.itssi_bg != -1)
				phy->tgt_idle_tssi =
				    (s8) (dev->dev->bus->sprom.itssi_bg);
			else
				phy->tgt_idle_tssi = 62;
		}
		/* 64 s8 entries. */
		dyn_tssi2dbm = kmalloc(64, GFP_KERNEL);
		if (dyn_tssi2dbm == NULL) {
			b43err(dev->wl, "Could not allocate memory "
			       "for tssi2dbm table\n");
			return -ENOMEM;
		}
		for (idx = 0; idx < 64; idx++)
			if (b43_tssi2dbm_entry
			    (dyn_tssi2dbm, idx, pab0, pab1, pab2)) {
				phy->tssi2dbm = NULL;
				b43err(dev->wl, "Could not generate "
				       "tssi2dBm table\n");
				kfree(dyn_tssi2dbm);
				return -ENODEV;
			}
		phy->tssi2dbm = dyn_tssi2dbm;
		phy->dyn_tssi_tbl = 1;
	} else {
		/* pabX values not set in SPROM. */
		switch (phy->type) {
		case B43_PHYTYPE_A:
			/* APHY needs a generated table. */
			phy->tssi2dbm = NULL;
			b43err(dev->wl, "Could not generate tssi2dBm "
			       "table (wrong SPROM info)!\n");
			return -ENODEV;
		case B43_PHYTYPE_B:
			phy->tgt_idle_tssi = 0x34;
			phy->tssi2dbm = b43_tssi2dbm_b_table;
			break;
		case B43_PHYTYPE_G:
			phy->tgt_idle_tssi = 0x34;
			phy->tssi2dbm = b43_tssi2dbm_g_table;
			break;
		}
	}

	return 0;
}
1821
1822int b43_phy_init(struct b43_wldev *dev)
1823{
1824 struct b43_phy *phy = &dev->phy;
1825 bool unsupported = 0;
1826 int err = 0;
1827
1828 switch (phy->type) {
1829 case B43_PHYTYPE_A:
1830 if (phy->rev == 2 || phy->rev == 3)
1831 b43_phy_inita(dev);
1832 else
1833 unsupported = 1;
1834 break;
1835 case B43_PHYTYPE_G:
1836 b43_phy_initg(dev);
1837 break;
1838 case B43_PHYTYPE_N:
1839 err = b43_phy_initn(dev);
1840 break;
1841 default:
1842 unsupported = 1;
1843 }
1844 if (unsupported)
1845 b43err(dev->wl, "Unknown PHYTYPE found\n");
1846
1847 return err;
1848}
1849
/* Select the RX antenna (or automatic diversity).
 * The firmware's antenna-diversity helper is disabled around the
 * register update and re-enabled afterwards. */
void b43_set_rx_antenna(struct b43_wldev *dev, int antenna)
{
	struct b43_phy *phy = &dev->phy;
	u64 hf;
	u16 tmp;
	int autodiv = 0;

	if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
		autodiv = 1;

	/* Disable the firmware antenna-diversity help while we reprogram. */
	hf = b43_hf_read(dev);
	hf &= ~B43_HF_ANTDIVHELP;
	b43_hf_write(dev, hf);

	switch (phy->type) {
	case B43_PHYTYPE_A:
	case B43_PHYTYPE_G:
		/* Program the antenna selection into BBANDCFG. */
		tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
		tmp &= ~B43_PHY_BBANDCFG_RXANT;
		tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
		    << B43_PHY_BBANDCFG_RXANT_SHIFT;
		b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);

		if (autodiv) {
			tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
			if (antenna == B43_ANTENNA_AUTO0)
				tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
			else
				tmp |= B43_PHY_ANTDWELL_AUTODIV1;
			b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
		}
		/* G-PHY needs additional OFDM diversity setup. */
		if (phy->type == B43_PHYTYPE_G) {
			tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT);
			if (autodiv)
				tmp |= B43_PHY_ANTWRSETT_ARXDIV;
			else
				tmp &= ~B43_PHY_ANTWRSETT_ARXDIV;
			b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp);
			if (phy->rev >= 2) {
				tmp = b43_phy_read(dev, B43_PHY_OFDM61);
				tmp |= B43_PHY_OFDM61_10;
				b43_phy_write(dev, B43_PHY_OFDM61, tmp);

				tmp =
				    b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK);
				tmp = (tmp & 0xFF00) | 0x15;
				b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK,
					      tmp);

				if (phy->rev == 2) {
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      8);
				} else {
					tmp =
					    b43_phy_read(dev,
							 B43_PHY_ADIVRELATED);
					tmp = (tmp & 0xFF00) | 8;
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      tmp);
				}
			}
			if (phy->rev >= 6)
				b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC);
		} else {
			/* A-PHY path. */
			if (phy->rev < 3) {
				tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
				tmp = (tmp & 0xFF00) | 0x24;
				b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
			} else {
				tmp = b43_phy_read(dev, B43_PHY_OFDM61);
				tmp |= 0x10;
				b43_phy_write(dev, B43_PHY_OFDM61, tmp);
				if (phy->analog == 3) {
					b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
						      0x1D);
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      8);
				} else {
					b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT,
						      0x3A);
					tmp =
					    b43_phy_read(dev,
							 B43_PHY_ADIVRELATED);
					tmp = (tmp & 0xFF00) | 8;
					b43_phy_write(dev, B43_PHY_ADIVRELATED,
						      tmp);
				}
			}
		}
		break;
	case B43_PHYTYPE_B:
		tmp = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
		tmp &= ~B43_PHY_BBANDCFG_RXANT;
		tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
		    << B43_PHY_BBANDCFG_RXANT_SHIFT;
		b43_phy_write(dev, B43_PHY_CCKBBANDCFG, tmp);
		break;
	case B43_PHYTYPE_N:
		b43_nphy_set_rxantenna(dev, antenna);
		break;
	default:
		B43_WARN_ON(1);
	}

	/* Re-enable the firmware antenna-diversity help. */
	hf |= B43_HF_ANTDIVHELP;
	b43_hf_write(dev, hf);
}
1957
1958/* Get the freq, as it has to be written to the device. */
1959static inline u16 channel2freq_bg(u8 channel)
1960{
1961 B43_WARN_ON(!(channel >= 1 && channel <= 14));
1962
1963 return b43_radio_channel_codes_bg[channel - 1];
1964}
1965
1966/* Get the freq, as it has to be written to the device. */
1967static inline u16 channel2freq_a(u8 channel)
1968{
1969 B43_WARN_ON(channel > 200);
1970
1971 return (5000 + 5 * channel);
1972}
1973
/* Lock the radio registers against concurrent access by the device.
 * Sets the MACCTL radio-lock bit, posts the write by reading MACCTL
 * back, and waits for any in-flight radio access to finish.
 * Must be balanced by b43_radio_unlock(). */
void b43_radio_lock(struct b43_wldev *dev)
{
	u32 macctl;

	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	/* Catch unbalanced locking: the lock must not already be held. */
	B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK);
	macctl |= B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
	/* Commit the write and wait for the device
	 * to exit any radio register access. */
	b43_read32(dev, B43_MMIO_MACCTL);
	udelay(10);
}
1987
/* Release the radio-register lock taken by b43_radio_lock().
 * Any pending radio write is committed first by reading a register. */
void b43_radio_unlock(struct b43_wldev *dev)
{
	u32 macctl;

	/* Commit any write */
	b43_read16(dev, B43_MMIO_PHY_VER);
	/* unlock */
	macctl = b43_read32(dev, B43_MMIO_MACCTL);
	/* Catch unbalanced locking: the lock must currently be held. */
	B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK));
	macctl &= ~B43_MACCTL_RADIOLOCK;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);
}
2000
2001u16 b43_radio_read16(struct b43_wldev *dev, u16 offset)
2002{
2003 struct b43_phy *phy = &dev->phy;
2004
2005 /* Offset 1 is a 32-bit register. */
2006 B43_WARN_ON(offset == 1);
2007
2008 switch (phy->type) {
2009 case B43_PHYTYPE_A:
2010 offset |= 0x40;
2011 break;
2012 case B43_PHYTYPE_B:
2013 if (phy->radio_ver == 0x2053) {
2014 if (offset < 0x70)
2015 offset += 0x80;
2016 else if (offset < 0x80)
2017 offset += 0x70;
2018 } else if (phy->radio_ver == 0x2050) {
2019 offset |= 0x80;
2020 } else
2021 B43_WARN_ON(1);
2022 break;
2023 case B43_PHYTYPE_G:
2024 offset |= 0x80;
2025 break;
2026 case B43_PHYTYPE_N:
2027 offset |= 0x100;
2028 break;
2029 case B43_PHYTYPE_LP:
2030 /* No adjustment required. */
2031 break;
2032 default:
2033 B43_WARN_ON(1);
2034 }
2035
2036 b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset);
2037 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
2038}
2039
/* Write a 16-bit radio register through the indirect access port:
 * latch the register offset first, then write the data. */
void b43_radio_write16(struct b43_wldev *dev, u16 offset, u16 val)
{
	/* Offset 1 is a 32-bit register. */
	B43_WARN_ON(offset == 1);

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, offset);
	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, val);
}
2048
2049void b43_radio_mask(struct b43_wldev *dev, u16 offset, u16 mask)
2050{
2051 b43_radio_write16(dev, offset,
2052 b43_radio_read16(dev, offset) & mask);
2053}
2054
2055void b43_radio_set(struct b43_wldev *dev, u16 offset, u16 set)
2056{
2057 b43_radio_write16(dev, offset,
2058 b43_radio_read16(dev, offset) | set);
2059}
2060
2061void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
2062{
2063 b43_radio_write16(dev, offset,
2064 (b43_radio_read16(dev, offset) & mask) | set);
2065}
2066
2067static void b43_set_all_gains(struct b43_wldev *dev, 315static void b43_set_all_gains(struct b43_wldev *dev,
2068 s16 first, s16 second, s16 third) 316 s16 first, s16 second, s16 third)
2069{ 317{
@@ -2134,108 +382,10 @@ static void b43_set_original_gains(struct b43_wldev *dev)
2134 b43_dummy_transmission(dev); 382 b43_dummy_transmission(dev);
2135} 383}
2136 384
/* Synthetic PU workaround for old 2050 radios (rev < 6):
 * briefly tune to another channel, sleep 1ms, then tune to the
 * requested channel. */
static void b43_synth_pu_workaround(struct b43_wldev *dev, u8 channel)
{
	struct b43_phy *phy = &dev->phy;

	/* msleep() below means this must run in sleepable context. */
	might_sleep();

	if (phy->radio_ver != 0x2050 || phy->radio_rev >= 6) {
		/* We do not need the workaround. */
		return;
	}

	/* Detour via channel+4 (or channel 1 for high channels). */
	if (channel <= 10) {
		b43_write16(dev, B43_MMIO_CHANNEL,
			    channel2freq_bg(channel + 4));
	} else {
		b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(1));
	}
	msleep(1);
	b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));
}
2158
/* Detect adjacent channel interference (ACI) on the given channel.
 * Takes 100 RSSI samples and returns 1 if at least 20 of them fall
 * below the reference RSSI, 0 otherwise. PHY register 0x0403 is
 * temporarily reprogrammed and restored before returning. */
u8 b43_radio_aci_detect(struct b43_wldev *dev, u8 channel)
{
	struct b43_phy *phy = &dev->phy;
	u8 ret = 0;
	u16 saved, rssi, temp;
	int i, j = 0;

	saved = b43_phy_read(dev, 0x0403);
	b43_radio_selectchannel(dev, channel, 0);
	b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5);
	if (phy->aci_hw_rssi)
		rssi = b43_phy_read(dev, 0x048A) & 0x3F;
	else
		rssi = saved & 0x3F;
	/* clamp temp to signed 5bit */
	/* NOTE(review): rssi/temp are u16, so "-= 64" wraps to a huge
	 * unsigned value instead of going negative, and the
	 * "temp < rssi" compare below is then unsigned — this inverts
	 * the intended signed comparison when the signs differ.
	 * Looks suspicious; confirm against the specs. */
	if (rssi > 32)
		rssi -= 64;
	for (i = 0; i < 100; i++) {
		temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F;
		if (temp > 32)
			temp -= 64;
		if (temp < rssi)
			j++;
		if (j >= 20)
			ret = 1;
	}
	b43_phy_write(dev, 0x0403, saved);

	return ret;
}
2189
2190u8 b43_radio_aci_scan(struct b43_wldev * dev)
2191{
2192 struct b43_phy *phy = &dev->phy;
2193 u8 ret[13];
2194 unsigned int channel = phy->channel;
2195 unsigned int i, j, start, end;
2196
2197 if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0)))
2198 return 0;
2199
2200 b43_phy_lock(dev);
2201 b43_radio_lock(dev);
2202 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
2203 b43_phy_write(dev, B43_PHY_G_CRS,
2204 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
2205 b43_set_all_gains(dev, 3, 8, 1);
2206
2207 start = (channel - 5 > 0) ? channel - 5 : 1;
2208 end = (channel + 5 < 14) ? channel + 5 : 13;
2209
2210 for (i = start; i <= end; i++) {
2211 if (abs(channel - i) > 2)
2212 ret[i - 1] = b43_radio_aci_detect(dev, i);
2213 }
2214 b43_radio_selectchannel(dev, channel, 0);
2215 b43_phy_write(dev, 0x0802,
2216 (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003);
2217 b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8);
2218 b43_phy_write(dev, B43_PHY_G_CRS,
2219 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
2220 b43_set_original_gains(dev);
2221 for (i = 0; i < 13; i++) {
2222 if (!ret[i])
2223 continue;
2224 end = (i + 5 < 13) ? i + 5 : 13;
2225 for (j = i; j < end; j++)
2226 ret[j] = 1;
2227 }
2228 b43_radio_unlock(dev);
2229 b43_phy_unlock(dev);
2230
2231 return ret[channel - 1];
2232}
2233
2234/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 385/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
2235void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) 386void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val)
2236{ 387{
2237 b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); 388 b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset);
2238 mmiowb();
2239 b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); 389 b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val);
2240} 390}
2241 391
@@ -2267,17 +417,17 @@ void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
2267/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 417/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
2268void b43_nrssi_mem_update(struct b43_wldev *dev) 418void b43_nrssi_mem_update(struct b43_wldev *dev)
2269{ 419{
2270 struct b43_phy *phy = &dev->phy; 420 struct b43_phy_g *gphy = dev->phy.g;
2271 s16 i, delta; 421 s16 i, delta;
2272 s32 tmp; 422 s32 tmp;
2273 423
2274 delta = 0x1F - phy->nrssi[0]; 424 delta = 0x1F - gphy->nrssi[0];
2275 for (i = 0; i < 64; i++) { 425 for (i = 0; i < 64; i++) {
2276 tmp = (i - delta) * phy->nrssislope; 426 tmp = (i - delta) * gphy->nrssislope;
2277 tmp /= 0x10000; 427 tmp /= 0x10000;
2278 tmp += 0x3A; 428 tmp += 0x3A;
2279 tmp = clamp_val(tmp, 0, 0x3F); 429 tmp = clamp_val(tmp, 0, 0x3F);
2280 phy->nrssi_lt[i] = tmp; 430 gphy->nrssi_lt[i] = tmp;
2281 } 431 }
2282} 432}
2283 433
@@ -2442,347 +592,230 @@ static void b43_calc_nrssi_offset(struct b43_wldev *dev)
2442void b43_calc_nrssi_slope(struct b43_wldev *dev) 592void b43_calc_nrssi_slope(struct b43_wldev *dev)
2443{ 593{
2444 struct b43_phy *phy = &dev->phy; 594 struct b43_phy *phy = &dev->phy;
595 struct b43_phy_g *gphy = phy->g;
2445 u16 backup[18] = { 0 }; 596 u16 backup[18] = { 0 };
2446 u16 tmp; 597 u16 tmp;
2447 s16 nrssi0, nrssi1; 598 s16 nrssi0, nrssi1;
2448 599
2449 switch (phy->type) { 600 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2450 case B43_PHYTYPE_B:
2451 backup[0] = b43_radio_read16(dev, 0x007A);
2452 backup[1] = b43_radio_read16(dev, 0x0052);
2453 backup[2] = b43_radio_read16(dev, 0x0043);
2454 backup[3] = b43_phy_read(dev, 0x0030);
2455 backup[4] = b43_phy_read(dev, 0x0026);
2456 backup[5] = b43_phy_read(dev, 0x0015);
2457 backup[6] = b43_phy_read(dev, 0x002A);
2458 backup[7] = b43_phy_read(dev, 0x0020);
2459 backup[8] = b43_phy_read(dev, 0x005A);
2460 backup[9] = b43_phy_read(dev, 0x0059);
2461 backup[10] = b43_phy_read(dev, 0x0058);
2462 backup[11] = b43_read16(dev, 0x03E2);
2463 backup[12] = b43_read16(dev, 0x03E6);
2464 backup[13] = b43_read16(dev, B43_MMIO_CHANNEL_EXT);
2465
2466 tmp = b43_radio_read16(dev, 0x007A);
2467 tmp &= (phy->rev >= 5) ? 0x007F : 0x000F;
2468 b43_radio_write16(dev, 0x007A, tmp);
2469 b43_phy_write(dev, 0x0030, 0x00FF);
2470 b43_write16(dev, 0x03EC, 0x7F7F);
2471 b43_phy_write(dev, 0x0026, 0x0000);
2472 b43_phy_write(dev, 0x0015, b43_phy_read(dev, 0x0015) | 0x0020);
2473 b43_phy_write(dev, 0x002A, 0x08A3);
2474 b43_radio_write16(dev, 0x007A,
2475 b43_radio_read16(dev, 0x007A) | 0x0080);
2476 601
2477 nrssi0 = (s16) b43_phy_read(dev, 0x0027); 602 if (phy->radio_rev >= 9)
2478 b43_radio_write16(dev, 0x007A, 603 return;
2479 b43_radio_read16(dev, 0x007A) & 0x007F); 604 if (phy->radio_rev == 8)
2480 if (phy->rev >= 2) { 605 b43_calc_nrssi_offset(dev);
2481 b43_write16(dev, 0x03E6, 0x0040);
2482 } else if (phy->rev == 0) {
2483 b43_write16(dev, 0x03E6, 0x0122);
2484 } else {
2485 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2486 b43_read16(dev,
2487 B43_MMIO_CHANNEL_EXT) & 0x2000);
2488 }
2489 b43_phy_write(dev, 0x0020, 0x3F3F);
2490 b43_phy_write(dev, 0x0015, 0xF330);
2491 b43_radio_write16(dev, 0x005A, 0x0060);
2492 b43_radio_write16(dev, 0x0043,
2493 b43_radio_read16(dev, 0x0043) & 0x00F0);
2494 b43_phy_write(dev, 0x005A, 0x0480);
2495 b43_phy_write(dev, 0x0059, 0x0810);
2496 b43_phy_write(dev, 0x0058, 0x000D);
2497 udelay(20);
2498
2499 nrssi1 = (s16) b43_phy_read(dev, 0x0027);
2500 b43_phy_write(dev, 0x0030, backup[3]);
2501 b43_radio_write16(dev, 0x007A, backup[0]);
2502 b43_write16(dev, 0x03E2, backup[11]);
2503 b43_phy_write(dev, 0x0026, backup[4]);
2504 b43_phy_write(dev, 0x0015, backup[5]);
2505 b43_phy_write(dev, 0x002A, backup[6]);
2506 b43_synth_pu_workaround(dev, phy->channel);
2507 if (phy->rev != 0)
2508 b43_write16(dev, 0x03F4, backup[13]);
2509
2510 b43_phy_write(dev, 0x0020, backup[7]);
2511 b43_phy_write(dev, 0x005A, backup[8]);
2512 b43_phy_write(dev, 0x0059, backup[9]);
2513 b43_phy_write(dev, 0x0058, backup[10]);
2514 b43_radio_write16(dev, 0x0052, backup[1]);
2515 b43_radio_write16(dev, 0x0043, backup[2]);
2516
2517 if (nrssi0 == nrssi1)
2518 phy->nrssislope = 0x00010000;
2519 else
2520 phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
2521
2522 if (nrssi0 <= -4) {
2523 phy->nrssi[0] = nrssi0;
2524 phy->nrssi[1] = nrssi1;
2525 }
2526 break;
2527 case B43_PHYTYPE_G:
2528 if (phy->radio_rev >= 9)
2529 return;
2530 if (phy->radio_rev == 8)
2531 b43_calc_nrssi_offset(dev);
2532 606
2533 b43_phy_write(dev, B43_PHY_G_CRS, 607 b43_phy_write(dev, B43_PHY_G_CRS,
2534 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF); 608 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
2535 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC); 609 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
2536 backup[7] = b43_read16(dev, 0x03E2); 610 backup[7] = b43_read16(dev, 0x03E2);
2537 b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000); 611 b43_write16(dev, 0x03E2, b43_read16(dev, 0x03E2) | 0x8000);
2538 backup[0] = b43_radio_read16(dev, 0x007A); 612 backup[0] = b43_radio_read16(dev, 0x007A);
2539 backup[1] = b43_radio_read16(dev, 0x0052); 613 backup[1] = b43_radio_read16(dev, 0x0052);
2540 backup[2] = b43_radio_read16(dev, 0x0043); 614 backup[2] = b43_radio_read16(dev, 0x0043);
2541 backup[3] = b43_phy_read(dev, 0x0015); 615 backup[3] = b43_phy_read(dev, 0x0015);
2542 backup[4] = b43_phy_read(dev, 0x005A); 616 backup[4] = b43_phy_read(dev, 0x005A);
2543 backup[5] = b43_phy_read(dev, 0x0059); 617 backup[5] = b43_phy_read(dev, 0x0059);
2544 backup[6] = b43_phy_read(dev, 0x0058); 618 backup[6] = b43_phy_read(dev, 0x0058);
2545 backup[8] = b43_read16(dev, 0x03E6); 619 backup[8] = b43_read16(dev, 0x03E6);
2546 backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT); 620 backup[9] = b43_read16(dev, B43_MMIO_CHANNEL_EXT);
2547 if (phy->rev >= 3) { 621 if (phy->rev >= 3) {
2548 backup[10] = b43_phy_read(dev, 0x002E); 622 backup[10] = b43_phy_read(dev, 0x002E);
2549 backup[11] = b43_phy_read(dev, 0x002F); 623 backup[11] = b43_phy_read(dev, 0x002F);
2550 backup[12] = b43_phy_read(dev, 0x080F); 624 backup[12] = b43_phy_read(dev, 0x080F);
2551 backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL); 625 backup[13] = b43_phy_read(dev, B43_PHY_G_LO_CONTROL);
2552 backup[14] = b43_phy_read(dev, 0x0801); 626 backup[14] = b43_phy_read(dev, 0x0801);
2553 backup[15] = b43_phy_read(dev, 0x0060); 627 backup[15] = b43_phy_read(dev, 0x0060);
2554 backup[16] = b43_phy_read(dev, 0x0014); 628 backup[16] = b43_phy_read(dev, 0x0014);
2555 backup[17] = b43_phy_read(dev, 0x0478); 629 backup[17] = b43_phy_read(dev, 0x0478);
2556 b43_phy_write(dev, 0x002E, 0); 630 b43_phy_write(dev, 0x002E, 0);
2557 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0); 631 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, 0);
2558 switch (phy->rev) { 632 switch (phy->rev) {
2559 case 4: 633 case 4:
2560 case 6: 634 case 6:
2561 case 7: 635 case 7:
2562 b43_phy_write(dev, 0x0478, 636 b43_phy_write(dev, 0x0478,
2563 b43_phy_read(dev, 0x0478) 637 b43_phy_read(dev, 0x0478)
2564 | 0x0100); 638 | 0x0100);
2565 b43_phy_write(dev, 0x0801, 639 b43_phy_write(dev, 0x0801,
2566 b43_phy_read(dev, 0x0801) 640 b43_phy_read(dev, 0x0801)
2567 | 0x0040);
2568 break;
2569 case 3:
2570 case 5:
2571 b43_phy_write(dev, 0x0801,
2572 b43_phy_read(dev, 0x0801)
2573 & 0xFFBF);
2574 break;
2575 }
2576 b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060)
2577 | 0x0040); 641 | 0x0040);
2578 b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014) 642 break;
2579 | 0x0200); 643 case 3:
2580 } 644 case 5:
2581 b43_radio_write16(dev, 0x007A, 645 b43_phy_write(dev, 0x0801,
2582 b43_radio_read16(dev, 0x007A) | 0x0070); 646 b43_phy_read(dev, 0x0801)
2583 b43_set_all_gains(dev, 0, 8, 0); 647 & 0xFFBF);
2584 b43_radio_write16(dev, 0x007A, 648 break;
2585 b43_radio_read16(dev, 0x007A) & 0x00F7);
2586 if (phy->rev >= 2) {
2587 b43_phy_write(dev, 0x0811,
2588 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
2589 0x0030);
2590 b43_phy_write(dev, 0x0812,
2591 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
2592 0x0010);
2593 } 649 }
2594 b43_radio_write16(dev, 0x007A, 650 b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060)
2595 b43_radio_read16(dev, 0x007A) | 0x0080); 651 | 0x0040);
2596 udelay(20); 652 b43_phy_write(dev, 0x0014, b43_phy_read(dev, 0x0014)
653 | 0x0200);
654 }
655 b43_radio_write16(dev, 0x007A,
656 b43_radio_read16(dev, 0x007A) | 0x0070);
657 b43_set_all_gains(dev, 0, 8, 0);
658 b43_radio_write16(dev, 0x007A,
659 b43_radio_read16(dev, 0x007A) & 0x00F7);
660 if (phy->rev >= 2) {
661 b43_phy_write(dev, 0x0811,
662 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
663 0x0030);
664 b43_phy_write(dev, 0x0812,
665 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
666 0x0010);
667 }
668 b43_radio_write16(dev, 0x007A,
669 b43_radio_read16(dev, 0x007A) | 0x0080);
670 udelay(20);
2597 671
2598 nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); 672 nrssi0 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
2599 if (nrssi0 >= 0x0020) 673 if (nrssi0 >= 0x0020)
2600 nrssi0 -= 0x0040; 674 nrssi0 -= 0x0040;
2601 675
2602 b43_radio_write16(dev, 0x007A, 676 b43_radio_write16(dev, 0x007A,
2603 b43_radio_read16(dev, 0x007A) & 0x007F); 677 b43_radio_read16(dev, 0x007A) & 0x007F);
2604 if (phy->rev >= 2) { 678 if (phy->rev >= 2) {
2605 b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003) 679 b43_phy_write(dev, 0x0003, (b43_phy_read(dev, 0x0003)
2606 & 0xFF9F) | 0x0040); 680 & 0xFF9F) | 0x0040);
2607 } 681 }
2608
2609 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2610 b43_read16(dev, B43_MMIO_CHANNEL_EXT)
2611 | 0x2000);
2612 b43_radio_write16(dev, 0x007A,
2613 b43_radio_read16(dev, 0x007A) | 0x000F);
2614 b43_phy_write(dev, 0x0015, 0xF330);
2615 if (phy->rev >= 2) {
2616 b43_phy_write(dev, 0x0812,
2617 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
2618 0x0020);
2619 b43_phy_write(dev, 0x0811,
2620 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
2621 0x0020);
2622 }
2623 682
2624 b43_set_all_gains(dev, 3, 0, 1); 683 b43_write16(dev, B43_MMIO_CHANNEL_EXT,
2625 if (phy->radio_rev == 8) { 684 b43_read16(dev, B43_MMIO_CHANNEL_EXT)
2626 b43_radio_write16(dev, 0x0043, 0x001F); 685 | 0x2000);
2627 } else { 686 b43_radio_write16(dev, 0x007A,
2628 tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F; 687 b43_radio_read16(dev, 0x007A) | 0x000F);
2629 b43_radio_write16(dev, 0x0052, tmp | 0x0060); 688 b43_phy_write(dev, 0x0015, 0xF330);
2630 tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0; 689 if (phy->rev >= 2) {
2631 b43_radio_write16(dev, 0x0043, tmp | 0x0009); 690 b43_phy_write(dev, 0x0812,
2632 } 691 (b43_phy_read(dev, 0x0812) & 0xFFCF) |
2633 b43_phy_write(dev, 0x005A, 0x0480); 692 0x0020);
2634 b43_phy_write(dev, 0x0059, 0x0810); 693 b43_phy_write(dev, 0x0811,
2635 b43_phy_write(dev, 0x0058, 0x000D); 694 (b43_phy_read(dev, 0x0811) & 0xFFCF) |
2636 udelay(20); 695 0x0020);
2637 nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F); 696 }
2638 if (nrssi1 >= 0x0020)
2639 nrssi1 -= 0x0040;
2640 if (nrssi0 == nrssi1)
2641 phy->nrssislope = 0x00010000;
2642 else
2643 phy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
2644 if (nrssi0 >= -4) {
2645 phy->nrssi[0] = nrssi1;
2646 phy->nrssi[1] = nrssi0;
2647 }
2648 if (phy->rev >= 3) {
2649 b43_phy_write(dev, 0x002E, backup[10]);
2650 b43_phy_write(dev, 0x002F, backup[11]);
2651 b43_phy_write(dev, 0x080F, backup[12]);
2652 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]);
2653 }
2654 if (phy->rev >= 2) {
2655 b43_phy_write(dev, 0x0812,
2656 b43_phy_read(dev, 0x0812) & 0xFFCF);
2657 b43_phy_write(dev, 0x0811,
2658 b43_phy_read(dev, 0x0811) & 0xFFCF);
2659 }
2660 697
2661 b43_radio_write16(dev, 0x007A, backup[0]); 698 b43_set_all_gains(dev, 3, 0, 1);
2662 b43_radio_write16(dev, 0x0052, backup[1]); 699 if (phy->radio_rev == 8) {
2663 b43_radio_write16(dev, 0x0043, backup[2]); 700 b43_radio_write16(dev, 0x0043, 0x001F);
2664 b43_write16(dev, 0x03E2, backup[7]); 701 } else {
2665 b43_write16(dev, 0x03E6, backup[8]); 702 tmp = b43_radio_read16(dev, 0x0052) & 0xFF0F;
2666 b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]); 703 b43_radio_write16(dev, 0x0052, tmp | 0x0060);
2667 b43_phy_write(dev, 0x0015, backup[3]); 704 tmp = b43_radio_read16(dev, 0x0043) & 0xFFF0;
2668 b43_phy_write(dev, 0x005A, backup[4]); 705 b43_radio_write16(dev, 0x0043, tmp | 0x0009);
2669 b43_phy_write(dev, 0x0059, backup[5]); 706 }
2670 b43_phy_write(dev, 0x0058, backup[6]); 707 b43_phy_write(dev, 0x005A, 0x0480);
2671 b43_synth_pu_workaround(dev, phy->channel); 708 b43_phy_write(dev, 0x0059, 0x0810);
2672 b43_phy_write(dev, 0x0802, 709 b43_phy_write(dev, 0x0058, 0x000D);
2673 b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002)); 710 udelay(20);
2674 b43_set_original_gains(dev); 711 nrssi1 = (s16) ((b43_phy_read(dev, 0x047F) >> 8) & 0x003F);
2675 b43_phy_write(dev, B43_PHY_G_CRS, 712 if (nrssi1 >= 0x0020)
2676 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000); 713 nrssi1 -= 0x0040;
2677 if (phy->rev >= 3) { 714 if (nrssi0 == nrssi1)
2678 b43_phy_write(dev, 0x0801, backup[14]); 715 gphy->nrssislope = 0x00010000;
2679 b43_phy_write(dev, 0x0060, backup[15]); 716 else
2680 b43_phy_write(dev, 0x0014, backup[16]); 717 gphy->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
2681 b43_phy_write(dev, 0x0478, backup[17]); 718 if (nrssi0 >= -4) {
2682 } 719 gphy->nrssi[0] = nrssi1;
2683 b43_nrssi_mem_update(dev); 720 gphy->nrssi[1] = nrssi0;
2684 b43_calc_nrssi_threshold(dev); 721 }
2685 break; 722 if (phy->rev >= 3) {
2686 default: 723 b43_phy_write(dev, 0x002E, backup[10]);
2687 B43_WARN_ON(1); 724 b43_phy_write(dev, 0x002F, backup[11]);
725 b43_phy_write(dev, 0x080F, backup[12]);
726 b43_phy_write(dev, B43_PHY_G_LO_CONTROL, backup[13]);
2688 } 727 }
728 if (phy->rev >= 2) {
729 b43_phy_write(dev, 0x0812,
730 b43_phy_read(dev, 0x0812) & 0xFFCF);
731 b43_phy_write(dev, 0x0811,
732 b43_phy_read(dev, 0x0811) & 0xFFCF);
733 }
734
735 b43_radio_write16(dev, 0x007A, backup[0]);
736 b43_radio_write16(dev, 0x0052, backup[1]);
737 b43_radio_write16(dev, 0x0043, backup[2]);
738 b43_write16(dev, 0x03E2, backup[7]);
739 b43_write16(dev, 0x03E6, backup[8]);
740 b43_write16(dev, B43_MMIO_CHANNEL_EXT, backup[9]);
741 b43_phy_write(dev, 0x0015, backup[3]);
742 b43_phy_write(dev, 0x005A, backup[4]);
743 b43_phy_write(dev, 0x0059, backup[5]);
744 b43_phy_write(dev, 0x0058, backup[6]);
745 b43_synth_pu_workaround(dev, phy->channel);
746 b43_phy_write(dev, 0x0802,
747 b43_phy_read(dev, 0x0802) | (0x0001 | 0x0002));
748 b43_set_original_gains(dev);
749 b43_phy_write(dev, B43_PHY_G_CRS,
750 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
751 if (phy->rev >= 3) {
752 b43_phy_write(dev, 0x0801, backup[14]);
753 b43_phy_write(dev, 0x0060, backup[15]);
754 b43_phy_write(dev, 0x0014, backup[16]);
755 b43_phy_write(dev, 0x0478, backup[17]);
756 }
757 b43_nrssi_mem_update(dev);
758 b43_calc_nrssi_threshold(dev);
2689} 759}
2690 760
2691void b43_calc_nrssi_threshold(struct b43_wldev *dev) 761static void b43_calc_nrssi_threshold(struct b43_wldev *dev)
2692{ 762{
2693 struct b43_phy *phy = &dev->phy; 763 struct b43_phy *phy = &dev->phy;
2694 s32 threshold; 764 struct b43_phy_g *gphy = phy->g;
2695 s32 a, b; 765 s32 a, b;
2696 s16 tmp16; 766 s16 tmp16;
2697 u16 tmp_u16; 767 u16 tmp_u16;
2698 768
2699 switch (phy->type) { 769 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2700 case B43_PHYTYPE_B:{ 770
2701 if (phy->radio_ver != 0x2050) 771 if (!phy->gmode ||
2702 return; 772 !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
2703 if (! 773 tmp16 = b43_nrssi_hw_read(dev, 0x20);
2704 (dev->dev->bus->sprom. 774 if (tmp16 >= 0x20)
2705 boardflags_lo & B43_BFL_RSSI)) 775 tmp16 -= 0x40;
2706 return; 776 if (tmp16 < 3) {
2707 777 b43_phy_write(dev, 0x048A,
2708 if (phy->radio_rev >= 6) { 778 (b43_phy_read(dev, 0x048A)
2709 threshold = 779 & 0xF000) | 0x09EB);
2710 (phy->nrssi[1] - phy->nrssi[0]) * 32; 780 } else {
2711 threshold += 20 * (phy->nrssi[0] + 1); 781 b43_phy_write(dev, 0x048A,
2712 threshold /= 40; 782 (b43_phy_read(dev, 0x048A)
2713 } else 783 & 0xF000) | 0x0AED);
2714 threshold = phy->nrssi[1] - 5;
2715
2716 threshold = clamp_val(threshold, 0, 0x3E);
2717 b43_phy_read(dev, 0x0020); /* dummy read */
2718 b43_phy_write(dev, 0x0020,
2719 (((u16) threshold) << 8) | 0x001C);
2720
2721 if (phy->radio_rev >= 6) {
2722 b43_phy_write(dev, 0x0087, 0x0E0D);
2723 b43_phy_write(dev, 0x0086, 0x0C0B);
2724 b43_phy_write(dev, 0x0085, 0x0A09);
2725 b43_phy_write(dev, 0x0084, 0x0808);
2726 b43_phy_write(dev, 0x0083, 0x0808);
2727 b43_phy_write(dev, 0x0082, 0x0604);
2728 b43_phy_write(dev, 0x0081, 0x0302);
2729 b43_phy_write(dev, 0x0080, 0x0100);
2730 }
2731 break;
2732 } 784 }
2733 case B43_PHYTYPE_G: 785 } else {
2734 if (!phy->gmode || 786 if (gphy->interfmode == B43_INTERFMODE_NONWLAN) {
2735 !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { 787 a = 0xE;
2736 tmp16 = b43_nrssi_hw_read(dev, 0x20); 788 b = 0xA;
2737 if (tmp16 >= 0x20) 789 } else if (!gphy->aci_wlan_automatic && gphy->aci_enable) {
2738 tmp16 -= 0x40; 790 a = 0x13;
2739 if (tmp16 < 3) { 791 b = 0x12;
2740 b43_phy_write(dev, 0x048A,
2741 (b43_phy_read(dev, 0x048A)
2742 & 0xF000) | 0x09EB);
2743 } else {
2744 b43_phy_write(dev, 0x048A,
2745 (b43_phy_read(dev, 0x048A)
2746 & 0xF000) | 0x0AED);
2747 }
2748 } else { 792 } else {
2749 if (phy->interfmode == B43_INTERFMODE_NONWLAN) { 793 a = 0xE;
2750 a = 0xE; 794 b = 0x11;
2751 b = 0xA;
2752 } else if (!phy->aci_wlan_automatic && phy->aci_enable) {
2753 a = 0x13;
2754 b = 0x12;
2755 } else {
2756 a = 0xE;
2757 b = 0x11;
2758 }
2759
2760 a = a * (phy->nrssi[1] - phy->nrssi[0]);
2761 a += (phy->nrssi[0] << 6);
2762 if (a < 32)
2763 a += 31;
2764 else
2765 a += 32;
2766 a = a >> 6;
2767 a = clamp_val(a, -31, 31);
2768
2769 b = b * (phy->nrssi[1] - phy->nrssi[0]);
2770 b += (phy->nrssi[0] << 6);
2771 if (b < 32)
2772 b += 31;
2773 else
2774 b += 32;
2775 b = b >> 6;
2776 b = clamp_val(b, -31, 31);
2777
2778 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
2779 tmp_u16 |= ((u32) b & 0x0000003F);
2780 tmp_u16 |= (((u32) a & 0x0000003F) << 6);
2781 b43_phy_write(dev, 0x048A, tmp_u16);
2782 } 795 }
2783 break; 796
2784 default: 797 a = a * (gphy->nrssi[1] - gphy->nrssi[0]);
2785 B43_WARN_ON(1); 798 a += (gphy->nrssi[0] << 6);
799 if (a < 32)
800 a += 31;
801 else
802 a += 32;
803 a = a >> 6;
804 a = clamp_val(a, -31, 31);
805
806 b = b * (gphy->nrssi[1] - gphy->nrssi[0]);
807 b += (gphy->nrssi[0] << 6);
808 if (b < 32)
809 b += 31;
810 else
811 b += 32;
812 b = b >> 6;
813 b = clamp_val(b, -31, 31);
814
815 tmp_u16 = b43_phy_read(dev, 0x048A) & 0xF000;
816 tmp_u16 |= ((u32) b & 0x0000003F);
817 tmp_u16 |= (((u32) a & 0x0000003F) << 6);
818 b43_phy_write(dev, 0x048A, tmp_u16);
2786 } 819 }
2787} 820}
2788 821
@@ -2860,9 +893,10 @@ static void
2860b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode) 893b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
2861{ 894{
2862 struct b43_phy *phy = &dev->phy; 895 struct b43_phy *phy = &dev->phy;
896 struct b43_phy_g *gphy = phy->g;
2863 u16 tmp, flipped; 897 u16 tmp, flipped;
2864 size_t stackidx = 0; 898 size_t stackidx = 0;
2865 u32 *stack = phy->interfstack; 899 u32 *stack = gphy->interfstack;
2866 900
2867 switch (mode) { 901 switch (mode) {
2868 case B43_INTERFMODE_NONWLAN: 902 case B43_INTERFMODE_NONWLAN:
@@ -2928,7 +962,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
2928 if (b43_phy_read(dev, 0x0033) & 0x0800) 962 if (b43_phy_read(dev, 0x0033) & 0x0800)
2929 break; 963 break;
2930 964
2931 phy->aci_enable = 1; 965 gphy->aci_enable = 1;
2932 966
2933 phy_stacksave(B43_PHY_RADIO_BITFIELD); 967 phy_stacksave(B43_PHY_RADIO_BITFIELD);
2934 phy_stacksave(B43_PHY_G_CRS); 968 phy_stacksave(B43_PHY_G_CRS);
@@ -3064,7 +1098,8 @@ static void
3064b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode) 1098b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
3065{ 1099{
3066 struct b43_phy *phy = &dev->phy; 1100 struct b43_phy *phy = &dev->phy;
3067 u32 *stack = phy->interfstack; 1101 struct b43_phy_g *gphy = phy->g;
1102 u32 *stack = gphy->interfstack;
3068 1103
3069 switch (mode) { 1104 switch (mode) {
3070 case B43_INTERFMODE_NONWLAN: 1105 case B43_INTERFMODE_NONWLAN:
@@ -3103,7 +1138,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
3103 if (!(b43_phy_read(dev, 0x0033) & 0x0800)) 1138 if (!(b43_phy_read(dev, 0x0033) & 0x0800))
3104 break; 1139 break;
3105 1140
3106 phy->aci_enable = 0; 1141 gphy->aci_enable = 0;
3107 1142
3108 phy_stackrestore(B43_PHY_RADIO_BITFIELD); 1143 phy_stackrestore(B43_PHY_RADIO_BITFIELD);
3109 phy_stackrestore(B43_PHY_G_CRS); 1144 phy_stackrestore(B43_PHY_G_CRS);
@@ -3153,47 +1188,6 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
3153#undef ofdmtab_stacksave 1188#undef ofdmtab_stacksave
3154#undef ofdmtab_stackrestore 1189#undef ofdmtab_stackrestore
3155 1190
3156int b43_radio_set_interference_mitigation(struct b43_wldev *dev, int mode)
3157{
3158 struct b43_phy *phy = &dev->phy;
3159 int currentmode;
3160
3161 if ((phy->type != B43_PHYTYPE_G) || (phy->rev == 0) || (!phy->gmode))
3162 return -ENODEV;
3163
3164 phy->aci_wlan_automatic = 0;
3165 switch (mode) {
3166 case B43_INTERFMODE_AUTOWLAN:
3167 phy->aci_wlan_automatic = 1;
3168 if (phy->aci_enable)
3169 mode = B43_INTERFMODE_MANUALWLAN;
3170 else
3171 mode = B43_INTERFMODE_NONE;
3172 break;
3173 case B43_INTERFMODE_NONE:
3174 case B43_INTERFMODE_NONWLAN:
3175 case B43_INTERFMODE_MANUALWLAN:
3176 break;
3177 default:
3178 return -EINVAL;
3179 }
3180
3181 currentmode = phy->interfmode;
3182 if (currentmode == mode)
3183 return 0;
3184 if (currentmode != B43_INTERFMODE_NONE)
3185 b43_radio_interference_mitigation_disable(dev, currentmode);
3186
3187 if (mode == B43_INTERFMODE_NONE) {
3188 phy->aci_enable = 0;
3189 phy->aci_hw_rssi = 0;
3190 } else
3191 b43_radio_interference_mitigation_enable(dev, mode);
3192 phy->interfmode = mode;
3193
3194 return 0;
3195}
3196
3197static u16 b43_radio_core_calibration_value(struct b43_wldev *dev) 1191static u16 b43_radio_core_calibration_value(struct b43_wldev *dev)
3198{ 1192{
3199 u16 reg, index, ret; 1193 u16 reg, index, ret;
@@ -3219,13 +1213,14 @@ static u16 radio2050_rfover_val(struct b43_wldev *dev,
3219 u16 phy_register, unsigned int lpd) 1213 u16 phy_register, unsigned int lpd)
3220{ 1214{
3221 struct b43_phy *phy = &dev->phy; 1215 struct b43_phy *phy = &dev->phy;
1216 struct b43_phy_g *gphy = phy->g;
3222 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 1217 struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
3223 1218
3224 if (!phy->gmode) 1219 if (!phy->gmode)
3225 return 0; 1220 return 0;
3226 1221
3227 if (has_loopback_gain(phy)) { 1222 if (has_loopback_gain(phy)) {
3228 int max_lb_gain = phy->max_lb_gain; 1223 int max_lb_gain = gphy->max_lb_gain;
3229 u16 extlna; 1224 u16 extlna;
3230 u16 i; 1225 u16 i;
3231 1226
@@ -3606,301 +1601,1682 @@ u16 b43_radio_init2050(struct b43_wldev *dev)
3606 return ret; 1601 return ret;
3607} 1602}
3608 1603
3609void b43_radio_init2060(struct b43_wldev *dev) 1604static void b43_phy_initb5(struct b43_wldev *dev)
3610{ 1605{
3611 int err; 1606 struct ssb_bus *bus = dev->dev->bus;
1607 struct b43_phy *phy = &dev->phy;
1608 struct b43_phy_g *gphy = phy->g;
1609 u16 offset, value;
1610 u8 old_channel;
3612 1611
3613 b43_radio_write16(dev, 0x0004, 0x00C0); 1612 if (phy->analog == 1) {
3614 b43_radio_write16(dev, 0x0005, 0x0008); 1613 b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A)
3615 b43_radio_write16(dev, 0x0009, 0x0040); 1614 | 0x0050);
3616 b43_radio_write16(dev, 0x0005, 0x00AA); 1615 }
3617 b43_radio_write16(dev, 0x0032, 0x008F); 1616 if ((bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM) &&
3618 b43_radio_write16(dev, 0x0006, 0x008F); 1617 (bus->boardinfo.type != SSB_BOARD_BU4306)) {
3619 b43_radio_write16(dev, 0x0034, 0x008F); 1618 value = 0x2120;
3620 b43_radio_write16(dev, 0x002C, 0x0007); 1619 for (offset = 0x00A8; offset < 0x00C7; offset++) {
3621 b43_radio_write16(dev, 0x0082, 0x0080); 1620 b43_phy_write(dev, offset, value);
3622 b43_radio_write16(dev, 0x0080, 0x0000); 1621 value += 0x202;
3623 b43_radio_write16(dev, 0x003F, 0x00DA); 1622 }
3624 b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008); 1623 }
3625 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0010); 1624 b43_phy_write(dev, 0x0035, (b43_phy_read(dev, 0x0035) & 0xF0FF)
3626 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); 1625 | 0x0700);
3627 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0020); 1626 if (phy->radio_ver == 0x2050)
3628 msleep(1); /* delay 400usec */ 1627 b43_phy_write(dev, 0x0038, 0x0667);
3629
3630 b43_radio_write16(dev, 0x0081,
3631 (b43_radio_read16(dev, 0x0081) & ~0x0020) | 0x0010);
3632 msleep(1); /* delay 400usec */
3633
3634 b43_radio_write16(dev, 0x0005,
3635 (b43_radio_read16(dev, 0x0005) & ~0x0008) | 0x0008);
3636 b43_radio_write16(dev, 0x0085, b43_radio_read16(dev, 0x0085) & ~0x0010);
3637 b43_radio_write16(dev, 0x0005, b43_radio_read16(dev, 0x0005) & ~0x0008);
3638 b43_radio_write16(dev, 0x0081, b43_radio_read16(dev, 0x0081) & ~0x0040);
3639 b43_radio_write16(dev, 0x0081,
3640 (b43_radio_read16(dev, 0x0081) & ~0x0040) | 0x0040);
3641 b43_radio_write16(dev, 0x0005,
3642 (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008);
3643 b43_phy_write(dev, 0x0063, 0xDDC6);
3644 b43_phy_write(dev, 0x0069, 0x07BE);
3645 b43_phy_write(dev, 0x006A, 0x0000);
3646
3647 err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_A, 0);
3648 B43_WARN_ON(err);
3649 1628
3650 msleep(1); 1629 if (phy->gmode || phy->rev >= 2) {
1630 if (phy->radio_ver == 0x2050) {
1631 b43_radio_write16(dev, 0x007A,
1632 b43_radio_read16(dev, 0x007A)
1633 | 0x0020);
1634 b43_radio_write16(dev, 0x0051,
1635 b43_radio_read16(dev, 0x0051)
1636 | 0x0004);
1637 }
1638 b43_write16(dev, B43_MMIO_PHY_RADIO, 0x0000);
1639
1640 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
1641 b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
1642
1643 b43_phy_write(dev, 0x001C, 0x186A);
1644
1645 b43_phy_write(dev, 0x0013,
1646 (b43_phy_read(dev, 0x0013) & 0x00FF) | 0x1900);
1647 b43_phy_write(dev, 0x0035,
1648 (b43_phy_read(dev, 0x0035) & 0xFFC0) | 0x0064);
1649 b43_phy_write(dev, 0x005D,
1650 (b43_phy_read(dev, 0x005D) & 0xFF80) | 0x000A);
1651 }
1652
1653 if (dev->bad_frames_preempt) {
1654 b43_phy_write(dev, B43_PHY_RADIO_BITFIELD,
1655 b43_phy_read(dev,
1656 B43_PHY_RADIO_BITFIELD) | (1 << 11));
1657 }
1658
1659 if (phy->analog == 1) {
1660 b43_phy_write(dev, 0x0026, 0xCE00);
1661 b43_phy_write(dev, 0x0021, 0x3763);
1662 b43_phy_write(dev, 0x0022, 0x1BC3);
1663 b43_phy_write(dev, 0x0023, 0x06F9);
1664 b43_phy_write(dev, 0x0024, 0x037E);
1665 } else
1666 b43_phy_write(dev, 0x0026, 0xCC00);
1667 b43_phy_write(dev, 0x0030, 0x00C6);
1668 b43_write16(dev, 0x03EC, 0x3F22);
1669
1670 if (phy->analog == 1)
1671 b43_phy_write(dev, 0x0020, 0x3E1C);
1672 else
1673 b43_phy_write(dev, 0x0020, 0x301C);
1674
1675 if (phy->analog == 0)
1676 b43_write16(dev, 0x03E4, 0x3000);
1677
1678 old_channel = phy->channel;
1679 /* Force to channel 7, even if not supported. */
1680 b43_gphy_channel_switch(dev, 7, 0);
1681
1682 if (phy->radio_ver != 0x2050) {
1683 b43_radio_write16(dev, 0x0075, 0x0080);
1684 b43_radio_write16(dev, 0x0079, 0x0081);
1685 }
1686
1687 b43_radio_write16(dev, 0x0050, 0x0020);
1688 b43_radio_write16(dev, 0x0050, 0x0023);
1689
1690 if (phy->radio_ver == 0x2050) {
1691 b43_radio_write16(dev, 0x0050, 0x0020);
1692 b43_radio_write16(dev, 0x005A, 0x0070);
1693 }
1694
1695 b43_radio_write16(dev, 0x005B, 0x007B);
1696 b43_radio_write16(dev, 0x005C, 0x00B0);
1697
1698 b43_radio_write16(dev, 0x007A, b43_radio_read16(dev, 0x007A) | 0x0007);
1699
1700 b43_gphy_channel_switch(dev, old_channel, 0);
1701
1702 b43_phy_write(dev, 0x0014, 0x0080);
1703 b43_phy_write(dev, 0x0032, 0x00CA);
1704 b43_phy_write(dev, 0x002A, 0x88A3);
1705
1706 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
1707
1708 if (phy->radio_ver == 0x2050)
1709 b43_radio_write16(dev, 0x005D, 0x000D);
1710
1711 b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
3651} 1712}
3652 1713
3653static inline u16 freq_r3A_value(u16 frequency) 1714static void b43_phy_initb6(struct b43_wldev *dev)
3654{ 1715{
3655 u16 value; 1716 struct b43_phy *phy = &dev->phy;
1717 struct b43_phy_g *gphy = phy->g;
1718 u16 offset, val;
1719 u8 old_channel;
1720
1721 b43_phy_write(dev, 0x003E, 0x817A);
1722 b43_radio_write16(dev, 0x007A,
1723 (b43_radio_read16(dev, 0x007A) | 0x0058));
1724 if (phy->radio_rev == 4 || phy->radio_rev == 5) {
1725 b43_radio_write16(dev, 0x51, 0x37);
1726 b43_radio_write16(dev, 0x52, 0x70);
1727 b43_radio_write16(dev, 0x53, 0xB3);
1728 b43_radio_write16(dev, 0x54, 0x9B);
1729 b43_radio_write16(dev, 0x5A, 0x88);
1730 b43_radio_write16(dev, 0x5B, 0x88);
1731 b43_radio_write16(dev, 0x5D, 0x88);
1732 b43_radio_write16(dev, 0x5E, 0x88);
1733 b43_radio_write16(dev, 0x7D, 0x88);
1734 b43_hf_write(dev, b43_hf_read(dev)
1735 | B43_HF_TSSIRPSMW);
1736 }
1737 B43_WARN_ON(phy->radio_rev == 6 || phy->radio_rev == 7); /* We had code for these revs here... */
1738 if (phy->radio_rev == 8) {
1739 b43_radio_write16(dev, 0x51, 0);
1740 b43_radio_write16(dev, 0x52, 0x40);
1741 b43_radio_write16(dev, 0x53, 0xB7);
1742 b43_radio_write16(dev, 0x54, 0x98);
1743 b43_radio_write16(dev, 0x5A, 0x88);
1744 b43_radio_write16(dev, 0x5B, 0x6B);
1745 b43_radio_write16(dev, 0x5C, 0x0F);
1746 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) {
1747 b43_radio_write16(dev, 0x5D, 0xFA);
1748 b43_radio_write16(dev, 0x5E, 0xD8);
1749 } else {
1750 b43_radio_write16(dev, 0x5D, 0xF5);
1751 b43_radio_write16(dev, 0x5E, 0xB8);
1752 }
1753 b43_radio_write16(dev, 0x0073, 0x0003);
1754 b43_radio_write16(dev, 0x007D, 0x00A8);
1755 b43_radio_write16(dev, 0x007C, 0x0001);
1756 b43_radio_write16(dev, 0x007E, 0x0008);
1757 }
1758 val = 0x1E1F;
1759 for (offset = 0x0088; offset < 0x0098; offset++) {
1760 b43_phy_write(dev, offset, val);
1761 val -= 0x0202;
1762 }
1763 val = 0x3E3F;
1764 for (offset = 0x0098; offset < 0x00A8; offset++) {
1765 b43_phy_write(dev, offset, val);
1766 val -= 0x0202;
1767 }
1768 val = 0x2120;
1769 for (offset = 0x00A8; offset < 0x00C8; offset++) {
1770 b43_phy_write(dev, offset, (val & 0x3F3F));
1771 val += 0x0202;
1772 }
1773 if (phy->type == B43_PHYTYPE_G) {
1774 b43_radio_write16(dev, 0x007A,
1775 b43_radio_read16(dev, 0x007A) | 0x0020);
1776 b43_radio_write16(dev, 0x0051,
1777 b43_radio_read16(dev, 0x0051) | 0x0004);
1778 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) | 0x0100);
1779 b43_phy_write(dev, 0x042B, b43_phy_read(dev, 0x042B) | 0x2000);
1780 b43_phy_write(dev, 0x5B, 0);
1781 b43_phy_write(dev, 0x5C, 0);
1782 }
1783
1784 old_channel = phy->channel;
1785 if (old_channel >= 8)
1786 b43_gphy_channel_switch(dev, 1, 0);
1787 else
1788 b43_gphy_channel_switch(dev, 13, 0);
1789
1790 b43_radio_write16(dev, 0x0050, 0x0020);
1791 b43_radio_write16(dev, 0x0050, 0x0023);
1792 udelay(40);
1793 if (phy->radio_rev < 6 || phy->radio_rev == 8) {
1794 b43_radio_write16(dev, 0x7C, (b43_radio_read16(dev, 0x7C)
1795 | 0x0002));
1796 b43_radio_write16(dev, 0x50, 0x20);
1797 }
1798 if (phy->radio_rev <= 2) {
1799 b43_radio_write16(dev, 0x7C, 0x20);
1800 b43_radio_write16(dev, 0x5A, 0x70);
1801 b43_radio_write16(dev, 0x5B, 0x7B);
1802 b43_radio_write16(dev, 0x5C, 0xB0);
1803 }
1804 b43_radio_write16(dev, 0x007A,
1805 (b43_radio_read16(dev, 0x007A) & 0x00F8) | 0x0007);
1806
1807 b43_gphy_channel_switch(dev, old_channel, 0);
3656 1808
3657 if (frequency < 5091) 1809 b43_phy_write(dev, 0x0014, 0x0200);
3658 value = 0x0040; 1810 if (phy->radio_rev >= 6)
3659 else if (frequency < 5321) 1811 b43_phy_write(dev, 0x2A, 0x88C2);
3660 value = 0x0000;
3661 else if (frequency < 5806)
3662 value = 0x0080;
3663 else 1812 else
3664 value = 0x0040; 1813 b43_phy_write(dev, 0x2A, 0x8AC0);
1814 b43_phy_write(dev, 0x0038, 0x0668);
1815 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt, gphy->tx_control);
1816 if (phy->radio_rev <= 5) {
1817 b43_phy_write(dev, 0x5D, (b43_phy_read(dev, 0x5D)
1818 & 0xFF80) | 0x0003);
1819 }
1820 if (phy->radio_rev <= 2)
1821 b43_radio_write16(dev, 0x005D, 0x000D);
3665 1822
3666 return value; 1823 if (phy->analog == 4) {
1824 b43_write16(dev, 0x3E4, 9);
1825 b43_phy_write(dev, 0x61, b43_phy_read(dev, 0x61)
1826 & 0x0FFF);
1827 } else {
1828 b43_phy_write(dev, 0x0002, (b43_phy_read(dev, 0x0002) & 0xFFC0)
1829 | 0x0004);
1830 }
1831 if (phy->type == B43_PHYTYPE_B)
1832 B43_WARN_ON(1);
1833 else if (phy->type == B43_PHYTYPE_G)
1834 b43_write16(dev, 0x03E6, 0x0);
3667} 1835}
3668 1836
/*
 * Measure the loopback gain of the radio and the TR-switch RX path.
 * The routine saves every PHY/radio register it touches, forces the
 * hardware into a loopback configuration, sweeps attenuation (outer loop)
 * and PGA gain (inner loop) until the measured LO leakage crosses a
 * threshold, then restores all state. Results are written to
 * gphy->max_lb_gain and gphy->trsw_rx_gain.
 */
static void b43_calc_loopback_gain(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	u16 backup_phy[16] = { 0 };
	u16 backup_radio[3];
	u16 backup_bband;
	u16 i, j, loop_i_max;
	u16 trsw_rx;
	u16 loop1_outer_done, loop1_inner_done;

	/* Save all registers modified below so they can be restored. */
	backup_phy[0] = b43_phy_read(dev, B43_PHY_CRS0);
	backup_phy[1] = b43_phy_read(dev, B43_PHY_CCKBBANDCFG);
	backup_phy[2] = b43_phy_read(dev, B43_PHY_RFOVER);
	backup_phy[3] = b43_phy_read(dev, B43_PHY_RFOVERVAL);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		backup_phy[4] = b43_phy_read(dev, B43_PHY_ANALOGOVER);
		backup_phy[5] = b43_phy_read(dev, B43_PHY_ANALOGOVERVAL);
	}
	backup_phy[6] = b43_phy_read(dev, B43_PHY_CCK(0x5A));
	backup_phy[7] = b43_phy_read(dev, B43_PHY_CCK(0x59));
	backup_phy[8] = b43_phy_read(dev, B43_PHY_CCK(0x58));
	backup_phy[9] = b43_phy_read(dev, B43_PHY_CCK(0x0A));
	backup_phy[10] = b43_phy_read(dev, B43_PHY_CCK(0x03));
	backup_phy[11] = b43_phy_read(dev, B43_PHY_LO_MASK);
	backup_phy[12] = b43_phy_read(dev, B43_PHY_LO_CTL);
	backup_phy[13] = b43_phy_read(dev, B43_PHY_CCK(0x2B));
	backup_phy[14] = b43_phy_read(dev, B43_PHY_PGACTL);
	backup_phy[15] = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);
	backup_bband = gphy->bbatt.att;
	backup_radio[0] = b43_radio_read16(dev, 0x52);
	backup_radio[1] = b43_radio_read16(dev, 0x43);
	backup_radio[2] = b43_radio_read16(dev, 0x7A);

	/* Force RF/analog overrides to set up the loopback path. */
	b43_phy_write(dev, B43_PHY_CRS0,
		      b43_phy_read(dev, B43_PHY_CRS0) & 0x3FFF);
	b43_phy_write(dev, B43_PHY_CCKBBANDCFG,
		      b43_phy_read(dev, B43_PHY_CCKBBANDCFG) | 0x8000);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0002);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFD);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0001);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xFFFE);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0001);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFE);
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0002);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFD);
	}
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x000C);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) | 0x000C);
	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0030);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
		       & 0xFFCF) | 0x10);

	b43_phy_write(dev, B43_PHY_CCK(0x5A), 0x0780);
	b43_phy_write(dev, B43_PHY_CCK(0x59), 0xC810);
	b43_phy_write(dev, B43_PHY_CCK(0x58), 0x000D);

	b43_phy_write(dev, B43_PHY_CCK(0x0A),
		      b43_phy_read(dev, B43_PHY_CCK(0x0A)) | 0x2000);
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER,
			      b43_phy_read(dev, B43_PHY_ANALOGOVER) | 0x0004);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL,
			      b43_phy_read(dev,
					   B43_PHY_ANALOGOVERVAL) & 0xFFFB);
	}
	b43_phy_write(dev, B43_PHY_CCK(0x03),
		      (b43_phy_read(dev, B43_PHY_CCK(0x03))
		       & 0xFF9F) | 0x40);

	if (phy->radio_rev == 8) {
		b43_radio_write16(dev, 0x43, 0x000F);
	} else {
		b43_radio_write16(dev, 0x52, 0);
		b43_radio_write16(dev, 0x43, (b43_radio_read16(dev, 0x43)
					      & 0xFFF0) | 0x9);
	}
	b43_gphy_set_baseband_attenuation(dev, 11);

	if (phy->rev >= 3)
		b43_phy_write(dev, B43_PHY_LO_MASK, 0xC020);
	else
		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8020);
	b43_phy_write(dev, B43_PHY_LO_CTL, 0);

	b43_phy_write(dev, B43_PHY_CCK(0x2B),
		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
		       & 0xFFC0) | 0x01);
	b43_phy_write(dev, B43_PHY_CCK(0x2B),
		      (b43_phy_read(dev, B43_PHY_CCK(0x2B))
		       & 0xC0FF) | 0x800);

	b43_phy_write(dev, B43_PHY_RFOVER,
		      b43_phy_read(dev, B43_PHY_RFOVER) | 0x0100);
	b43_phy_write(dev, B43_PHY_RFOVERVAL,
		      b43_phy_read(dev, B43_PHY_RFOVERVAL) & 0xCFFF);

	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) {
		/* Boards with an external LNA need extra override bits. */
		if (phy->rev >= 7) {
			b43_phy_write(dev, B43_PHY_RFOVER,
				      b43_phy_read(dev, B43_PHY_RFOVER)
				      | 0x0800);
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      b43_phy_read(dev, B43_PHY_RFOVERVAL)
				      | 0x8000);
		}
	}
	b43_radio_write16(dev, 0x7A, b43_radio_read16(dev, 0x7A)
			  & 0x00F7);

	/* Loop 1: sweep radio attenuation (i) and PGA gain (j) until the
	 * measured LO leakage reaches the 0xDFC threshold. */
	j = 0;
	loop_i_max = (phy->radio_rev == 8) ? 15 : 9;
	for (i = 0; i < loop_i_max; i++) {
		for (j = 0; j < 16; j++) {
			b43_radio_write16(dev, 0x43, i);
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
				       & 0xF0FF) | (j << 8));
			b43_phy_write(dev, B43_PHY_PGACTL,
				      (b43_phy_read(dev, B43_PHY_PGACTL)
				       & 0x0FFF) | 0xA000);
			b43_phy_write(dev, B43_PHY_PGACTL,
				      b43_phy_read(dev, B43_PHY_PGACTL)
				      | 0xF000);
			udelay(20);
			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
				goto exit_loop1;
		}
	}
 exit_loop1:
	/* Remember where loop 1 stopped; these feed the gain formula. */
	loop1_outer_done = i;
	loop1_inner_done = j;
	if (j >= 8) {
		/* Loop 2: continue the sweep with the TR switch engaged to
		 * measure the TR-switch RX gain. */
		b43_phy_write(dev, B43_PHY_RFOVERVAL,
			      b43_phy_read(dev, B43_PHY_RFOVERVAL)
			      | 0x30);
		trsw_rx = 0x1B;
		for (j = j - 8; j < 16; j++) {
			b43_phy_write(dev, B43_PHY_RFOVERVAL,
				      (b43_phy_read(dev, B43_PHY_RFOVERVAL)
				       & 0xF0FF) | (j << 8));
			b43_phy_write(dev, B43_PHY_PGACTL,
				      (b43_phy_read(dev, B43_PHY_PGACTL)
				       & 0x0FFF) | 0xA000);
			b43_phy_write(dev, B43_PHY_PGACTL,
				      b43_phy_read(dev, B43_PHY_PGACTL)
				      | 0xF000);
			udelay(20);
			trsw_rx -= 3;
			if (b43_phy_read(dev, B43_PHY_LO_LEAKAGE) >= 0xDFC)
				goto exit_loop2;
		}
	} else
		trsw_rx = 0x18;
 exit_loop2:

	/* Restore all saved state in reverse order. */
	if (phy->rev != 1) {	/* Not in specs, but needed to prevent PPC machine check */
		b43_phy_write(dev, B43_PHY_ANALOGOVER, backup_phy[4]);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, backup_phy[5]);
	}
	b43_phy_write(dev, B43_PHY_CCK(0x5A), backup_phy[6]);
	b43_phy_write(dev, B43_PHY_CCK(0x59), backup_phy[7]);
	b43_phy_write(dev, B43_PHY_CCK(0x58), backup_phy[8]);
	b43_phy_write(dev, B43_PHY_CCK(0x0A), backup_phy[9]);
	b43_phy_write(dev, B43_PHY_CCK(0x03), backup_phy[10]);
	b43_phy_write(dev, B43_PHY_LO_MASK, backup_phy[11]);
	b43_phy_write(dev, B43_PHY_LO_CTL, backup_phy[12]);
	b43_phy_write(dev, B43_PHY_CCK(0x2B), backup_phy[13]);
	b43_phy_write(dev, B43_PHY_PGACTL, backup_phy[14]);

	b43_gphy_set_baseband_attenuation(dev, backup_bband);

	b43_radio_write16(dev, 0x52, backup_radio[0]);
	b43_radio_write16(dev, 0x43, backup_radio[1]);
	b43_radio_write16(dev, 0x7A, backup_radio[2]);

	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2] | 0x0003);
	udelay(10);
	b43_phy_write(dev, B43_PHY_RFOVER, backup_phy[2]);
	b43_phy_write(dev, B43_PHY_RFOVERVAL, backup_phy[3]);
	b43_phy_write(dev, B43_PHY_CRS0, backup_phy[0]);
	b43_phy_write(dev, B43_PHY_CCKBBANDCFG, backup_phy[1]);

	gphy->max_lb_gain =
	    ((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11;
	gphy->trsw_rx_gain = trsw_rx * 2;
}
2039
/*
 * Early initialization of the hardware power control unit.
 * If hardware power control is unavailable, a fallback value is written
 * and the routine bails out; otherwise the pctl registers are programmed,
 * with a special sequence for the 0x2050 rev 8 radio.
 */
static void b43_hardware_pctl_early_init(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;

	if (!b43_has_hardware_pctl(dev)) {
		/* No hardware pctl: static setting only. */
		b43_phy_write(dev, 0x047A, 0xC111);
		return;
	}

	b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036) & 0xFEFF);
	b43_phy_write(dev, 0x002F, 0x0202);
	b43_phy_write(dev, 0x047C, b43_phy_read(dev, 0x047C) | 0x0002);
	b43_phy_write(dev, 0x047A, b43_phy_read(dev, 0x047A) | 0xF000);
	if (phy->radio_ver == 0x2050 && phy->radio_rev == 8) {
		/* Radio 0x2050 rev 8 needs its own register sequence. */
		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
					    & 0xFF0F) | 0x0010);
		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
			      | 0x8000);
		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
					    & 0xFFC0) | 0x0010);
		b43_phy_write(dev, 0x002E, 0xC07F);
		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
			      | 0x0400);
	} else {
		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
			      | 0x0200);
		b43_phy_write(dev, 0x0036, b43_phy_read(dev, 0x0036)
			      | 0x0400);
		b43_phy_write(dev, 0x005D, b43_phy_read(dev, 0x005D)
			      & 0x7FFF);
		b43_phy_write(dev, 0x004F, b43_phy_read(dev, 0x004F)
			      & 0xFFFE);
		b43_phy_write(dev, 0x004E, (b43_phy_read(dev, 0x004E)
					    & 0xFFC0) | 0x0010);
		b43_phy_write(dev, 0x002E, 0xC07F);
		b43_phy_write(dev, 0x047A, (b43_phy_read(dev, 0x047A)
					    & 0xFF0F) | 0x0010);
	}
}
2079
/* Hardware power control for G-PHY */
static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;

	if (!b43_has_hardware_pctl(dev)) {
		/* No hardware power control */
		b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_HWPCTL);
		return;
	}

	/* Program the TSSI delta (target minus measured idle TSSI). */
	b43_phy_write(dev, 0x0036, (b43_phy_read(dev, 0x0036) & 0xFFC0)
		      | (gphy->tgt_idle_tssi - gphy->cur_idle_tssi));
	b43_phy_write(dev, 0x0478, (b43_phy_read(dev, 0x0478) & 0xFF00)
		      | (gphy->tgt_idle_tssi - gphy->cur_idle_tssi));
	b43_gphy_tssi_power_lt_init(dev);
	b43_gphy_gain_lt_init(dev);
	b43_phy_write(dev, 0x0060, b43_phy_read(dev, 0x0060) & 0xFFBF);
	b43_phy_write(dev, 0x0014, 0x0000);

	/* Hardware pctl requires PHY rev >= 6 here. */
	B43_WARN_ON(phy->rev < 6);
	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
		      | 0x0800);
	b43_phy_write(dev, 0x0478, b43_phy_read(dev, 0x0478)
		      & 0xFEFF);
	b43_phy_write(dev, 0x0801, b43_phy_read(dev, 0x0801)
		      & 0xFFBF);

	b43_gphy_dc_lt_init(dev, 1);

	/* Enable hardware pctl in firmware. */
	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
}
2114
/* Intialize B/G PHY power control
 * Measures the current idle TSSI (if not yet known) by sending a dummy
 * transmission at a fixed attenuation, then initializes hardware pctl. */
static void b43_phy_init_pctl(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	struct b43_rfatt old_rfatt;
	struct b43_bbatt old_bbatt;
	u8 old_tx_control = 0;

	B43_WARN_ON(phy->type != B43_PHYTYPE_G);

	if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
	    (bus->boardinfo.type == SSB_BOARD_BU4306))
		return;

	b43_phy_write(dev, 0x0028, 0x8018);

	/* This does something with the Analog... */
	b43_write16(dev, B43_MMIO_PHY0, b43_read16(dev, B43_MMIO_PHY0)
		    & 0xFFDF);

	if (!phy->gmode)
		return;
	b43_hardware_pctl_early_init(dev);
	if (gphy->cur_idle_tssi == 0) {
		/* Idle TSSI not yet measured. */
		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
			b43_radio_write16(dev, 0x0076,
					  (b43_radio_read16(dev, 0x0076)
					   & 0x00F7) | 0x0084);
		} else {
			struct b43_rfatt rfatt;
			struct b43_bbatt bbatt;

			/* Save TX power state; restored after measuring. */
			memcpy(&old_rfatt, &gphy->rfatt, sizeof(old_rfatt));
			memcpy(&old_bbatt, &gphy->bbatt, sizeof(old_bbatt));
			old_tx_control = gphy->tx_control;

			bbatt.att = 11;
			if (phy->radio_rev == 8) {
				rfatt.att = 15;
				rfatt.with_padmix = 1;
			} else {
				rfatt.att = 9;
				rfatt.with_padmix = 0;
			}
			b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
		}
		b43_dummy_transmission(dev);
		gphy->cur_idle_tssi = b43_phy_read(dev, B43_PHY_ITSSI);
		if (B43_DEBUG) {
			/* Current-Idle-TSSI sanity check. */
			if (abs(gphy->cur_idle_tssi - gphy->tgt_idle_tssi) >= 20) {
				b43dbg(dev->wl,
				       "!WARNING! Idle-TSSI phy->cur_idle_tssi "
				       "measuring failed. (cur=%d, tgt=%d). Disabling TX power "
				       "adjustment.\n", gphy->cur_idle_tssi,
				       gphy->tgt_idle_tssi);
				gphy->cur_idle_tssi = 0;
			}
		}
		if (phy->radio_ver == 0x2050 && phy->analog == 0) {
			b43_radio_write16(dev, 0x0076,
					  b43_radio_read16(dev, 0x0076)
					  & 0xFF7B);
		} else {
			/* Restore the saved TX power state. */
			b43_set_txpower_g(dev, &old_bbatt,
					  &old_rfatt, old_tx_control);
		}
	}
	b43_hardware_pctl_init_gphy(dev);
	b43_shm_clear_tssi(dev);
}
3686 2188
/*
 * Main G-PHY initialization. Runs the B-PHY init (rev dependent), the
 * A-PHY (OFDM) init, loopback-gain calibration, LO calibration, NRSSI
 * setup and power control init.
 */
static void b43_phy_initg(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	u16 tmp;

	if (phy->rev == 1)
		b43_phy_initb5(dev);
	else
		b43_phy_initb6(dev);

	if (phy->rev >= 2 || phy->gmode)
		b43_phy_inita(dev);

	if (phy->rev >= 2) {
		b43_phy_write(dev, B43_PHY_ANALOGOVER, 0);
		b43_phy_write(dev, B43_PHY_ANALOGOVERVAL, 0);
	}
	if (phy->rev == 2) {
		b43_phy_write(dev, B43_PHY_RFOVER, 0);
		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
	}
	if (phy->rev > 5) {
		b43_phy_write(dev, B43_PHY_RFOVER, 0x400);
		b43_phy_write(dev, B43_PHY_PGACTL, 0xC0);
	}
	if (phy->gmode || phy->rev >= 2) {
		/* OFDM PHY version dependent tweaks. */
		tmp = b43_phy_read(dev, B43_PHY_VERSION_OFDM);
		tmp &= B43_PHYVER_VERSION;
		if (tmp == 3 || tmp == 5) {
			b43_phy_write(dev, B43_PHY_OFDM(0xC2), 0x1816);
			b43_phy_write(dev, B43_PHY_OFDM(0xC3), 0x8006);
		}
		if (tmp == 5) {
			b43_phy_write(dev, B43_PHY_OFDM(0xCC),
				      (b43_phy_read(dev, B43_PHY_OFDM(0xCC))
				       & 0x00FF) | 0x1F00);
		}
	}
	if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2)
		b43_phy_write(dev, B43_PHY_OFDM(0x7E), 0x78);
	if (phy->radio_rev == 8) {
		b43_phy_write(dev, B43_PHY_EXTG(0x01),
			      b43_phy_read(dev, B43_PHY_EXTG(0x01))
			      | 0x80);
		b43_phy_write(dev, B43_PHY_OFDM(0x3E),
			      b43_phy_read(dev, B43_PHY_OFDM(0x3E))
			      | 0x4);
	}
	if (has_loopback_gain(phy))
		b43_calc_loopback_gain(dev);

	if (phy->radio_rev != 8) {
		/* Cache the radio init value across re-inits. */
		if (gphy->initval == 0xFFFF)
			gphy->initval = b43_radio_init2050(dev);
		else
			b43_radio_write16(dev, 0x0078, gphy->initval);
	}
	b43_lo_g_init(dev);
	if (has_tx_magnification(phy)) {
		b43_radio_write16(dev, 0x52,
				  (b43_radio_read16(dev, 0x52) & 0xFF00)
				  | gphy->lo_control->tx_bias
				  | gphy->lo_control->tx_magn);
	} else {
		b43_radio_write16(dev, 0x52,
				  (b43_radio_read16(dev, 0x52) & 0xFFF0)
				  | gphy->lo_control->tx_bias);
	}
	if (phy->rev >= 6) {
		b43_phy_write(dev, B43_PHY_CCK(0x36),
			      (b43_phy_read(dev, B43_PHY_CCK(0x36))
			       & 0x0FFF) | (gphy->lo_control->
					    tx_bias << 12));
	}
	if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075);
	else
		b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F);
	if (phy->rev < 2)
		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x101);
	else
		b43_phy_write(dev, B43_PHY_CCK(0x2F), 0x202);
	if (phy->gmode || phy->rev >= 2) {
		b43_lo_g_adjust(dev);
		b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078);
	}

	if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) {
		/* The specs state to update the NRSSI LT with
		 * the value 0x7FFFFFFF here. I think that is some weird
		 * compiler optimization in the original driver.
		 * Essentially, what we do here is resetting all NRSSI LT
		 * entries to -32 (see the clamp_val() in nrssi_hw_update())
		 */
		b43_nrssi_hw_update(dev, 0xFFFF);	//FIXME?
		b43_calc_nrssi_threshold(dev);
	} else if (phy->gmode || phy->rev >= 2) {
		if (gphy->nrssi[0] == -1000) {
			/* -1000 is the "unmeasured" sentinel set in
			 * prepare_structs; measure the slope once. */
			B43_WARN_ON(gphy->nrssi[1] != -1000);
			b43_calc_nrssi_slope(dev);
		} else
			b43_calc_nrssi_threshold(dev);
	}
	if (phy->radio_rev == 8)
		b43_phy_write(dev, B43_PHY_EXTG(0x05), 0x3230);
	b43_phy_init_pctl(dev);
	/* FIXME: The spec says in the following if, the 0 should be replaced
	   'if OFDM may not be used in the current locale'
	   but OFDM is legal everywhere */
	if ((dev->dev->bus->chip_id == 0x4306
	     && dev->dev->bus->chip_package == 2) || 0) {
		b43_phy_write(dev, B43_PHY_CRS0, b43_phy_read(dev, B43_PHY_CRS0)
			      & 0xBFFF);
		b43_phy_write(dev, B43_PHY_OFDM(0xC3),
			      b43_phy_read(dev, B43_PHY_OFDM(0xC3))
			      & 0x7FFF);
	}
}
2308
/*
 * Tune the G-PHY radio to the given 2.4GHz channel.
 * @synthetic_pu_workaround: apply the synthesizer power-up workaround
 * before switching.
 */
void b43_gphy_channel_switch(struct b43_wldev *dev,
			     unsigned int channel,
			     bool synthetic_pu_workaround)
{
	if (synthetic_pu_workaround)
		b43_synth_pu_workaround(dev, channel);

	b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel));

	if (channel == 14) {
		/* Channel 14: ACPR depends on the SPROM country code,
		 * and an extra channel-extension bit must be set. */
		if (dev->dev->bus->sprom.country_code ==
		    SSB_SPROM1CCODE_JAPAN)
			b43_hf_write(dev,
				     b43_hf_read(dev) & ~B43_HF_ACPR);
		else
			b43_hf_write(dev,
				     b43_hf_read(dev) | B43_HF_ACPR);
		b43_write16(dev, B43_MMIO_CHANNEL_EXT,
			    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
			    | (1 << 11));
	} else {
		b43_write16(dev, B43_MMIO_CHANNEL_EXT,
			    b43_read16(dev, B43_MMIO_CHANNEL_EXT)
			    & 0xF7BF);
	}
}
2335
2336static void default_baseband_attenuation(struct b43_wldev *dev,
2337 struct b43_bbatt *bb)
2338{
2339 struct b43_phy *phy = &dev->phy;
2340
2341 if (phy->radio_ver == 0x2050 && phy->radio_rev < 6)
2342 bb->att = 0;
2343 else
2344 bb->att = 2;
2345}
2346
/*
 * Fill in the default radio attenuation for the current board/radio
 * combination. The value matrix is board- and radio-revision specific.
 */
static void default_radio_attenuation(struct b43_wldev *dev,
				      struct b43_rfatt *rf)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;

	rf->with_padmix = 0;

	/* Early BCM4309G board revisions have fixed overrides. */
	if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM &&
	    bus->boardinfo.type == SSB_BOARD_BCM4309G) {
		if (bus->boardinfo.rev < 0x43) {
			rf->att = 2;
			return;
		} else if (bus->boardinfo.rev < 0x51) {
			rf->att = 3;
			return;
		}
	}

	if (phy->type == B43_PHYTYPE_A) {
		rf->att = 0x60;
		return;
	}

	switch (phy->radio_ver) {
	case 0x2053:
		switch (phy->radio_rev) {
		case 1:
			rf->att = 6;
			return;
		}
		break;
	case 0x2050:
		switch (phy->radio_rev) {
		case 0:
			rf->att = 5;
			return;
		case 1:
			if (phy->type == B43_PHYTYPE_G) {
				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
				    && bus->boardinfo.rev >= 30)
					rf->att = 3;
				else if (bus->boardinfo.vendor ==
					 SSB_BOARDVENDOR_BCM
					 && bus->boardinfo.type ==
					 SSB_BOARD_BU4306)
					rf->att = 3;
				else
					rf->att = 1;
			} else {
				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
				    && bus->boardinfo.rev >= 30)
					rf->att = 7;
				else
					rf->att = 6;
			}
			return;
		case 2:
			if (phy->type == B43_PHYTYPE_G) {
				if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM
				    && bus->boardinfo.type == SSB_BOARD_BCM4309G
				    && bus->boardinfo.rev >= 30)
					rf->att = 3;
				else if (bus->boardinfo.vendor ==
					 SSB_BOARDVENDOR_BCM
					 && bus->boardinfo.type ==
					 SSB_BOARD_BU4306)
					rf->att = 5;
				else if (bus->chip_id == 0x4320)
					rf->att = 4;
				else
					rf->att = 3;
			} else
				rf->att = 6;
			return;
		case 3:
			rf->att = 5;
			return;
		case 4:
		case 5:
			rf->att = 1;
			return;
		case 6:
		case 7:
			rf->att = 5;
			return;
		case 8:
			/* Rev 8 uses the PAD mixer. */
			rf->att = 0xA;
			rf->with_padmix = 1;
			return;
		case 9:
		default:
			rf->att = 5;
			return;
		}
	}
	/* Unknown radio: conservative default. */
	rf->att = 5;
}
3782 2447
3783 if (synthetic_pu_workaround) 2448static u16 default_tx_control(struct b43_wldev *dev)
3784 b43_synth_pu_workaround(dev, channel); 2449{
2450 struct b43_phy *phy = &dev->phy;
3785 2451
3786 b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); 2452 if (phy->radio_ver != 0x2050)
2453 return 0;
2454 if (phy->radio_rev == 1)
2455 return B43_TXCTL_PA2DB | B43_TXCTL_TXMIX;
2456 if (phy->radio_rev < 6)
2457 return B43_TXCTL_PA2DB;
2458 if (phy->radio_rev == 8)
2459 return B43_TXCTL_TXMIX;
2460 return 0;
2461}
3787 2462
/*
 * Probe one channel for Adjacent Channel Interference.
 * Samples the RSSI 100 times; returns 1 if at least 20 samples fall
 * below the reference RSSI, 0 otherwise.
 */
static u8 b43_gphy_aci_detect(struct b43_wldev *dev, u8 channel)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	u8 ret = 0;
	u16 saved, rssi, temp;
	int i, j = 0;

	saved = b43_phy_read(dev, 0x0403);
	b43_switch_channel(dev, channel);
	b43_phy_write(dev, 0x0403, (saved & 0xFFF8) | 5);
	if (gphy->aci_hw_rssi)
		rssi = b43_phy_read(dev, 0x048A) & 0x3F;
	else
		rssi = saved & 0x3F;
	/* clamp temp to signed 5bit */
	if (rssi > 32)
		rssi -= 64;
	for (i = 0; i < 100; i++) {
		temp = (b43_phy_read(dev, 0x047F) >> 8) & 0x3F;
		if (temp > 32)
			temp -= 64;
		if (temp < rssi)
			j++;
		if (j >= 20)
			ret = 1;
	}
	b43_phy_write(dev, 0x0403, saved);

	return ret;
}
2494
2495static u8 b43_gphy_aci_scan(struct b43_wldev *dev)
2496{
2497 struct b43_phy *phy = &dev->phy;
2498 u8 ret[13];
2499 unsigned int channel = phy->channel;
2500 unsigned int i, j, start, end;
2501
2502 if (!((phy->type == B43_PHYTYPE_G) && (phy->rev > 0)))
2503 return 0;
2504
2505 b43_phy_lock(dev);
2506 b43_radio_lock(dev);
2507 b43_phy_write(dev, 0x0802, b43_phy_read(dev, 0x0802) & 0xFFFC);
2508 b43_phy_write(dev, B43_PHY_G_CRS,
2509 b43_phy_read(dev, B43_PHY_G_CRS) & 0x7FFF);
2510 b43_set_all_gains(dev, 3, 8, 1);
2511
2512 start = (channel - 5 > 0) ? channel - 5 : 1;
2513 end = (channel + 5 < 14) ? channel + 5 : 13;
2514
2515 for (i = start; i <= end; i++) {
2516 if (abs(channel - i) > 2)
2517 ret[i - 1] = b43_gphy_aci_detect(dev, i);
2518 }
2519 b43_switch_channel(dev, channel);
2520 b43_phy_write(dev, 0x0802,
2521 (b43_phy_read(dev, 0x0802) & 0xFFFC) | 0x0003);
2522 b43_phy_write(dev, 0x0403, b43_phy_read(dev, 0x0403) & 0xFFF8);
2523 b43_phy_write(dev, B43_PHY_G_CRS,
2524 b43_phy_read(dev, B43_PHY_G_CRS) | 0x8000);
2525 b43_set_original_gains(dev);
2526 for (i = 0; i < 13; i++) {
2527 if (!ret[i])
2528 continue;
2529 end = (i + 5 < 13) ? i + 5 : 13;
2530 for (j = i; j < end; j++)
2531 ret[j] = 1;
2532 }
2533 b43_radio_unlock(dev);
2534 b43_phy_unlock(dev);
2535
2536 return ret[channel - 1];
2537}
2538
2539static s32 b43_tssi2dbm_ad(s32 num, s32 den)
2540{
2541 if (num < 0)
2542 return num / den;
2543 else
2544 return (num + den / 2) / den;
2545}
2546
/*
 * Compute one entry of the TSSI->dBm table by fixed-point iteration
 * over the pab0/pab1/pab2 polynomial coefficients.
 * Returns 0 on success, -EINVAL if the iteration does not converge
 * within 16 steps.
 */
static s8 b43_tssi2dbm_entry(s8 entry[], u8 index,
			     s16 pab0, s16 pab1, s16 pab2)
{
	s32 m1, m2, f = 256, q, delta;
	s8 i = 0;

	m1 = b43_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
	m2 = max(b43_tssi2dbm_ad(32768 + index * pab2, 256), 1);
	do {
		if (i > 15)
			return -EINVAL;	/* No convergence. */
		q = b43_tssi2dbm_ad(f * 4096 -
				    b43_tssi2dbm_ad(m2 * f, 16) * f, 2048);
		delta = abs(q - f);
		f = q;
		i++;
	} while (delta >= 2);	/* Iterate until the fixpoint stabilizes. */
	entry[index] = clamp_val(b43_tssi2dbm_ad(m1 * f, 8192), -127, 128);
	return 0;
}
2567
/*
 * Generate a dynamic 64-entry TSSI->dBm table from the SPROM pa0b*
 * coefficients. Returns a kmalloc'd table (ownership passes to the
 * caller, freed via kfree when dyn_tssi_tbl is set) or NULL on error.
 */
u8 * b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev,
				   s16 pab0, s16 pab1, s16 pab2)
{
	unsigned int i;
	u8 *tab;
	int err;

	tab = kmalloc(64, GFP_KERNEL);
	if (!tab) {
		b43err(dev->wl, "Could not allocate memory "
		       "for tssi2dbm table\n");
		return NULL;
	}
	for (i = 0; i < 64; i++) {
		err = b43_tssi2dbm_entry(tab, i, pab0, pab1, pab2);
		if (err) {
			b43err(dev->wl, "Could not generate "
			       "tssi2dBm table\n");
			kfree(tab);
			return NULL;
		}
	}

	return tab;
}
3816 msleep(8); 2593
/* Initialise the TSSI->dBm lookup table.
 * Uses the SPROM pa0b* coefficients when present (dynamic table),
 * otherwise falls back to the static b43_tssi2dbm_g_table. */
static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	s16 pab0, pab1, pab2;

	pab0 = (s16) (dev->dev->bus->sprom.pa0b0);
	pab1 = (s16) (dev->dev->bus->sprom.pa0b1);
	pab2 = (s16) (dev->dev->bus->sprom.pa0b2);

	B43_WARN_ON((dev->dev->bus->chip_id == 0x4301) &&
		    (phy->radio_ver != 0x2050));	/* Not supported anymore */

	gphy->dyn_tssi_tbl = 0;

	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
		/* The pabX values are set in SPROM. Use them. */
		if ((s8) dev->dev->bus->sprom.itssi_bg != 0 &&
		    (s8) dev->dev->bus->sprom.itssi_bg != -1) {
			gphy->tgt_idle_tssi =
			    (s8) (dev->dev->bus->sprom.itssi_bg);
		} else
			gphy->tgt_idle_tssi = 62;
		gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
							       pab1, pab2);
		if (!gphy->tssi2dbm)
			return -ENOMEM;
		gphy->dyn_tssi_tbl = 1;
	} else {
		/* pabX values not set in SPROM. */
		gphy->tgt_idle_tssi = 52;
		gphy->tssi2dbm = b43_tssi2dbm_g_table;
	}

	return 0;
}
2632
/*
 * Allocate the G-PHY data structures: the gphy state, its LO control
 * structure, and the TSSI->dBm table. Returns 0 or a negative errno.
 */
static int b43_gphy_op_allocate(struct b43_wldev *dev)
{
	struct b43_phy_g *gphy;
	struct b43_txpower_lo_control *lo;
	int err;

	gphy = kzalloc(sizeof(*gphy), GFP_KERNEL);
	if (!gphy) {
		err = -ENOMEM;
		goto error;
	}
	dev->phy.g = gphy;

	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo) {
		err = -ENOMEM;
		goto err_free_gphy;
	}
	gphy->lo_control = lo;

	err = b43_gphy_init_tssi2dbm_table(dev);
	if (err)
		goto err_free_lo;

	return 0;

	/* NOTE(review): on the error paths dev->phy.g is left pointing at
	 * freed memory — callers appear to treat an allocate failure as
	 * fatal, but verify no later code dereferences phy.g on failure. */
err_free_lo:
	kfree(lo);
err_free_gphy:
	kfree(gphy);
error:
	return err;
}
3824 2666
/*
 * Reset the G-PHY runtime state to its defaults, preserving the
 * allocation-time constants (tssi2dbm table, target idle TSSI and the
 * LO control allocation).
 */
static void b43_gphy_op_prepare_structs(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_g *gphy = phy->g;
	const void *tssi2dbm;
	int tgt_idle_tssi;
	struct b43_txpower_lo_control *lo;
	unsigned int i;

	/* tssi2dbm table is constant, so it is initialized at alloc time.
	 * Save a copy of the pointer. */
	tssi2dbm = gphy->tssi2dbm;
	tgt_idle_tssi = gphy->tgt_idle_tssi;
	/* Save the LO pointer. */
	lo = gphy->lo_control;

	/* Zero out the whole PHY structure. */
	memset(gphy, 0, sizeof(*gphy));

	/* Restore pointers. */
	gphy->tssi2dbm = tssi2dbm;
	gphy->tgt_idle_tssi = tgt_idle_tssi;
	gphy->lo_control = lo;

	memset(gphy->minlowsig, 0xFF, sizeof(gphy->minlowsig));

	/* NRSSI: -1000 marks "not yet measured" (see b43_phy_initg). */
	for (i = 0; i < ARRAY_SIZE(gphy->nrssi); i++)
		gphy->nrssi[i] = -1000;
	for (i = 0; i < ARRAY_SIZE(gphy->nrssi_lt); i++)
		gphy->nrssi_lt[i] = i;

	/* 0xFFFF marks "uncalibrated" for both caches. */
	gphy->lofcal = 0xFFFF;
	gphy->initval = 0xFFFF;

	gphy->interfmode = B43_INTERFMODE_NONE;

	/* OFDM-table address caching. */
	gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_UNKNOWN;

	gphy->average_tssi = 0xFF;

	/* Local Osciallator structure */
	lo->tx_bias = 0xFF;
	INIT_LIST_HEAD(&lo->calib_list);
}
2713
2714static void b43_gphy_op_free(struct b43_wldev *dev)
2715{
2716 struct b43_phy *phy = &dev->phy;
2717 struct b43_phy_g *gphy = phy->g;
2718
2719 kfree(gphy->lo_control);
2720
2721 if (gphy->dyn_tssi_tbl)
2722 kfree(gphy->tssi2dbm);
2723 gphy->dyn_tssi_tbl = 0;
2724 gphy->tssi2dbm = NULL;
2725
2726 kfree(gphy);
2727 dev->phy.g = NULL;
2728}
2729
2730static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
2731{
2732 struct b43_phy *phy = &dev->phy;
2733 struct b43_phy_g *gphy = phy->g;
2734 struct b43_txpower_lo_control *lo = gphy->lo_control;
2735
2736 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2737
2738 default_baseband_attenuation(dev, &gphy->bbatt);
2739 default_radio_attenuation(dev, &gphy->rfatt);
2740 gphy->tx_control = (default_tx_control(dev) << 4);
2741 generate_rfatt_list(dev, &lo->rfatt_list);
2742 generate_bbatt_list(dev, &lo->bbatt_list);
2743
2744 /* Commit previous writes */
2745 b43_read32(dev, B43_MMIO_MACCTL);
2746
2747 if (phy->rev == 1) {
2748 /* Workaround: Temporarly disable gmode through the early init
2749 * phase, as the gmode stuff is not needed for phy rev 1 */
2750 phy->gmode = 0;
2751 b43_wireless_core_reset(dev, 0);
2752 b43_phy_initg(dev);
2753 phy->gmode = 1;
2754 b43_wireless_core_reset(dev, B43_TMSLOW_GMODE);
2755 }
2756
2757 return 0;
2758}
2759
2760static int b43_gphy_op_init(struct b43_wldev *dev)
2761{
2762 b43_phy_initg(dev);
2763
2764 return 0;
2765}
2766
2767static void b43_gphy_op_exit(struct b43_wldev *dev)
2768{
2769 b43_lo_g_cleanup(dev);
2770}
2771
2772static u16 b43_gphy_op_read(struct b43_wldev *dev, u16 reg)
2773{
2774 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
2775 return b43_read16(dev, B43_MMIO_PHY_DATA);
2776}
2777
2778static void b43_gphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
2779{
2780 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
2781 b43_write16(dev, B43_MMIO_PHY_DATA, value);
2782}
2783
2784static u16 b43_gphy_op_radio_read(struct b43_wldev *dev, u16 reg)
2785{
2786 /* Register 1 is a 32-bit register. */
2787 B43_WARN_ON(reg == 1);
2788 /* G-PHY needs 0x80 for read access. */
2789 reg |= 0x80;
2790
2791 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
2792 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
2793}
2794
2795static void b43_gphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
2796{
2797 /* Register 1 is a 32-bit register. */
2798 B43_WARN_ON(reg == 1);
2799
2800 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
2801 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
2802}
2803
2804static bool b43_gphy_op_supports_hwpctl(struct b43_wldev *dev)
2805{
2806 return (dev->phy.rev >= 6);
2807}
2808
2809static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
2810 enum rfkill_state state)
2811{
2812 struct b43_phy *phy = &dev->phy;
2813 struct b43_phy_g *gphy = phy->g;
2814 unsigned int channel;
3830 2815
3831 might_sleep(); 2816 might_sleep();
3832 2817
3833 if (phy->radio_on) 2818 if (state == RFKILL_STATE_UNBLOCKED) {
3834 return; 2819 /* Turn radio ON */
2820 if (phy->radio_on)
2821 return;
3835 2822
3836 switch (phy->type) {
3837 case B43_PHYTYPE_A:
3838 b43_radio_write16(dev, 0x0004, 0x00C0);
3839 b43_radio_write16(dev, 0x0005, 0x0008);
3840 b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) & 0xFFF7);
3841 b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) & 0xFFF7);
3842 b43_radio_init2060(dev);
3843 break;
3844 case B43_PHYTYPE_B:
3845 case B43_PHYTYPE_G:
3846 b43_phy_write(dev, 0x0015, 0x8000); 2823 b43_phy_write(dev, 0x0015, 0x8000);
3847 b43_phy_write(dev, 0x0015, 0xCC00); 2824 b43_phy_write(dev, 0x0015, 0xCC00);
3848 b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000)); 2825 b43_phy_write(dev, 0x0015, (phy->gmode ? 0x00C0 : 0x0000));
3849 if (phy->radio_off_context.valid) { 2826 if (gphy->radio_off_context.valid) {
3850 /* Restore the RFover values. */ 2827 /* Restore the RFover values. */
3851 b43_phy_write(dev, B43_PHY_RFOVER, 2828 b43_phy_write(dev, B43_PHY_RFOVER,
3852 phy->radio_off_context.rfover); 2829 gphy->radio_off_context.rfover);
3853 b43_phy_write(dev, B43_PHY_RFOVERVAL, 2830 b43_phy_write(dev, B43_PHY_RFOVERVAL,
3854 phy->radio_off_context.rfoverval); 2831 gphy->radio_off_context.rfoverval);
3855 phy->radio_off_context.valid = 0; 2832 gphy->radio_off_context.valid = 0;
3856 } 2833 }
3857 channel = phy->channel; 2834 channel = phy->channel;
3858 err = b43_radio_selectchannel(dev, B43_DEFAULT_CHANNEL_BG, 1); 2835 b43_gphy_channel_switch(dev, 6, 1);
3859 err |= b43_radio_selectchannel(dev, channel, 0); 2836 b43_gphy_channel_switch(dev, channel, 0);
3860 B43_WARN_ON(err); 2837 } else {
3861 break; 2838 /* Turn radio OFF */
3862 case B43_PHYTYPE_N: 2839 u16 rfover, rfoverval;
3863 b43_nphy_radio_turn_on(dev); 2840
3864 break; 2841 rfover = b43_phy_read(dev, B43_PHY_RFOVER);
3865 default: 2842 rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
3866 B43_WARN_ON(1); 2843 gphy->radio_off_context.rfover = rfover;
2844 gphy->radio_off_context.rfoverval = rfoverval;
2845 gphy->radio_off_context.valid = 1;
2846 b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
2847 b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
3867 } 2848 }
3868 phy->radio_on = 1;
3869} 2849}
3870 2850
3871void b43_radio_turn_off(struct b43_wldev *dev, bool force) 2851static int b43_gphy_op_switch_channel(struct b43_wldev *dev,
2852 unsigned int new_channel)
2853{
2854 if ((new_channel < 1) || (new_channel > 14))
2855 return -EINVAL;
2856 b43_gphy_channel_switch(dev, new_channel, 0);
2857
2858 return 0;
2859}
2860
2861static unsigned int b43_gphy_op_get_default_chan(struct b43_wldev *dev)
2862{
2863 return 1; /* Default to channel 1 */
2864}
2865
2866static void b43_gphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
3872{ 2867{
3873 struct b43_phy *phy = &dev->phy; 2868 struct b43_phy *phy = &dev->phy;
2869 u64 hf;
2870 u16 tmp;
2871 int autodiv = 0;
3874 2872
3875 if (!phy->radio_on && !force) 2873 if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1)
3876 return; 2874 autodiv = 1;
2875
2876 hf = b43_hf_read(dev);
2877 hf &= ~B43_HF_ANTDIVHELP;
2878 b43_hf_write(dev, hf);
2879
2880 tmp = b43_phy_read(dev, B43_PHY_BBANDCFG);
2881 tmp &= ~B43_PHY_BBANDCFG_RXANT;
2882 tmp |= (autodiv ? B43_ANTENNA_AUTO0 : antenna)
2883 << B43_PHY_BBANDCFG_RXANT_SHIFT;
2884 b43_phy_write(dev, B43_PHY_BBANDCFG, tmp);
2885
2886 if (autodiv) {
2887 tmp = b43_phy_read(dev, B43_PHY_ANTDWELL);
2888 if (antenna == B43_ANTENNA_AUTO0)
2889 tmp &= ~B43_PHY_ANTDWELL_AUTODIV1;
2890 else
2891 tmp |= B43_PHY_ANTDWELL_AUTODIV1;
2892 b43_phy_write(dev, B43_PHY_ANTDWELL, tmp);
2893 }
2894 tmp = b43_phy_read(dev, B43_PHY_ANTWRSETT);
2895 if (autodiv)
2896 tmp |= B43_PHY_ANTWRSETT_ARXDIV;
2897 else
2898 tmp &= ~B43_PHY_ANTWRSETT_ARXDIV;
2899 b43_phy_write(dev, B43_PHY_ANTWRSETT, tmp);
2900 if (phy->rev >= 2) {
2901 tmp = b43_phy_read(dev, B43_PHY_OFDM61);
2902 tmp |= B43_PHY_OFDM61_10;
2903 b43_phy_write(dev, B43_PHY_OFDM61, tmp);
2904
2905 tmp =
2906 b43_phy_read(dev, B43_PHY_DIVSRCHGAINBACK);
2907 tmp = (tmp & 0xFF00) | 0x15;
2908 b43_phy_write(dev, B43_PHY_DIVSRCHGAINBACK,
2909 tmp);
2910
2911 if (phy->rev == 2) {
2912 b43_phy_write(dev, B43_PHY_ADIVRELATED,
2913 8);
2914 } else {
2915 tmp =
2916 b43_phy_read(dev,
2917 B43_PHY_ADIVRELATED);
2918 tmp = (tmp & 0xFF00) | 8;
2919 b43_phy_write(dev, B43_PHY_ADIVRELATED,
2920 tmp);
2921 }
2922 }
2923 if (phy->rev >= 6)
2924 b43_phy_write(dev, B43_PHY_OFDM9B, 0xDC);
2925
2926 hf |= B43_HF_ANTDIVHELP;
2927 b43_hf_write(dev, hf);
2928}
3877 2929
3878 switch (phy->type) { 2930static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
3879 case B43_PHYTYPE_N: 2931 enum b43_interference_mitigation mode)
3880 b43_nphy_radio_turn_off(dev); 2932{
2933 struct b43_phy *phy = &dev->phy;
2934 struct b43_phy_g *gphy = phy->g;
2935 int currentmode;
2936
2937 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
2938 if ((phy->rev == 0) || (!phy->gmode))
2939 return -ENODEV;
2940
2941 gphy->aci_wlan_automatic = 0;
2942 switch (mode) {
2943 case B43_INTERFMODE_AUTOWLAN:
2944 gphy->aci_wlan_automatic = 1;
2945 if (gphy->aci_enable)
2946 mode = B43_INTERFMODE_MANUALWLAN;
2947 else
2948 mode = B43_INTERFMODE_NONE;
3881 break; 2949 break;
3882 case B43_PHYTYPE_A: 2950 case B43_INTERFMODE_NONE:
3883 b43_radio_write16(dev, 0x0004, 0x00FF); 2951 case B43_INTERFMODE_NONWLAN:
3884 b43_radio_write16(dev, 0x0005, 0x00FB); 2952 case B43_INTERFMODE_MANUALWLAN:
3885 b43_phy_write(dev, 0x0010, b43_phy_read(dev, 0x0010) | 0x0008);
3886 b43_phy_write(dev, 0x0011, b43_phy_read(dev, 0x0011) | 0x0008);
3887 break; 2953 break;
3888 case B43_PHYTYPE_G: { 2954 default:
3889 u16 rfover, rfoverval; 2955 return -EINVAL;
2956 }
3890 2957
3891 rfover = b43_phy_read(dev, B43_PHY_RFOVER); 2958 currentmode = gphy->interfmode;
3892 rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL); 2959 if (currentmode == mode)
3893 if (!force) { 2960 return 0;
3894 phy->radio_off_context.rfover = rfover; 2961 if (currentmode != B43_INTERFMODE_NONE)
3895 phy->radio_off_context.rfoverval = rfoverval; 2962 b43_radio_interference_mitigation_disable(dev, currentmode);
3896 phy->radio_off_context.valid = 1; 2963
2964 if (mode == B43_INTERFMODE_NONE) {
2965 gphy->aci_enable = 0;
2966 gphy->aci_hw_rssi = 0;
2967 } else
2968 b43_radio_interference_mitigation_enable(dev, mode);
2969 gphy->interfmode = mode;
2970
2971 return 0;
2972}
2973
2974/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
2975 * This function converts a TSSI value to dBm in Q5.2
2976 */
2977static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
2978{
2979 struct b43_phy_g *gphy = dev->phy.g;
2980 s8 dbm;
2981 s32 tmp;
2982
2983 tmp = (gphy->tgt_idle_tssi - gphy->cur_idle_tssi + tssi);
2984 tmp = clamp_val(tmp, 0x00, 0x3F);
2985 dbm = gphy->tssi2dbm[tmp];
2986
2987 return dbm;
2988}
2989
2990static void b43_put_attenuation_into_ranges(struct b43_wldev *dev,
2991 int *_bbatt, int *_rfatt)
2992{
2993 int rfatt = *_rfatt;
2994 int bbatt = *_bbatt;
2995 struct b43_txpower_lo_control *lo = dev->phy.g->lo_control;
2996
2997 /* Get baseband and radio attenuation values into their permitted ranges.
2998 * Radio attenuation affects power level 4 times as much as baseband. */
2999
3000 /* Range constants */
3001 const int rf_min = lo->rfatt_list.min_val;
3002 const int rf_max = lo->rfatt_list.max_val;
3003 const int bb_min = lo->bbatt_list.min_val;
3004 const int bb_max = lo->bbatt_list.max_val;
3005
3006 while (1) {
3007 if (rfatt > rf_max && bbatt > bb_max - 4)
3008 break; /* Can not get it into ranges */
3009 if (rfatt < rf_min && bbatt < bb_min + 4)
3010 break; /* Can not get it into ranges */
3011 if (bbatt > bb_max && rfatt > rf_max - 1)
3012 break; /* Can not get it into ranges */
3013 if (bbatt < bb_min && rfatt < rf_min + 1)
3014 break; /* Can not get it into ranges */
3015
3016 if (bbatt > bb_max) {
3017 bbatt -= 4;
3018 rfatt += 1;
3019 continue;
3020 }
3021 if (bbatt < bb_min) {
3022 bbatt += 4;
3023 rfatt -= 1;
3024 continue;
3025 }
3026 if (rfatt > rf_max) {
3027 rfatt -= 1;
3028 bbatt += 4;
3029 continue;
3030 }
3031 if (rfatt < rf_min) {
3032 rfatt += 1;
3033 bbatt -= 4;
3034 continue;
3897 } 3035 }
3898 b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
3899 b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
3900 break; 3036 break;
3901 } 3037 }
3902 default: 3038
3903 B43_WARN_ON(1); 3039 *_rfatt = clamp_val(rfatt, rf_min, rf_max);
3040 *_bbatt = clamp_val(bbatt, bb_min, bb_max);
3041}
3042
3043static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
3044{
3045 struct b43_phy *phy = &dev->phy;
3046 struct b43_phy_g *gphy = phy->g;
3047 int rfatt, bbatt;
3048 u8 tx_control;
3049
3050 spin_lock_irq(&dev->wl->irq_lock);
3051
3052 /* Calculate the new attenuation values. */
3053 bbatt = gphy->bbatt.att;
3054 bbatt += gphy->bbatt_delta;
3055 rfatt = gphy->rfatt.att;
3056 rfatt += gphy->rfatt_delta;
3057
3058 b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
3059 tx_control = gphy->tx_control;
3060 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 2)) {
3061 if (rfatt <= 1) {
3062 if (tx_control == 0) {
3063 tx_control =
3064 B43_TXCTL_PA2DB |
3065 B43_TXCTL_TXMIX;
3066 rfatt += 2;
3067 bbatt += 2;
3068 } else if (dev->dev->bus->sprom.
3069 boardflags_lo &
3070 B43_BFL_PACTRL) {
3071 bbatt += 4 * (rfatt - 2);
3072 rfatt = 2;
3073 }
3074 } else if (rfatt > 4 && tx_control) {
3075 tx_control = 0;
3076 if (bbatt < 3) {
3077 rfatt -= 3;
3078 bbatt += 2;
3079 } else {
3080 rfatt -= 2;
3081 bbatt -= 2;
3082 }
3083 }
3904 } 3084 }
3905 phy->radio_on = 0; 3085 /* Save the control values */
3086 gphy->tx_control = tx_control;
3087 b43_put_attenuation_into_ranges(dev, &bbatt, &rfatt);
3088 gphy->rfatt.att = rfatt;
3089 gphy->bbatt.att = bbatt;
3090
3091 /* We drop the lock early, so we can sleep during hardware
3092 * adjustment. Possible races with op_recalc_txpower are harmless,
3093 * as we will be called once again in case we raced. */
3094 spin_unlock_irq(&dev->wl->irq_lock);
3095
3096 if (b43_debug(dev, B43_DBG_XMITPOWER))
3097 b43dbg(dev->wl, "Adjusting TX power\n");
3098
3099 /* Adjust the hardware */
3100 b43_phy_lock(dev);
3101 b43_radio_lock(dev);
3102 b43_set_txpower_g(dev, &gphy->bbatt, &gphy->rfatt,
3103 gphy->tx_control);
3104 b43_radio_unlock(dev);
3105 b43_phy_unlock(dev);
3906} 3106}
3107
3108static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
3109 bool ignore_tssi)
3110{
3111 struct b43_phy *phy = &dev->phy;
3112 struct b43_phy_g *gphy = phy->g;
3113 unsigned int average_tssi;
3114 int cck_result, ofdm_result;
3115 int estimated_pwr, desired_pwr, pwr_adjust;
3116 int rfatt_delta, bbatt_delta;
3117 unsigned int max_pwr;
3118
3119 /* First get the average TSSI */
3120 cck_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_CCK);
3121 ofdm_result = b43_phy_shm_tssi_read(dev, B43_SHM_SH_TSSI_OFDM_G);
3122 if ((cck_result < 0) && (ofdm_result < 0)) {
3123 /* No TSSI information available */
3124 if (!ignore_tssi)
3125 goto no_adjustment_needed;
3126 cck_result = 0;
3127 ofdm_result = 0;
3128 }
3129 if (cck_result < 0)
3130 average_tssi = ofdm_result;
3131 else if (ofdm_result < 0)
3132 average_tssi = cck_result;
3133 else
3134 average_tssi = (cck_result + ofdm_result) / 2;
3135 /* Merge the average with the stored value. */
3136 if (likely(gphy->average_tssi != 0xFF))
3137 average_tssi = (average_tssi + gphy->average_tssi) / 2;
3138 gphy->average_tssi = average_tssi;
3139 B43_WARN_ON(average_tssi >= B43_TSSI_MAX);
3140
3141 /* Estimate the TX power emission based on the TSSI */
3142 estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi);
3143
3144 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
3145 max_pwr = dev->dev->bus->sprom.maxpwr_bg;
3146 if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)
3147 max_pwr -= 3; /* minus 0.75 */
3148 if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) {
3149 b43warn(dev->wl,
3150 "Invalid max-TX-power value in SPROM.\n");
3151 max_pwr = INT_TO_Q52(20); /* fake it */
3152 dev->dev->bus->sprom.maxpwr_bg = max_pwr;
3153 }
3154
3155 /* Get desired power (in Q5.2) */
3156 if (phy->desired_txpower < 0)
3157 desired_pwr = INT_TO_Q52(0);
3158 else
3159 desired_pwr = INT_TO_Q52(phy->desired_txpower);
3160 /* And limit it. max_pwr already is Q5.2 */
3161 desired_pwr = clamp_val(desired_pwr, 0, max_pwr);
3162 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
3163 b43dbg(dev->wl,
3164 "[TX power] current = " Q52_FMT
3165 " dBm, desired = " Q52_FMT
3166 " dBm, max = " Q52_FMT "\n",
3167 Q52_ARG(estimated_pwr),
3168 Q52_ARG(desired_pwr),
3169 Q52_ARG(max_pwr));
3170 }
3171
3172 /* Calculate the adjustment delta. */
3173 pwr_adjust = desired_pwr - estimated_pwr;
3174 if (pwr_adjust == 0)
3175 goto no_adjustment_needed;
3176
3177 /* RF attenuation delta. */
3178 rfatt_delta = ((pwr_adjust + 7) / 8);
3179 /* Lower attenuation => Bigger power output. Negate it. */
3180 rfatt_delta = -rfatt_delta;
3181
3182 /* Baseband attenuation delta. */
3183 bbatt_delta = pwr_adjust / 2;
3184 /* Lower attenuation => Bigger power output. Negate it. */
3185 bbatt_delta = -bbatt_delta;
3186 /* RF att affects power level 4 times as much as
3187 * Baseband attennuation. Subtract it. */
3188 bbatt_delta -= 4 * rfatt_delta;
3189
3190 if (b43_debug(dev, B43_DBG_XMITPOWER)) {
3191 int dbm = pwr_adjust < 0 ? -pwr_adjust : pwr_adjust;
3192 b43dbg(dev->wl,
3193 "[TX power deltas] %s" Q52_FMT " dBm => "
3194 "bbatt-delta = %d, rfatt-delta = %d\n",
3195 (pwr_adjust < 0 ? "-" : ""), Q52_ARG(dbm),
3196 bbatt_delta, rfatt_delta);
3197 }
3198 /* So do we finally need to adjust something in hardware? */
3199 if ((rfatt_delta == 0) && (bbatt_delta == 0))
3200 goto no_adjustment_needed;
3201
3202 /* Save the deltas for later when we adjust the power. */
3203 gphy->bbatt_delta = bbatt_delta;
3204 gphy->rfatt_delta = rfatt_delta;
3205
3206 /* We need to adjust the TX power on the device. */
3207 return B43_TXPWR_RES_NEED_ADJUST;
3208
3209no_adjustment_needed:
3210 return B43_TXPWR_RES_DONE;
3211}
3212
3213static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
3214{
3215 struct b43_phy *phy = &dev->phy;
3216 struct b43_phy_g *gphy = phy->g;
3217
3218 //TODO: update_aci_moving_average
3219 if (gphy->aci_enable && gphy->aci_wlan_automatic) {
3220 b43_mac_suspend(dev);
3221 if (!gphy->aci_enable && 1 /*TODO: not scanning? */ ) {
3222 if (0 /*TODO: bunch of conditions */ ) {
3223 phy->ops->interf_mitigation(dev,
3224 B43_INTERFMODE_MANUALWLAN);
3225 }
3226 } else if (0 /*TODO*/) {
3227 if (/*(aci_average > 1000) &&*/ !b43_gphy_aci_scan(dev))
3228 phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
3229 }
3230 b43_mac_enable(dev);
3231 } else if (gphy->interfmode == B43_INTERFMODE_NONWLAN &&
3232 phy->rev == 1) {
3233 //TODO: implement rev1 workaround
3234 }
3235 b43_lo_g_maintanance_work(dev);
3236}
3237
3238static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev)
3239{
3240 struct b43_phy *phy = &dev->phy;
3241
3242 if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI))
3243 return;
3244
3245 b43_mac_suspend(dev);
3246 b43_calc_nrssi_slope(dev);
3247 if ((phy->radio_ver == 0x2050) && (phy->radio_rev == 8)) {
3248 u8 old_chan = phy->channel;
3249
3250 /* VCO Calibration */
3251 if (old_chan >= 8)
3252 b43_switch_channel(dev, 1);
3253 else
3254 b43_switch_channel(dev, 13);
3255 b43_switch_channel(dev, old_chan);
3256 }
3257 b43_mac_enable(dev);
3258}
3259
3260const struct b43_phy_operations b43_phyops_g = {
3261 .allocate = b43_gphy_op_allocate,
3262 .free = b43_gphy_op_free,
3263 .prepare_structs = b43_gphy_op_prepare_structs,
3264 .prepare_hardware = b43_gphy_op_prepare_hardware,
3265 .init = b43_gphy_op_init,
3266 .exit = b43_gphy_op_exit,
3267 .phy_read = b43_gphy_op_read,
3268 .phy_write = b43_gphy_op_write,
3269 .radio_read = b43_gphy_op_radio_read,
3270 .radio_write = b43_gphy_op_radio_write,
3271 .supports_hwpctl = b43_gphy_op_supports_hwpctl,
3272 .software_rfkill = b43_gphy_op_software_rfkill,
3273 .switch_analog = b43_phyop_switch_analog_generic,
3274 .switch_channel = b43_gphy_op_switch_channel,
3275 .get_default_chan = b43_gphy_op_get_default_chan,
3276 .set_rx_antenna = b43_gphy_op_set_rx_antenna,
3277 .interf_mitigation = b43_gphy_op_interf_mitigation,
3278 .recalc_txpower = b43_gphy_op_recalc_txpower,
3279 .adjust_txpower = b43_gphy_op_adjust_txpower,
3280 .pwork_15sec = b43_gphy_op_pwork_15sec,
3281 .pwork_60sec = b43_gphy_op_pwork_60sec,
3282};
diff --git a/drivers/net/wireless/b43/phy_g.h b/drivers/net/wireless/b43/phy_g.h
new file mode 100644
index 000000000000..718947fd41ae
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_g.h
@@ -0,0 +1,209 @@
1#ifndef LINUX_B43_PHY_G_H_
2#define LINUX_B43_PHY_G_H_
3
4/* OFDM PHY registers are defined in the A-PHY header. */
5#include "phy_a.h"
6
7/* CCK (B) PHY Registers */
8#define B43_PHY_VERSION_CCK B43_PHY_CCK(0x00) /* Versioning register for B-PHY */
9#define B43_PHY_CCKBBANDCFG B43_PHY_CCK(0x01) /* Contains antenna 0/1 control bit */
10#define B43_PHY_PGACTL B43_PHY_CCK(0x15) /* PGA control */
11#define B43_PHY_PGACTL_LPF 0x1000 /* Low pass filter (?) */
12#define B43_PHY_PGACTL_LOWBANDW 0x0040 /* Low bandwidth flag */
13#define B43_PHY_PGACTL_UNKNOWN 0xEFA0
14#define B43_PHY_FBCTL1 B43_PHY_CCK(0x18) /* Frequency bandwidth control 1 */
15#define B43_PHY_ITSSI B43_PHY_CCK(0x29) /* Idle TSSI */
16#define B43_PHY_LO_LEAKAGE B43_PHY_CCK(0x2D) /* Measured LO leakage */
17#define B43_PHY_ENERGY B43_PHY_CCK(0x33) /* Energy */
18#define B43_PHY_SYNCCTL B43_PHY_CCK(0x35)
19#define B43_PHY_FBCTL2 B43_PHY_CCK(0x38) /* Frequency bandwidth control 2 */
20#define B43_PHY_DACCTL B43_PHY_CCK(0x60) /* DAC control */
21#define B43_PHY_RCCALOVER B43_PHY_CCK(0x78) /* RC calibration override */
22
23/* Extended G-PHY Registers */
24#define B43_PHY_CLASSCTL B43_PHY_EXTG(0x02) /* Classify control */
25#define B43_PHY_GTABCTL B43_PHY_EXTG(0x03) /* G-PHY table control (see below) */
26#define B43_PHY_GTABOFF 0x03FF /* G-PHY table offset (see below) */
27#define B43_PHY_GTABNR 0xFC00 /* G-PHY table number (see below) */
28#define B43_PHY_GTABNR_SHIFT 10
29#define B43_PHY_GTABDATA B43_PHY_EXTG(0x04) /* G-PHY table data */
30#define B43_PHY_LO_MASK B43_PHY_EXTG(0x0F) /* Local Oscillator control mask */
31#define B43_PHY_LO_CTL B43_PHY_EXTG(0x10) /* Local Oscillator control */
32#define B43_PHY_RFOVER B43_PHY_EXTG(0x11) /* RF override */
33#define B43_PHY_RFOVERVAL B43_PHY_EXTG(0x12) /* RF override value */
34#define B43_PHY_RFOVERVAL_EXTLNA 0x8000
35#define B43_PHY_RFOVERVAL_LNA 0x7000
36#define B43_PHY_RFOVERVAL_LNA_SHIFT 12
37#define B43_PHY_RFOVERVAL_PGA 0x0F00
38#define B43_PHY_RFOVERVAL_PGA_SHIFT 8
39#define B43_PHY_RFOVERVAL_UNK 0x0010 /* Unknown, always set. */
40#define B43_PHY_RFOVERVAL_TRSWRX 0x00E0
41#define B43_PHY_RFOVERVAL_BW 0x0003 /* Bandwidth flags */
42#define B43_PHY_RFOVERVAL_BW_LPF 0x0001 /* Low Pass Filter */
43#define B43_PHY_RFOVERVAL_BW_LBW 0x0002 /* Low Bandwidth (when set), high when unset */
44#define B43_PHY_ANALOGOVER B43_PHY_EXTG(0x14) /* Analog override */
45#define B43_PHY_ANALOGOVERVAL B43_PHY_EXTG(0x15) /* Analog override value */
46
47
48/*** G-PHY table numbers */
49#define B43_GTAB(number, offset) (((number) << B43_PHY_GTABNR_SHIFT) | (offset))
50#define B43_GTAB_NRSSI B43_GTAB(0x00, 0)
51#define B43_GTAB_TRFEMW B43_GTAB(0x0C, 0x120)
52#define B43_GTAB_ORIGTR B43_GTAB(0x2E, 0x298)
53
54u16 b43_gtab_read(struct b43_wldev *dev, u16 table, u16 offset);
55void b43_gtab_write(struct b43_wldev *dev, u16 table, u16 offset, u16 value);
56
57
58/* Returns the boolean whether "TX Magnification" is enabled. */
59#define has_tx_magnification(phy) \
60 (((phy)->rev >= 2) && \
61 ((phy)->radio_ver == 0x2050) && \
62 ((phy)->radio_rev == 8))
63/* Card uses the loopback gain stuff */
64#define has_loopback_gain(phy) \
65 (((phy)->rev > 1) || ((phy)->gmode))
66
67/* Radio Attenuation (RF Attenuation) */
68struct b43_rfatt {
69 u8 att; /* Attenuation value */
70 bool with_padmix; /* Flag, PAD Mixer enabled. */
71};
72struct b43_rfatt_list {
73 /* Attenuation values list */
74 const struct b43_rfatt *list;
75 u8 len;
76 /* Minimum/Maximum attenuation values */
77 u8 min_val;
78 u8 max_val;
79};
80
81/* Returns true, if the values are the same. */
82static inline bool b43_compare_rfatt(const struct b43_rfatt *a,
83 const struct b43_rfatt *b)
84{
85 return ((a->att == b->att) &&
86 (a->with_padmix == b->with_padmix));
87}
88
89/* Baseband Attenuation */
90struct b43_bbatt {
91 u8 att; /* Attenuation value */
92};
93struct b43_bbatt_list {
94 /* Attenuation values list */
95 const struct b43_bbatt *list;
96 u8 len;
97 /* Minimum/Maximum attenuation values */
98 u8 min_val;
99 u8 max_val;
100};
101
102/* Returns true, if the values are the same. */
103static inline bool b43_compare_bbatt(const struct b43_bbatt *a,
104 const struct b43_bbatt *b)
105{
106 return (a->att == b->att);
107}
108
109/* tx_control bits. */
110#define B43_TXCTL_PA3DB 0x40 /* PA Gain 3dB */
111#define B43_TXCTL_PA2DB 0x20 /* PA Gain 2dB */
112#define B43_TXCTL_TXMIX 0x10 /* TX Mixer Gain */
113
114struct b43_txpower_lo_control;
115
116struct b43_phy_g {
117 /* ACI (adjacent channel interference) flags. */
118 bool aci_enable;
119 bool aci_wlan_automatic;
120 bool aci_hw_rssi;
121
122 /* Radio switched on/off */
123 bool radio_on;
124 struct {
125 /* Values saved when turning the radio off.
126 * They are needed when turning it on again. */
127 bool valid;
128 u16 rfover;
129 u16 rfoverval;
130 } radio_off_context;
131
132 u16 minlowsig[2];
133 u16 minlowsigpos[2];
134
135 /* Pointer to the table used to convert a
136 * TSSI value to dBm-Q5.2 */
137 const s8 *tssi2dbm;
138 /* tssi2dbm is kmalloc()ed. Only used for free()ing. */
139 bool dyn_tssi_tbl;
140 /* Target idle TSSI */
141 int tgt_idle_tssi;
142 /* Current idle TSSI */
143 int cur_idle_tssi;
144 /* The current average TSSI.
145 * Needs irq_lock, as it's updated in the IRQ path. */
146 u8 average_tssi;
147 /* Current TX power level attenuation control values */
148 struct b43_bbatt bbatt;
149 struct b43_rfatt rfatt;
150 u8 tx_control; /* B43_TXCTL_XXX */
151 /* The calculated attenuation deltas that are used later
152 * when adjusting the actual power output. */
153 int bbatt_delta;
154 int rfatt_delta;
155
156 /* LocalOscillator control values. */
157 struct b43_txpower_lo_control *lo_control;
158 /* Values from b43_calc_loopback_gain() */
159 s16 max_lb_gain; /* Maximum Loopback gain in hdB */
160 s16 trsw_rx_gain; /* TRSW RX gain in hdB */
161 s16 lna_lod_gain; /* LNA lod */
162 s16 lna_gain; /* LNA */
163 s16 pga_gain; /* PGA */
164
165 /* Current Interference Mitigation mode */
166 int interfmode;
167 /* Stack of saved values from the Interference Mitigation code.
168 * Each value in the stack is layed out as follows:
169 * bit 0-11: offset
170 * bit 12-15: register ID
171 * bit 16-32: value
172 * register ID is: 0x1 PHY, 0x2 Radio, 0x3 ILT
173 */
174#define B43_INTERFSTACK_SIZE 26
175 u32 interfstack[B43_INTERFSTACK_SIZE]; //FIXME: use a data structure
176
177 /* Saved values from the NRSSI Slope calculation */
178 s16 nrssi[2];
179 s32 nrssislope;
180 /* In memory nrssi lookup table. */
181 s8 nrssi_lt[64];
182
183 u16 lofcal;
184
185 u16 initval; //FIXME rename?
186
187 /* The device does address auto increment for the OFDM tables.
188 * We cache the previously used address here and omit the address
189 * write on the next table access, if possible. */
190 u16 ofdmtab_addr; /* The address currently set in hardware. */
191 enum { /* The last data flow direction. */
192 B43_OFDMTAB_DIRECTION_UNKNOWN = 0,
193 B43_OFDMTAB_DIRECTION_READ,
194 B43_OFDMTAB_DIRECTION_WRITE,
195 } ofdmtab_addr_direction;
196};
197
198void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev,
199 u16 baseband_attenuation);
200void b43_gphy_channel_switch(struct b43_wldev *dev,
201 unsigned int channel,
202 bool synthetic_pu_workaround);
203u8 * b43_generate_dyn_tssi2dbm_tab(struct b43_wldev *dev,
204 s16 pab0, s16 pab1, s16 pab2);
205
206struct b43_phy_operations;
207extern const struct b43_phy_operations b43_phyops_g;
208
209#endif /* LINUX_B43_PHY_G_H_ */
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
new file mode 100644
index 000000000000..c5d9dc3667c0
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -0,0 +1,155 @@
1/*
2
3 Broadcom B43 wireless driver
4 IEEE 802.11g LP-PHY driver
5
6 Copyright (c) 2008 Michael Buesch <mb@bu3sch.de>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
21 Boston, MA 02110-1301, USA.
22
23*/
24
25#include "b43.h"
26#include "phy_lp.h"
27#include "phy_common.h"
28
29
30static int b43_lpphy_op_allocate(struct b43_wldev *dev)
31{
32 struct b43_phy_lp *lpphy;
33
34 lpphy = kzalloc(sizeof(*lpphy), GFP_KERNEL);
35 if (!lpphy)
36 return -ENOMEM;
37 dev->phy.lp = lpphy;
38
39 return 0;
40}
41
42static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev)
43{
44 struct b43_phy *phy = &dev->phy;
45 struct b43_phy_lp *lpphy = phy->lp;
46
47 memset(lpphy, 0, sizeof(*lpphy));
48
49 //TODO
50}
51
52static void b43_lpphy_op_free(struct b43_wldev *dev)
53{
54 struct b43_phy_lp *lpphy = dev->phy.lp;
55
56 kfree(lpphy);
57 dev->phy.lp = NULL;
58}
59
60static int b43_lpphy_op_init(struct b43_wldev *dev)
61{
62 //TODO
63
64 return 0;
65}
66
67static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg)
68{
69 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
70 return b43_read16(dev, B43_MMIO_PHY_DATA);
71}
72
73static void b43_lpphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
74{
75 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
76 b43_write16(dev, B43_MMIO_PHY_DATA, value);
77}
78
79static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg)
80{
81 /* Register 1 is a 32-bit register. */
82 B43_WARN_ON(reg == 1);
83 /* LP-PHY needs a special bit set for read access */
84 if (dev->phy.rev < 2) {
85 if (reg != 0x4001)
86 reg |= 0x100;
87 } else
88 reg |= 0x200;
89
90 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
91 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
92}
93
94static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
95{
96 /* Register 1 is a 32-bit register. */
97 B43_WARN_ON(reg == 1);
98
99 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
100 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
101}
102
103static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
104 enum rfkill_state state)
105{
106 //TODO
107}
108
109static int b43_lpphy_op_switch_channel(struct b43_wldev *dev,
110 unsigned int new_channel)
111{
112 //TODO
113 return 0;
114}
115
116static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
117{
118 return 1; /* Default to channel 1 */
119}
120
121static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
122{
123 //TODO
124}
125
126static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev)
127{
128 //TODO
129}
130
131static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
132 bool ignore_tssi)
133{
134 //TODO
135 return B43_TXPWR_RES_DONE;
136}
137
138
139const struct b43_phy_operations b43_phyops_lp = {
140 .allocate = b43_lpphy_op_allocate,
141 .free = b43_lpphy_op_free,
142 .prepare_structs = b43_lpphy_op_prepare_structs,
143 .init = b43_lpphy_op_init,
144 .phy_read = b43_lpphy_op_read,
145 .phy_write = b43_lpphy_op_write,
146 .radio_read = b43_lpphy_op_radio_read,
147 .radio_write = b43_lpphy_op_radio_write,
148 .software_rfkill = b43_lpphy_op_software_rfkill,
149 .switch_analog = b43_phyop_switch_analog_generic,
150 .switch_channel = b43_lpphy_op_switch_channel,
151 .get_default_chan = b43_lpphy_op_get_default_chan,
152 .set_rx_antenna = b43_lpphy_op_set_rx_antenna,
153 .recalc_txpower = b43_lpphy_op_recalc_txpower,
154 .adjust_txpower = b43_lpphy_op_adjust_txpower,
155};
diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h
new file mode 100644
index 000000000000..b0b5357abf93
--- /dev/null
+++ b/drivers/net/wireless/b43/phy_lp.h
@@ -0,0 +1,540 @@
#ifndef LINUX_B43_PHY_LP_H_
#define LINUX_B43_PHY_LP_H_

/* Definitions for the LP-PHY */




/* Radio register address cooking.
 * The 2062 radio is addressed as two halves, "north" and "south";
 * south registers are selected by setting bit 14 (0x4000) of the
 * register address. The 2063 radio uses the plain address. */
#define B43_LP_RADIO(radio_reg)		(radio_reg)
#define B43_LP_NORTH(radio_reg)		B43_LP_RADIO(radio_reg)
#define B43_LP_SOUTH(radio_reg)		B43_LP_RADIO((radio_reg) | 0x4000)
13
/*** Broadcom 2062 NORTH radio registers ***/
/* Offsets 0x001, 0x067 and 0x068 have no define here; the south
 * half carries the radio ID code at offset 0x001. */
#define B2062_N_COMM1			B43_LP_NORTH(0x000) /* Common 01 (north) */
#define B2062_N_COMM2			B43_LP_NORTH(0x002) /* Common 02 (north) */
#define B2062_N_COMM3			B43_LP_NORTH(0x003) /* Common 03 (north) */
#define B2062_N_COMM4			B43_LP_NORTH(0x004) /* Common 04 (north) */
#define B2062_N_COMM5			B43_LP_NORTH(0x005) /* Common 05 (north) */
#define B2062_N_COMM6			B43_LP_NORTH(0x006) /* Common 06 (north) */
#define B2062_N_COMM7			B43_LP_NORTH(0x007) /* Common 07 (north) */
#define B2062_N_COMM8			B43_LP_NORTH(0x008) /* Common 08 (north) */
#define B2062_N_COMM9			B43_LP_NORTH(0x009) /* Common 09 (north) */
#define B2062_N_COMM10			B43_LP_NORTH(0x00A) /* Common 10 (north) */
#define B2062_N_COMM11			B43_LP_NORTH(0x00B) /* Common 11 (north) */
#define B2062_N_COMM12			B43_LP_NORTH(0x00C) /* Common 12 (north) */
#define B2062_N_COMM13			B43_LP_NORTH(0x00D) /* Common 13 (north) */
#define B2062_N_COMM14			B43_LP_NORTH(0x00E) /* Common 14 (north) */
#define B2062_N_COMM15			B43_LP_NORTH(0x00F) /* Common 15 (north) */
#define B2062_N_PDN_CTL0		B43_LP_NORTH(0x010) /* PDN Control 0 (north) */
#define B2062_N_PDN_CTL1		B43_LP_NORTH(0x011) /* PDN Control 1 (north) */
#define B2062_N_PDN_CTL2		B43_LP_NORTH(0x012) /* PDN Control 2 (north) */
#define B2062_N_PDN_CTL3		B43_LP_NORTH(0x013) /* PDN Control 3 (north) */
#define B2062_N_PDN_CTL4		B43_LP_NORTH(0x014) /* PDN Control 4 (north) */
#define B2062_N_GEN_CTL0		B43_LP_NORTH(0x015) /* GEN Control 0 (north) */
#define B2062_N_IQ_CALIB		B43_LP_NORTH(0x016) /* IQ Calibration (north) */
#define B2062_N_LGENC			B43_LP_NORTH(0x017) /* LGENC (north) */
#define B2062_N_LGENA_LPF		B43_LP_NORTH(0x018) /* LGENA LPF (north) */
#define B2062_N_LGENA_BIAS0		B43_LP_NORTH(0x019) /* LGENA Bias 0 (north) */
/* NOTE(review): "LGNEA" below looks like a typo for "LGENA"; the name is
 * kept as-is because it is part of the public interface. */
#define B2062_N_LGNEA_BIAS1		B43_LP_NORTH(0x01A) /* LGNEA Bias 1 (north) */
#define B2062_N_LGENA_CTL0		B43_LP_NORTH(0x01B) /* LGENA Control 0 (north) */
#define B2062_N_LGENA_CTL1		B43_LP_NORTH(0x01C) /* LGENA Control 1 (north) */
#define B2062_N_LGENA_CTL2		B43_LP_NORTH(0x01D) /* LGENA Control 2 (north) */
#define B2062_N_LGENA_TUNE0		B43_LP_NORTH(0x01E) /* LGENA Tune 0 (north) */
#define B2062_N_LGENA_TUNE1		B43_LP_NORTH(0x01F) /* LGENA Tune 1 (north) */
#define B2062_N_LGENA_TUNE2		B43_LP_NORTH(0x020) /* LGENA Tune 2 (north) */
#define B2062_N_LGENA_TUNE3		B43_LP_NORTH(0x021) /* LGENA Tune 3 (north) */
#define B2062_N_LGENA_CTL3		B43_LP_NORTH(0x022) /* LGENA Control 3 (north) */
#define B2062_N_LGENA_CTL4		B43_LP_NORTH(0x023) /* LGENA Control 4 (north) */
#define B2062_N_LGENA_CTL5		B43_LP_NORTH(0x024) /* LGENA Control 5 (north) */
#define B2062_N_LGENA_CTL6		B43_LP_NORTH(0x025) /* LGENA Control 6 (north) */
#define B2062_N_LGENA_CTL7		B43_LP_NORTH(0x026) /* LGENA Control 7 (north) */
#define B2062_N_RXA_CTL0		B43_LP_NORTH(0x027) /* RXA Control 0 (north) */
#define B2062_N_RXA_CTL1		B43_LP_NORTH(0x028) /* RXA Control 1 (north) */
#define B2062_N_RXA_CTL2		B43_LP_NORTH(0x029) /* RXA Control 2 (north) */
#define B2062_N_RXA_CTL3		B43_LP_NORTH(0x02A) /* RXA Control 3 (north) */
#define B2062_N_RXA_CTL4		B43_LP_NORTH(0x02B) /* RXA Control 4 (north) */
#define B2062_N_RXA_CTL5		B43_LP_NORTH(0x02C) /* RXA Control 5 (north) */
#define B2062_N_RXA_CTL6		B43_LP_NORTH(0x02D) /* RXA Control 6 (north) */
#define B2062_N_RXA_CTL7		B43_LP_NORTH(0x02E) /* RXA Control 7 (north) */
#define B2062_N_RXBB_CTL0		B43_LP_NORTH(0x02F) /* RXBB Control 0 (north) */
#define B2062_N_RXBB_CTL1		B43_LP_NORTH(0x030) /* RXBB Control 1 (north) */
#define B2062_N_RXBB_CTL2		B43_LP_NORTH(0x031) /* RXBB Control 2 (north) */
#define B2062_N_RXBB_GAIN0		B43_LP_NORTH(0x032) /* RXBB Gain 0 (north) */
#define B2062_N_RXBB_GAIN1		B43_LP_NORTH(0x033) /* RXBB Gain 1 (north) */
#define B2062_N_RXBB_GAIN2		B43_LP_NORTH(0x034) /* RXBB Gain 2 (north) */
#define B2062_N_RXBB_GAIN3		B43_LP_NORTH(0x035) /* RXBB Gain 3 (north) */
#define B2062_N_RXBB_RSSI0		B43_LP_NORTH(0x036) /* RXBB RSSI 0 (north) */
#define B2062_N_RXBB_RSSI1		B43_LP_NORTH(0x037) /* RXBB RSSI 1 (north) */
#define B2062_N_RXBB_CALIB0		B43_LP_NORTH(0x038) /* RXBB Calibration0 (north) */
#define B2062_N_RXBB_CALIB1		B43_LP_NORTH(0x039) /* RXBB Calibration1 (north) */
#define B2062_N_RXBB_CALIB2		B43_LP_NORTH(0x03A) /* RXBB Calibration2 (north) */
#define B2062_N_RXBB_BIAS0		B43_LP_NORTH(0x03B) /* RXBB Bias 0 (north) */
#define B2062_N_RXBB_BIAS1		B43_LP_NORTH(0x03C) /* RXBB Bias 1 (north) */
#define B2062_N_RXBB_BIAS2		B43_LP_NORTH(0x03D) /* RXBB Bias 2 (north) */
#define B2062_N_RXBB_BIAS3		B43_LP_NORTH(0x03E) /* RXBB Bias 3 (north) */
#define B2062_N_RXBB_BIAS4		B43_LP_NORTH(0x03F) /* RXBB Bias 4 (north) */
#define B2062_N_RXBB_BIAS5		B43_LP_NORTH(0x040) /* RXBB Bias 5 (north) */
#define B2062_N_RXBB_RSSI2		B43_LP_NORTH(0x041) /* RXBB RSSI 2 (north) */
#define B2062_N_RXBB_RSSI3		B43_LP_NORTH(0x042) /* RXBB RSSI 3 (north) */
#define B2062_N_RXBB_RSSI4		B43_LP_NORTH(0x043) /* RXBB RSSI 4 (north) */
#define B2062_N_RXBB_RSSI5		B43_LP_NORTH(0x044) /* RXBB RSSI 5 (north) */
#define B2062_N_TX_CTL0			B43_LP_NORTH(0x045) /* TX Control 0 (north) */
#define B2062_N_TX_CTL1			B43_LP_NORTH(0x046) /* TX Control 1 (north) */
#define B2062_N_TX_CTL2			B43_LP_NORTH(0x047) /* TX Control 2 (north) */
#define B2062_N_TX_CTL3			B43_LP_NORTH(0x048) /* TX Control 3 (north) */
#define B2062_N_TX_CTL4			B43_LP_NORTH(0x049) /* TX Control 4 (north) */
#define B2062_N_TX_CTL5			B43_LP_NORTH(0x04A) /* TX Control 5 (north) */
#define B2062_N_TX_CTL6			B43_LP_NORTH(0x04B) /* TX Control 6 (north) */
#define B2062_N_TX_CTL7			B43_LP_NORTH(0x04C) /* TX Control 7 (north) */
#define B2062_N_TX_CTL8			B43_LP_NORTH(0x04D) /* TX Control 8 (north) */
#define B2062_N_TX_CTL9			B43_LP_NORTH(0x04E) /* TX Control 9 (north) */
#define B2062_N_TX_CTL_A		B43_LP_NORTH(0x04F) /* TX Control A (north) */
#define B2062_N_TX_GC2G			B43_LP_NORTH(0x050) /* TX GC2G (north) */
#define B2062_N_TX_GC5G			B43_LP_NORTH(0x051) /* TX GC5G (north) */
#define B2062_N_TX_TUNE			B43_LP_NORTH(0x052) /* TX Tune (north) */
#define B2062_N_TX_PAD			B43_LP_NORTH(0x053) /* TX PAD (north) */
#define B2062_N_TX_PGA			B43_LP_NORTH(0x054) /* TX PGA (north) */
#define B2062_N_TX_PADAUX		B43_LP_NORTH(0x055) /* TX PADAUX (north) */
#define B2062_N_TX_PGAAUX		B43_LP_NORTH(0x056) /* TX PGAAUX (north) */
#define B2062_N_TSSI_CTL0		B43_LP_NORTH(0x057) /* TSSI Control 0 (north) */
#define B2062_N_TSSI_CTL1		B43_LP_NORTH(0x058) /* TSSI Control 1 (north) */
#define B2062_N_TSSI_CTL2		B43_LP_NORTH(0x059) /* TSSI Control 2 (north) */
#define B2062_N_IQ_CALIB_CTL0		B43_LP_NORTH(0x05A) /* IQ Calibration Control 0 (north) */
#define B2062_N_IQ_CALIB_CTL1		B43_LP_NORTH(0x05B) /* IQ Calibration Control 1 (north) */
#define B2062_N_IQ_CALIB_CTL2		B43_LP_NORTH(0x05C) /* IQ Calibration Control 2 (north) */
#define B2062_N_CALIB_TS		B43_LP_NORTH(0x05D) /* Calibration TS (north) */
#define B2062_N_CALIB_CTL0		B43_LP_NORTH(0x05E) /* Calibration Control 0 (north) */
#define B2062_N_CALIB_CTL1		B43_LP_NORTH(0x05F) /* Calibration Control 1 (north) */
#define B2062_N_CALIB_CTL2		B43_LP_NORTH(0x060) /* Calibration Control 2 (north) */
#define B2062_N_CALIB_CTL3		B43_LP_NORTH(0x061) /* Calibration Control 3 (north) */
#define B2062_N_CALIB_CTL4		B43_LP_NORTH(0x062) /* Calibration Control 4 (north) */
#define B2062_N_CALIB_DBG0		B43_LP_NORTH(0x063) /* Calibration Debug 0 (north) */
#define B2062_N_CALIB_DBG1		B43_LP_NORTH(0x064) /* Calibration Debug 1 (north) */
#define B2062_N_CALIB_DBG2		B43_LP_NORTH(0x065) /* Calibration Debug 2 (north) */
#define B2062_N_CALIB_DBG3		B43_LP_NORTH(0x066) /* Calibration Debug 3 (north) */
#define B2062_N_PSENSE_CTL0		B43_LP_NORTH(0x069) /* PSENSE Control 0 (north) */
#define B2062_N_PSENSE_CTL1		B43_LP_NORTH(0x06A) /* PSENSE Control 1 (north) */
#define B2062_N_PSENSE_CTL2		B43_LP_NORTH(0x06B) /* PSENSE Control 2 (north) */
#define B2062_N_TEST_BUF0		B43_LP_NORTH(0x06C) /* TEST BUF0 (north) */
121
/*** Broadcom 2062 SOUTH radio registers ***/
/* The south half holds the radio ID code register at offset 0x001. */
#define B2062_S_COMM1			B43_LP_SOUTH(0x000) /* Common 01 (south) */
#define B2062_S_RADIO_ID_CODE		B43_LP_SOUTH(0x001) /* Radio ID code (south) */
#define B2062_S_COMM2			B43_LP_SOUTH(0x002) /* Common 02 (south) */
#define B2062_S_COMM3			B43_LP_SOUTH(0x003) /* Common 03 (south) */
#define B2062_S_COMM4			B43_LP_SOUTH(0x004) /* Common 04 (south) */
#define B2062_S_COMM5			B43_LP_SOUTH(0x005) /* Common 05 (south) */
#define B2062_S_COMM6			B43_LP_SOUTH(0x006) /* Common 06 (south) */
#define B2062_S_COMM7			B43_LP_SOUTH(0x007) /* Common 07 (south) */
#define B2062_S_COMM8			B43_LP_SOUTH(0x008) /* Common 08 (south) */
#define B2062_S_COMM9			B43_LP_SOUTH(0x009) /* Common 09 (south) */
#define B2062_S_COMM10			B43_LP_SOUTH(0x00A) /* Common 10 (south) */
#define B2062_S_COMM11			B43_LP_SOUTH(0x00B) /* Common 11 (south) */
#define B2062_S_COMM12			B43_LP_SOUTH(0x00C) /* Common 12 (south) */
#define B2062_S_COMM13			B43_LP_SOUTH(0x00D) /* Common 13 (south) */
#define B2062_S_COMM14			B43_LP_SOUTH(0x00E) /* Common 14 (south) */
#define B2062_S_COMM15			B43_LP_SOUTH(0x00F) /* Common 15 (south) */
#define B2062_S_PDS_CTL0		B43_LP_SOUTH(0x010) /* PDS Control 0 (south) */
#define B2062_S_PDS_CTL1		B43_LP_SOUTH(0x011) /* PDS Control 1 (south) */
#define B2062_S_PDS_CTL2		B43_LP_SOUTH(0x012) /* PDS Control 2 (south) */
#define B2062_S_PDS_CTL3		B43_LP_SOUTH(0x013) /* PDS Control 3 (south) */
#define B2062_S_BG_CTL0			B43_LP_SOUTH(0x014) /* BG Control 0 (south) */
#define B2062_S_BG_CTL1			B43_LP_SOUTH(0x015) /* BG Control 1 (south) */
#define B2062_S_BG_CTL2			B43_LP_SOUTH(0x016) /* BG Control 2 (south) */
#define B2062_S_LGENG_CTL0		B43_LP_SOUTH(0x017) /* LGENG Control 00 (south) */
#define B2062_S_LGENG_CTL1		B43_LP_SOUTH(0x018) /* LGENG Control 01 (south) */
#define B2062_S_LGENG_CTL2		B43_LP_SOUTH(0x019) /* LGENG Control 02 (south) */
#define B2062_S_LGENG_CTL3		B43_LP_SOUTH(0x01A) /* LGENG Control 03 (south) */
#define B2062_S_LGENG_CTL4		B43_LP_SOUTH(0x01B) /* LGENG Control 04 (south) */
#define B2062_S_LGENG_CTL5		B43_LP_SOUTH(0x01C) /* LGENG Control 05 (south) */
#define B2062_S_LGENG_CTL6		B43_LP_SOUTH(0x01D) /* LGENG Control 06 (south) */
#define B2062_S_LGENG_CTL7		B43_LP_SOUTH(0x01E) /* LGENG Control 07 (south) */
#define B2062_S_LGENG_CTL8		B43_LP_SOUTH(0x01F) /* LGENG Control 08 (south) */
#define B2062_S_LGENG_CTL9		B43_LP_SOUTH(0x020) /* LGENG Control 09 (south) */
#define B2062_S_LGENG_CTL10		B43_LP_SOUTH(0x021) /* LGENG Control 10 (south) */
#define B2062_S_LGENG_CTL11		B43_LP_SOUTH(0x022) /* LGENG Control 11 (south) */
#define B2062_S_REFPLL_CTL0		B43_LP_SOUTH(0x023) /* REFPLL Control 00 (south) */
#define B2062_S_REFPLL_CTL1		B43_LP_SOUTH(0x024) /* REFPLL Control 01 (south) */
#define B2062_S_REFPLL_CTL2		B43_LP_SOUTH(0x025) /* REFPLL Control 02 (south) */
#define B2062_S_REFPLL_CTL3		B43_LP_SOUTH(0x026) /* REFPLL Control 03 (south) */
#define B2062_S_REFPLL_CTL4		B43_LP_SOUTH(0x027) /* REFPLL Control 04 (south) */
#define B2062_S_REFPLL_CTL5		B43_LP_SOUTH(0x028) /* REFPLL Control 05 (south) */
#define B2062_S_REFPLL_CTL6		B43_LP_SOUTH(0x029) /* REFPLL Control 06 (south) */
#define B2062_S_REFPLL_CTL7		B43_LP_SOUTH(0x02A) /* REFPLL Control 07 (south) */
#define B2062_S_REFPLL_CTL8		B43_LP_SOUTH(0x02B) /* REFPLL Control 08 (south) */
#define B2062_S_REFPLL_CTL9		B43_LP_SOUTH(0x02C) /* REFPLL Control 09 (south) */
#define B2062_S_REFPLL_CTL10		B43_LP_SOUTH(0x02D) /* REFPLL Control 10 (south) */
#define B2062_S_REFPLL_CTL11		B43_LP_SOUTH(0x02E) /* REFPLL Control 11 (south) */
#define B2062_S_REFPLL_CTL12		B43_LP_SOUTH(0x02F) /* REFPLL Control 12 (south) */
#define B2062_S_REFPLL_CTL13		B43_LP_SOUTH(0x030) /* REFPLL Control 13 (south) */
#define B2062_S_REFPLL_CTL14		B43_LP_SOUTH(0x031) /* REFPLL Control 14 (south) */
#define B2062_S_REFPLL_CTL15		B43_LP_SOUTH(0x032) /* REFPLL Control 15 (south) */
#define B2062_S_REFPLL_CTL16		B43_LP_SOUTH(0x033) /* REFPLL Control 16 (south) */
#define B2062_S_RFPLL_CTL0		B43_LP_SOUTH(0x034) /* RFPLL Control 00 (south) */
#define B2062_S_RFPLL_CTL1		B43_LP_SOUTH(0x035) /* RFPLL Control 01 (south) */
#define B2062_S_RFPLL_CTL2		B43_LP_SOUTH(0x036) /* RFPLL Control 02 (south) */
#define B2062_S_RFPLL_CTL3		B43_LP_SOUTH(0x037) /* RFPLL Control 03 (south) */
#define B2062_S_RFPLL_CTL4		B43_LP_SOUTH(0x038) /* RFPLL Control 04 (south) */
#define B2062_S_RFPLL_CTL5		B43_LP_SOUTH(0x039) /* RFPLL Control 05 (south) */
#define B2062_S_RFPLL_CTL6		B43_LP_SOUTH(0x03A) /* RFPLL Control 06 (south) */
#define B2062_S_RFPLL_CTL7		B43_LP_SOUTH(0x03B) /* RFPLL Control 07 (south) */
#define B2062_S_RFPLL_CTL8		B43_LP_SOUTH(0x03C) /* RFPLL Control 08 (south) */
#define B2062_S_RFPLL_CTL9		B43_LP_SOUTH(0x03D) /* RFPLL Control 09 (south) */
#define B2062_S_RFPLL_CTL10		B43_LP_SOUTH(0x03E) /* RFPLL Control 10 (south) */
#define B2062_S_RFPLL_CTL11		B43_LP_SOUTH(0x03F) /* RFPLL Control 11 (south) */
#define B2062_S_RFPLL_CTL12		B43_LP_SOUTH(0x040) /* RFPLL Control 12 (south) */
#define B2062_S_RFPLL_CTL13		B43_LP_SOUTH(0x041) /* RFPLL Control 13 (south) */
#define B2062_S_RFPLL_CTL14		B43_LP_SOUTH(0x042) /* RFPLL Control 14 (south) */
#define B2062_S_RFPLL_CTL15		B43_LP_SOUTH(0x043) /* RFPLL Control 15 (south) */
#define B2062_S_RFPLL_CTL16		B43_LP_SOUTH(0x044) /* RFPLL Control 16 (south) */
#define B2062_S_RFPLL_CTL17		B43_LP_SOUTH(0x045) /* RFPLL Control 17 (south) */
#define B2062_S_RFPLL_CTL18		B43_LP_SOUTH(0x046) /* RFPLL Control 18 (south) */
#define B2062_S_RFPLL_CTL19		B43_LP_SOUTH(0x047) /* RFPLL Control 19 (south) */
#define B2062_S_RFPLL_CTL20		B43_LP_SOUTH(0x048) /* RFPLL Control 20 (south) */
#define B2062_S_RFPLL_CTL21		B43_LP_SOUTH(0x049) /* RFPLL Control 21 (south) */
#define B2062_S_RFPLL_CTL22		B43_LP_SOUTH(0x04A) /* RFPLL Control 22 (south) */
#define B2062_S_RFPLL_CTL23		B43_LP_SOUTH(0x04B) /* RFPLL Control 23 (south) */
#define B2062_S_RFPLL_CTL24		B43_LP_SOUTH(0x04C) /* RFPLL Control 24 (south) */
#define B2062_S_RFPLL_CTL25		B43_LP_SOUTH(0x04D) /* RFPLL Control 25 (south) */
#define B2062_S_RFPLL_CTL26		B43_LP_SOUTH(0x04E) /* RFPLL Control 26 (south) */
#define B2062_S_RFPLL_CTL27		B43_LP_SOUTH(0x04F) /* RFPLL Control 27 (south) */
#define B2062_S_RFPLL_CTL28		B43_LP_SOUTH(0x050) /* RFPLL Control 28 (south) */
#define B2062_S_RFPLL_CTL29		B43_LP_SOUTH(0x051) /* RFPLL Control 29 (south) */
#define B2062_S_RFPLL_CTL30		B43_LP_SOUTH(0x052) /* RFPLL Control 30 (south) */
#define B2062_S_RFPLL_CTL31		B43_LP_SOUTH(0x053) /* RFPLL Control 31 (south) */
#define B2062_S_RFPLL_CTL32		B43_LP_SOUTH(0x054) /* RFPLL Control 32 (south) */
#define B2062_S_RFPLL_CTL33		B43_LP_SOUTH(0x055) /* RFPLL Control 33 (south) */
#define B2062_S_RFPLL_CTL34		B43_LP_SOUTH(0x056) /* RFPLL Control 34 (south) */
#define B2062_S_RXG_CNT0		B43_LP_SOUTH(0x057) /* RXG Counter 00 (south) */
#define B2062_S_RXG_CNT1		B43_LP_SOUTH(0x058) /* RXG Counter 01 (south) */
#define B2062_S_RXG_CNT2		B43_LP_SOUTH(0x059) /* RXG Counter 02 (south) */
#define B2062_S_RXG_CNT3		B43_LP_SOUTH(0x05A) /* RXG Counter 03 (south) */
#define B2062_S_RXG_CNT4		B43_LP_SOUTH(0x05B) /* RXG Counter 04 (south) */
#define B2062_S_RXG_CNT5		B43_LP_SOUTH(0x05C) /* RXG Counter 05 (south) */
#define B2062_S_RXG_CNT6		B43_LP_SOUTH(0x05D) /* RXG Counter 06 (south) */
#define B2062_S_RXG_CNT7		B43_LP_SOUTH(0x05E) /* RXG Counter 07 (south) */
#define B2062_S_RXG_CNT8		B43_LP_SOUTH(0x05F) /* RXG Counter 08 (south) */
#define B2062_S_RXG_CNT9		B43_LP_SOUTH(0x060) /* RXG Counter 09 (south) */
#define B2062_S_RXG_CNT10		B43_LP_SOUTH(0x061) /* RXG Counter 10 (south) */
#define B2062_S_RXG_CNT11		B43_LP_SOUTH(0x062) /* RXG Counter 11 (south) */
#define B2062_S_RXG_CNT12		B43_LP_SOUTH(0x063) /* RXG Counter 12 (south) */
#define B2062_S_RXG_CNT13		B43_LP_SOUTH(0x064) /* RXG Counter 13 (south) */
#define B2062_S_RXG_CNT14		B43_LP_SOUTH(0x065) /* RXG Counter 14 (south) */
#define B2062_S_RXG_CNT15		B43_LP_SOUTH(0x066) /* RXG Counter 15 (south) */
#define B2062_S_RXG_CNT16		B43_LP_SOUTH(0x067) /* RXG Counter 16 (south) */
#define B2062_S_RXG_CNT17		B43_LP_SOUTH(0x068) /* RXG Counter 17 (south) */
228
229
230
/*** Broadcom 2063 radio registers ***/
/* The 2063 is not split into halves; plain addresses are used.
 * The ID-code register (0x001) is listed first, before COMM1 (0x000). */
#define B2063_RADIO_ID_CODE		B43_LP_RADIO(0x001) /* Radio ID code */
#define B2063_COMM1			B43_LP_RADIO(0x000) /* Common 01 */
#define B2063_COMM2			B43_LP_RADIO(0x002) /* Common 02 */
#define B2063_COMM3			B43_LP_RADIO(0x003) /* Common 03 */
#define B2063_COMM4			B43_LP_RADIO(0x004) /* Common 04 */
#define B2063_COMM5			B43_LP_RADIO(0x005) /* Common 05 */
#define B2063_COMM6			B43_LP_RADIO(0x006) /* Common 06 */
#define B2063_COMM7			B43_LP_RADIO(0x007) /* Common 07 */
#define B2063_COMM8			B43_LP_RADIO(0x008) /* Common 08 */
#define B2063_COMM9			B43_LP_RADIO(0x009) /* Common 09 */
#define B2063_COMM10			B43_LP_RADIO(0x00A) /* Common 10 */
#define B2063_COMM11			B43_LP_RADIO(0x00B) /* Common 11 */
#define B2063_COMM12			B43_LP_RADIO(0x00C) /* Common 12 */
#define B2063_COMM13			B43_LP_RADIO(0x00D) /* Common 13 */
#define B2063_COMM14			B43_LP_RADIO(0x00E) /* Common 14 */
#define B2063_COMM15			B43_LP_RADIO(0x00F) /* Common 15 */
#define B2063_COMM16			B43_LP_RADIO(0x010) /* Common 16 */
#define B2063_COMM17			B43_LP_RADIO(0x011) /* Common 17 */
#define B2063_COMM18			B43_LP_RADIO(0x012) /* Common 18 */
#define B2063_COMM19			B43_LP_RADIO(0x013) /* Common 19 */
#define B2063_COMM20			B43_LP_RADIO(0x014) /* Common 20 */
#define B2063_COMM21			B43_LP_RADIO(0x015) /* Common 21 */
#define B2063_COMM22			B43_LP_RADIO(0x016) /* Common 22 */
#define B2063_COMM23			B43_LP_RADIO(0x017) /* Common 23 */
#define B2063_COMM24			B43_LP_RADIO(0x018) /* Common 24 */
#define B2063_PWR_SWITCH_CTL		B43_LP_RADIO(0x019) /* POWER SWITCH Control */
#define B2063_PLL_SP1			B43_LP_RADIO(0x01A) /* PLL SP 1 */
#define B2063_PLL_SP2			B43_LP_RADIO(0x01B) /* PLL SP 2 */
#define B2063_LOGEN_SP1			B43_LP_RADIO(0x01C) /* LOGEN SP 1 */
#define B2063_LOGEN_SP2			B43_LP_RADIO(0x01D) /* LOGEN SP 2 */
#define B2063_LOGEN_SP3			B43_LP_RADIO(0x01E) /* LOGEN SP 3 */
#define B2063_LOGEN_SP4			B43_LP_RADIO(0x01F) /* LOGEN SP 4 */
#define B2063_LOGEN_SP5			B43_LP_RADIO(0x020) /* LOGEN SP 5 */
#define B2063_G_RX_SP1			B43_LP_RADIO(0x021) /* G RX SP 1 */
#define B2063_G_RX_SP2			B43_LP_RADIO(0x022) /* G RX SP 2 */
#define B2063_G_RX_SP3			B43_LP_RADIO(0x023) /* G RX SP 3 */
#define B2063_G_RX_SP4			B43_LP_RADIO(0x024) /* G RX SP 4 */
#define B2063_G_RX_SP5			B43_LP_RADIO(0x025) /* G RX SP 5 */
#define B2063_G_RX_SP6			B43_LP_RADIO(0x026) /* G RX SP 6 */
#define B2063_G_RX_SP7			B43_LP_RADIO(0x027) /* G RX SP 7 */
#define B2063_G_RX_SP8			B43_LP_RADIO(0x028) /* G RX SP 8 */
#define B2063_G_RX_SP9			B43_LP_RADIO(0x029) /* G RX SP 9 */
#define B2063_G_RX_SP10			B43_LP_RADIO(0x02A) /* G RX SP 10 */
#define B2063_G_RX_SP11			B43_LP_RADIO(0x02B) /* G RX SP 11 */
#define B2063_A_RX_SP1			B43_LP_RADIO(0x02C) /* A RX SP 1 */
#define B2063_A_RX_SP2			B43_LP_RADIO(0x02D) /* A RX SP 2 */
#define B2063_A_RX_SP3			B43_LP_RADIO(0x02E) /* A RX SP 3 */
#define B2063_A_RX_SP4			B43_LP_RADIO(0x02F) /* A RX SP 4 */
#define B2063_A_RX_SP5			B43_LP_RADIO(0x030) /* A RX SP 5 */
#define B2063_A_RX_SP6			B43_LP_RADIO(0x031) /* A RX SP 6 */
#define B2063_A_RX_SP7			B43_LP_RADIO(0x032) /* A RX SP 7 */
#define B2063_RX_BB_SP1			B43_LP_RADIO(0x033) /* RX BB SP 1 */
#define B2063_RX_BB_SP2			B43_LP_RADIO(0x034) /* RX BB SP 2 */
#define B2063_RX_BB_SP3			B43_LP_RADIO(0x035) /* RX BB SP 3 */
#define B2063_RX_BB_SP4			B43_LP_RADIO(0x036) /* RX BB SP 4 */
#define B2063_RX_BB_SP5			B43_LP_RADIO(0x037) /* RX BB SP 5 */
#define B2063_RX_BB_SP6			B43_LP_RADIO(0x038) /* RX BB SP 6 */
#define B2063_RX_BB_SP7			B43_LP_RADIO(0x039) /* RX BB SP 7 */
#define B2063_RX_BB_SP8			B43_LP_RADIO(0x03A) /* RX BB SP 8 */
#define B2063_TX_RF_SP1			B43_LP_RADIO(0x03B) /* TX RF SP 1 */
#define B2063_TX_RF_SP2			B43_LP_RADIO(0x03C) /* TX RF SP 2 */
#define B2063_TX_RF_SP3			B43_LP_RADIO(0x03D) /* TX RF SP 3 */
#define B2063_TX_RF_SP4			B43_LP_RADIO(0x03E) /* TX RF SP 4 */
#define B2063_TX_RF_SP5			B43_LP_RADIO(0x03F) /* TX RF SP 5 */
#define B2063_TX_RF_SP6			B43_LP_RADIO(0x040) /* TX RF SP 6 */
#define B2063_TX_RF_SP7			B43_LP_RADIO(0x041) /* TX RF SP 7 */
#define B2063_TX_RF_SP8			B43_LP_RADIO(0x042) /* TX RF SP 8 */
#define B2063_TX_RF_SP9			B43_LP_RADIO(0x043) /* TX RF SP 9 */
#define B2063_TX_RF_SP10		B43_LP_RADIO(0x044) /* TX RF SP 10 */
#define B2063_TX_RF_SP11		B43_LP_RADIO(0x045) /* TX RF SP 11 */
#define B2063_TX_RF_SP12		B43_LP_RADIO(0x046) /* TX RF SP 12 */
#define B2063_TX_RF_SP13		B43_LP_RADIO(0x047) /* TX RF SP 13 */
#define B2063_TX_RF_SP14		B43_LP_RADIO(0x048) /* TX RF SP 14 */
#define B2063_TX_RF_SP15		B43_LP_RADIO(0x049) /* TX RF SP 15 */
#define B2063_TX_RF_SP16		B43_LP_RADIO(0x04A) /* TX RF SP 16 */
#define B2063_TX_RF_SP17		B43_LP_RADIO(0x04B) /* TX RF SP 17 */
#define B2063_PA_SP1			B43_LP_RADIO(0x04C) /* PA SP 1 */
#define B2063_PA_SP2			B43_LP_RADIO(0x04D) /* PA SP 2 */
#define B2063_PA_SP3			B43_LP_RADIO(0x04E) /* PA SP 3 */
#define B2063_PA_SP4			B43_LP_RADIO(0x04F) /* PA SP 4 */
#define B2063_PA_SP5			B43_LP_RADIO(0x050) /* PA SP 5 */
#define B2063_PA_SP6			B43_LP_RADIO(0x051) /* PA SP 6 */
#define B2063_PA_SP7			B43_LP_RADIO(0x052) /* PA SP 7 */
#define B2063_TX_BB_SP1			B43_LP_RADIO(0x053) /* TX BB SP 1 */
#define B2063_TX_BB_SP2			B43_LP_RADIO(0x054) /* TX BB SP 2 */
#define B2063_TX_BB_SP3			B43_LP_RADIO(0x055) /* TX BB SP 3 */
#define B2063_REG_SP1			B43_LP_RADIO(0x056) /* REG SP 1 */
#define B2063_BANDGAP_CTL1		B43_LP_RADIO(0x057) /* BANDGAP Control 1 */
#define B2063_BANDGAP_CTL2		B43_LP_RADIO(0x058) /* BANDGAP Control 2 */
#define B2063_LPO_CTL1			B43_LP_RADIO(0x059) /* LPO Control 1 */
#define B2063_RC_CALIB_CTL1		B43_LP_RADIO(0x05A) /* RC Calibration Control 1 */
#define B2063_RC_CALIB_CTL2		B43_LP_RADIO(0x05B) /* RC Calibration Control 2 */
#define B2063_RC_CALIB_CTL3		B43_LP_RADIO(0x05C) /* RC Calibration Control 3 */
#define B2063_RC_CALIB_CTL4		B43_LP_RADIO(0x05D) /* RC Calibration Control 4 */
#define B2063_RC_CALIB_CTL5		B43_LP_RADIO(0x05E) /* RC Calibration Control 5 */
#define B2063_RC_CALIB_CTL6		B43_LP_RADIO(0x05F) /* RC Calibration Control 6 */
#define B2063_RC_CALIB_CTL7		B43_LP_RADIO(0x060) /* RC Calibration Control 7 */
#define B2063_RC_CALIB_CTL8		B43_LP_RADIO(0x061) /* RC Calibration Control 8 */
#define B2063_RC_CALIB_CTL9		B43_LP_RADIO(0x062) /* RC Calibration Control 9 */
#define B2063_RC_CALIB_CTL10		B43_LP_RADIO(0x063) /* RC Calibration Control 10 */
#define B2063_PLL_JTAG_CALNRST		B43_LP_RADIO(0x064) /* PLL JTAG CALNRST */
#define B2063_PLL_JTAG_IN_PLL1		B43_LP_RADIO(0x065) /* PLL JTAG IN PLL 1 */
#define B2063_PLL_JTAG_IN_PLL2		B43_LP_RADIO(0x066) /* PLL JTAG IN PLL 2 */
#define B2063_PLL_JTAG_PLL_CP1		B43_LP_RADIO(0x067) /* PLL JTAG PLL CP 1 */
#define B2063_PLL_JTAG_PLL_CP2		B43_LP_RADIO(0x068) /* PLL JTAG PLL CP 2 */
#define B2063_PLL_JTAG_PLL_CP3		B43_LP_RADIO(0x069) /* PLL JTAG PLL CP 3 */
#define B2063_PLL_JTAG_PLL_CP4		B43_LP_RADIO(0x06A) /* PLL JTAG PLL CP 4 */
#define B2063_PLL_JTAG_PLL_CTL1		B43_LP_RADIO(0x06B) /* PLL JTAG PLL Control 1 */
#define B2063_PLL_JTAG_PLL_LF1		B43_LP_RADIO(0x06C) /* PLL JTAG PLL LF 1 */
#define B2063_PLL_JTAG_PLL_LF2		B43_LP_RADIO(0x06D) /* PLL JTAG PLL LF 2 */
#define B2063_PLL_JTAG_PLL_LF3		B43_LP_RADIO(0x06E) /* PLL JTAG PLL LF 3 */
#define B2063_PLL_JTAG_PLL_LF4		B43_LP_RADIO(0x06F) /* PLL JTAG PLL LF 4 */
#define B2063_PLL_JTAG_PLL_SG1		B43_LP_RADIO(0x070) /* PLL JTAG PLL SG 1 */
#define B2063_PLL_JTAG_PLL_SG2		B43_LP_RADIO(0x071) /* PLL JTAG PLL SG 2 */
#define B2063_PLL_JTAG_PLL_SG3		B43_LP_RADIO(0x072) /* PLL JTAG PLL SG 3 */
#define B2063_PLL_JTAG_PLL_SG4		B43_LP_RADIO(0x073) /* PLL JTAG PLL SG 4 */
#define B2063_PLL_JTAG_PLL_SG5		B43_LP_RADIO(0x074) /* PLL JTAG PLL SG 5 */
#define B2063_PLL_JTAG_PLL_VCO1		B43_LP_RADIO(0x075) /* PLL JTAG PLL VCO 1 */
#define B2063_PLL_JTAG_PLL_VCO2		B43_LP_RADIO(0x076) /* PLL JTAG PLL VCO 2 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB1	B43_LP_RADIO(0x077) /* PLL JTAG PLL VCO Calibration 1 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB2	B43_LP_RADIO(0x078) /* PLL JTAG PLL VCO Calibration 2 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB3	B43_LP_RADIO(0x079) /* PLL JTAG PLL VCO Calibration 3 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB4	B43_LP_RADIO(0x07A) /* PLL JTAG PLL VCO Calibration 4 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB5	B43_LP_RADIO(0x07B) /* PLL JTAG PLL VCO Calibration 5 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB6	B43_LP_RADIO(0x07C) /* PLL JTAG PLL VCO Calibration 6 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB7	B43_LP_RADIO(0x07D) /* PLL JTAG PLL VCO Calibration 7 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB8	B43_LP_RADIO(0x07E) /* PLL JTAG PLL VCO Calibration 8 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB9	B43_LP_RADIO(0x07F) /* PLL JTAG PLL VCO Calibration 9 */
#define B2063_PLL_JTAG_PLL_VCO_CALIB10	B43_LP_RADIO(0x080) /* PLL JTAG PLL VCO Calibration 10 */
#define B2063_PLL_JTAG_PLL_XTAL_12	B43_LP_RADIO(0x081) /* PLL JTAG PLL XTAL 1 2 */
#define B2063_PLL_JTAG_PLL_XTAL3	B43_LP_RADIO(0x082) /* PLL JTAG PLL XTAL 3 */
#define B2063_LOGEN_ACL1		B43_LP_RADIO(0x083) /* LOGEN ACL 1 */
#define B2063_LOGEN_ACL2		B43_LP_RADIO(0x084) /* LOGEN ACL 2 */
#define B2063_LOGEN_ACL3		B43_LP_RADIO(0x085) /* LOGEN ACL 3 */
#define B2063_LOGEN_ACL4		B43_LP_RADIO(0x086) /* LOGEN ACL 4 */
#define B2063_LOGEN_ACL5		B43_LP_RADIO(0x087) /* LOGEN ACL 5 */
#define B2063_LO_CALIB_INPUTS		B43_LP_RADIO(0x088) /* LO Calibration INPUTS */
#define B2063_LO_CALIB_CTL1		B43_LP_RADIO(0x089) /* LO Calibration Control 1 */
#define B2063_LO_CALIB_CTL2		B43_LP_RADIO(0x08A) /* LO Calibration Control 2 */
#define B2063_LO_CALIB_CTL3		B43_LP_RADIO(0x08B) /* LO Calibration Control 3 */
#define B2063_LO_CALIB_WAITCNT		B43_LP_RADIO(0x08C) /* LO Calibration WAITCNT */
#define B2063_LO_CALIB_OVR1		B43_LP_RADIO(0x08D) /* LO Calibration OVR 1 */
#define B2063_LO_CALIB_OVR2		B43_LP_RADIO(0x08E) /* LO Calibration OVR 2 */
#define B2063_LO_CALIB_OVAL1		B43_LP_RADIO(0x08F) /* LO Calibration OVAL 1 */
#define B2063_LO_CALIB_OVAL2		B43_LP_RADIO(0x090) /* LO Calibration OVAL 2 */
#define B2063_LO_CALIB_OVAL3		B43_LP_RADIO(0x091) /* LO Calibration OVAL 3 */
#define B2063_LO_CALIB_OVAL4		B43_LP_RADIO(0x092) /* LO Calibration OVAL 4 */
#define B2063_LO_CALIB_OVAL5		B43_LP_RADIO(0x093) /* LO Calibration OVAL 5 */
#define B2063_LO_CALIB_OVAL6		B43_LP_RADIO(0x094) /* LO Calibration OVAL 6 */
#define B2063_LO_CALIB_OVAL7		B43_LP_RADIO(0x095) /* LO Calibration OVAL 7 */
#define B2063_LO_CALIB_CALVLD1		B43_LP_RADIO(0x096) /* LO Calibration CALVLD 1 */
#define B2063_LO_CALIB_CALVLD2		B43_LP_RADIO(0x097) /* LO Calibration CALVLD 2 */
#define B2063_LO_CALIB_CVAL1		B43_LP_RADIO(0x098) /* LO Calibration CVAL 1 */
#define B2063_LO_CALIB_CVAL2		B43_LP_RADIO(0x099) /* LO Calibration CVAL 2 */
#define B2063_LO_CALIB_CVAL3		B43_LP_RADIO(0x09A) /* LO Calibration CVAL 3 */
#define B2063_LO_CALIB_CVAL4		B43_LP_RADIO(0x09B) /* LO Calibration CVAL 4 */
#define B2063_LO_CALIB_CVAL5		B43_LP_RADIO(0x09C) /* LO Calibration CVAL 5 */
#define B2063_LO_CALIB_CVAL6		B43_LP_RADIO(0x09D) /* LO Calibration CVAL 6 */
#define B2063_LO_CALIB_CVAL7		B43_LP_RADIO(0x09E) /* LO Calibration CVAL 7 */
#define B2063_LOGEN_CALIB_EN		B43_LP_RADIO(0x09F) /* LOGEN Calibration EN */
#define B2063_LOGEN_PEAKDET1		B43_LP_RADIO(0x0A0) /* LOGEN PEAKDET 1 */
#define B2063_LOGEN_RCCR1		B43_LP_RADIO(0x0A1) /* LOGEN RCCR 1 */
#define B2063_LOGEN_VCOBUF1		B43_LP_RADIO(0x0A2) /* LOGEN VCOBUF 1 */
#define B2063_LOGEN_MIXER1		B43_LP_RADIO(0x0A3) /* LOGEN MIXER 1 */
#define B2063_LOGEN_MIXER2		B43_LP_RADIO(0x0A4) /* LOGEN MIXER 2 */
#define B2063_LOGEN_BUF1		B43_LP_RADIO(0x0A5) /* LOGEN BUF 1 */
#define B2063_LOGEN_BUF2		B43_LP_RADIO(0x0A6) /* LOGEN BUF 2 */
#define B2063_LOGEN_DIV1		B43_LP_RADIO(0x0A7) /* LOGEN DIV 1 */
#define B2063_LOGEN_DIV2		B43_LP_RADIO(0x0A8) /* LOGEN DIV 2 */
#define B2063_LOGEN_DIV3		B43_LP_RADIO(0x0A9) /* LOGEN DIV 3 */
#define B2063_LOGEN_CBUFRX1		B43_LP_RADIO(0x0AA) /* LOGEN CBUFRX 1 */
#define B2063_LOGEN_CBUFRX2		B43_LP_RADIO(0x0AB) /* LOGEN CBUFRX 2 */
#define B2063_LOGEN_CBUFTX1		B43_LP_RADIO(0x0AC) /* LOGEN CBUFTX 1 */
#define B2063_LOGEN_CBUFTX2		B43_LP_RADIO(0x0AD) /* LOGEN CBUFTX 2 */
#define B2063_LOGEN_IDAC1		B43_LP_RADIO(0x0AE) /* LOGEN IDAC 1 */
#define B2063_LOGEN_SPARE1		B43_LP_RADIO(0x0AF) /* LOGEN SPARE 1 */
#define B2063_LOGEN_SPARE2		B43_LP_RADIO(0x0B0) /* LOGEN SPARE 2 */
#define B2063_LOGEN_SPARE3		B43_LP_RADIO(0x0B1) /* LOGEN SPARE 3 */
#define B2063_G_RX_1ST1			B43_LP_RADIO(0x0B2) /* G RX 1ST 1 */
#define B2063_G_RX_1ST2			B43_LP_RADIO(0x0B3) /* G RX 1ST 2 */
#define B2063_G_RX_1ST3			B43_LP_RADIO(0x0B4) /* G RX 1ST 3 */
#define B2063_G_RX_2ND1			B43_LP_RADIO(0x0B5) /* G RX 2ND 1 */
#define B2063_G_RX_2ND2			B43_LP_RADIO(0x0B6) /* G RX 2ND 2 */
#define B2063_G_RX_2ND3			B43_LP_RADIO(0x0B7) /* G RX 2ND 3 */
#define B2063_G_RX_2ND4			B43_LP_RADIO(0x0B8) /* G RX 2ND 4 */
#define B2063_G_RX_2ND5			B43_LP_RADIO(0x0B9) /* G RX 2ND 5 */
#define B2063_G_RX_2ND6			B43_LP_RADIO(0x0BA) /* G RX 2ND 6 */
#define B2063_G_RX_2ND7			B43_LP_RADIO(0x0BB) /* G RX 2ND 7 */
#define B2063_G_RX_2ND8			B43_LP_RADIO(0x0BC) /* G RX 2ND 8 */
#define B2063_G_RX_PS1			B43_LP_RADIO(0x0BD) /* G RX PS 1 */
#define B2063_G_RX_PS2			B43_LP_RADIO(0x0BE) /* G RX PS 2 */
#define B2063_G_RX_PS3			B43_LP_RADIO(0x0BF) /* G RX PS 3 */
#define B2063_G_RX_PS4			B43_LP_RADIO(0x0C0) /* G RX PS 4 */
#define B2063_G_RX_PS5			B43_LP_RADIO(0x0C1) /* G RX PS 5 */
#define B2063_G_RX_MIX1			B43_LP_RADIO(0x0C2) /* G RX MIX 1 */
#define B2063_G_RX_MIX2			B43_LP_RADIO(0x0C3) /* G RX MIX 2 */
#define B2063_G_RX_MIX3			B43_LP_RADIO(0x0C4) /* G RX MIX 3 */
#define B2063_G_RX_MIX4			B43_LP_RADIO(0x0C5) /* G RX MIX 4 */
#define B2063_G_RX_MIX5			B43_LP_RADIO(0x0C6) /* G RX MIX 5 */
#define B2063_G_RX_MIX6			B43_LP_RADIO(0x0C7) /* G RX MIX 6 */
#define B2063_G_RX_MIX7			B43_LP_RADIO(0x0C8) /* G RX MIX 7 */
#define B2063_G_RX_MIX8			B43_LP_RADIO(0x0C9) /* G RX MIX 8 */
#define B2063_G_RX_PDET1		B43_LP_RADIO(0x0CA) /* G RX PDET 1 */
#define B2063_G_RX_SPARES1		B43_LP_RADIO(0x0CB) /* G RX SPARES 1 */
#define B2063_G_RX_SPARES2		B43_LP_RADIO(0x0CC) /* G RX SPARES 2 */
#define B2063_G_RX_SPARES3		B43_LP_RADIO(0x0CD) /* G RX SPARES 3 */
#define B2063_A_RX_1ST1			B43_LP_RADIO(0x0CE) /* A RX 1ST 1 */
#define B2063_A_RX_1ST2			B43_LP_RADIO(0x0CF) /* A RX 1ST 2 */
#define B2063_A_RX_1ST3			B43_LP_RADIO(0x0D0) /* A RX 1ST 3 */
#define B2063_A_RX_1ST4			B43_LP_RADIO(0x0D1) /* A RX 1ST 4 */
#define B2063_A_RX_1ST5			B43_LP_RADIO(0x0D2) /* A RX 1ST 5 */
#define B2063_A_RX_2ND1			B43_LP_RADIO(0x0D3) /* A RX 2ND 1 */
#define B2063_A_RX_2ND2			B43_LP_RADIO(0x0D4) /* A RX 2ND 2 */
#define B2063_A_RX_2ND3			B43_LP_RADIO(0x0D5) /* A RX 2ND 3 */
#define B2063_A_RX_2ND4			B43_LP_RADIO(0x0D6) /* A RX 2ND 4 */
#define B2063_A_RX_2ND5			B43_LP_RADIO(0x0D7) /* A RX 2ND 5 */
#define B2063_A_RX_2ND6			B43_LP_RADIO(0x0D8) /* A RX 2ND 6 */
#define B2063_A_RX_2ND7			B43_LP_RADIO(0x0D9) /* A RX 2ND 7 */
#define B2063_A_RX_PS1			B43_LP_RADIO(0x0DA) /* A RX PS 1 */
#define B2063_A_RX_PS2			B43_LP_RADIO(0x0DB) /* A RX PS 2 */
#define B2063_A_RX_PS3			B43_LP_RADIO(0x0DC) /* A RX PS 3 */
#define B2063_A_RX_PS4			B43_LP_RADIO(0x0DD) /* A RX PS 4 */
#define B2063_A_RX_PS5			B43_LP_RADIO(0x0DE) /* A RX PS 5 */
#define B2063_A_RX_PS6			B43_LP_RADIO(0x0DF) /* A RX PS 6 */
#define B2063_A_RX_MIX1			B43_LP_RADIO(0x0E0) /* A RX MIX 1 */
#define B2063_A_RX_MIX2			B43_LP_RADIO(0x0E1) /* A RX MIX 2 */
#define B2063_A_RX_MIX3			B43_LP_RADIO(0x0E2) /* A RX MIX 3 */
#define B2063_A_RX_MIX4			B43_LP_RADIO(0x0E3) /* A RX MIX 4 */
#define B2063_A_RX_MIX5			B43_LP_RADIO(0x0E4) /* A RX MIX 5 */
#define B2063_A_RX_MIX6			B43_LP_RADIO(0x0E5) /* A RX MIX 6 */
#define B2063_A_RX_MIX7			B43_LP_RADIO(0x0E6) /* A RX MIX 7 */
#define B2063_A_RX_MIX8			B43_LP_RADIO(0x0E7) /* A RX MIX 8 */
#define B2063_A_RX_PWRDET1		B43_LP_RADIO(0x0E8) /* A RX PWRDET 1 */
#define B2063_A_RX_SPARE1		B43_LP_RADIO(0x0E9) /* A RX SPARE 1 */
#define B2063_A_RX_SPARE2		B43_LP_RADIO(0x0EA) /* A RX SPARE 2 */
#define B2063_A_RX_SPARE3		B43_LP_RADIO(0x0EB) /* A RX SPARE 3 */
#define B2063_RX_TIA_CTL1		B43_LP_RADIO(0x0EC) /* RX TIA Control 1 */
#define B2063_RX_TIA_CTL2		B43_LP_RADIO(0x0ED) /* RX TIA Control 2 */
#define B2063_RX_TIA_CTL3		B43_LP_RADIO(0x0EE) /* RX TIA Control 3 */
#define B2063_RX_TIA_CTL4		B43_LP_RADIO(0x0EF) /* RX TIA Control 4 */
#define B2063_RX_TIA_CTL5		B43_LP_RADIO(0x0F0) /* RX TIA Control 5 */
#define B2063_RX_TIA_CTL6		B43_LP_RADIO(0x0F1) /* RX TIA Control 6 */
#define B2063_RX_BB_CTL1		B43_LP_RADIO(0x0F2) /* RX BB Control 1 */
#define B2063_RX_BB_CTL2		B43_LP_RADIO(0x0F3) /* RX BB Control 2 */
#define B2063_RX_BB_CTL3		B43_LP_RADIO(0x0F4) /* RX BB Control 3 */
#define B2063_RX_BB_CTL4		B43_LP_RADIO(0x0F5) /* RX BB Control 4 */
#define B2063_RX_BB_CTL5		B43_LP_RADIO(0x0F6) /* RX BB Control 5 */
#define B2063_RX_BB_CTL6		B43_LP_RADIO(0x0F7) /* RX BB Control 6 */
#define B2063_RX_BB_CTL7		B43_LP_RADIO(0x0F8) /* RX BB Control 7 */
481#define B2063_RX_BB_CTL8 B43_LP_RADIO(0x0F9) /* RX BB Control 8 */
482#define B2063_RX_BB_CTL9 B43_LP_RADIO(0x0FA) /* RX BB Control 9 */
483#define B2063_TX_RF_CTL1 B43_LP_RADIO(0x0FB) /* TX RF Control 1 */
484#define B2063_TX_RF_IDAC_LO_RF_I B43_LP_RADIO(0x0FC) /* TX RF IDAC LO RF I */
485#define B2063_TX_RF_IDAC_LO_RF_Q B43_LP_RADIO(0x0FD) /* TX RF IDAC LO RF Q */
486#define B2063_TX_RF_IDAC_LO_BB_I B43_LP_RADIO(0x0FE) /* TX RF IDAC LO BB I */
487#define B2063_TX_RF_IDAC_LO_BB_Q B43_LP_RADIO(0x0FF) /* TX RF IDAC LO BB Q */
488#define B2063_TX_RF_CTL2 B43_LP_RADIO(0x100) /* TX RF Control 2 */
489#define B2063_TX_RF_CTL3 B43_LP_RADIO(0x101) /* TX RF Control 3 */
490#define B2063_TX_RF_CTL4 B43_LP_RADIO(0x102) /* TX RF Control 4 */
491#define B2063_TX_RF_CTL5 B43_LP_RADIO(0x103) /* TX RF Control 5 */
492#define B2063_TX_RF_CTL6 B43_LP_RADIO(0x104) /* TX RF Control 6 */
493#define B2063_TX_RF_CTL7 B43_LP_RADIO(0x105) /* TX RF Control 7 */
494#define B2063_TX_RF_CTL8 B43_LP_RADIO(0x106) /* TX RF Control 8 */
495#define B2063_TX_RF_CTL9 B43_LP_RADIO(0x107) /* TX RF Control 9 */
496#define B2063_TX_RF_CTL10 B43_LP_RADIO(0x108) /* TX RF Control 10 */
497#define B2063_TX_RF_CTL14 B43_LP_RADIO(0x109) /* TX RF Control 14 */
498#define B2063_TX_RF_CTL15 B43_LP_RADIO(0x10A) /* TX RF Control 15 */
499#define B2063_PA_CTL1 B43_LP_RADIO(0x10B) /* PA Control 1 */
500#define B2063_PA_CTL2 B43_LP_RADIO(0x10C) /* PA Control 2 */
501#define B2063_PA_CTL3 B43_LP_RADIO(0x10D) /* PA Control 3 */
502#define B2063_PA_CTL4 B43_LP_RADIO(0x10E) /* PA Control 4 */
503#define B2063_PA_CTL5 B43_LP_RADIO(0x10F) /* PA Control 5 */
504#define B2063_PA_CTL6 B43_LP_RADIO(0x110) /* PA Control 6 */
505#define B2063_PA_CTL7 B43_LP_RADIO(0x111) /* PA Control 7 */
506#define B2063_PA_CTL8 B43_LP_RADIO(0x112) /* PA Control 8 */
507#define B2063_PA_CTL9 B43_LP_RADIO(0x113) /* PA Control 9 */
508#define B2063_PA_CTL10 B43_LP_RADIO(0x114) /* PA Control 10 */
509#define B2063_PA_CTL11 B43_LP_RADIO(0x115) /* PA Control 11 */
510#define B2063_PA_CTL12 B43_LP_RADIO(0x116) /* PA Control 12 */
511#define B2063_PA_CTL13 B43_LP_RADIO(0x117) /* PA Control 13 */
512#define B2063_TX_BB_CTL1 B43_LP_RADIO(0x118) /* TX BB Control 1 */
513#define B2063_TX_BB_CTL2 B43_LP_RADIO(0x119) /* TX BB Control 2 */
514#define B2063_TX_BB_CTL3 B43_LP_RADIO(0x11A) /* TX BB Control 3 */
515#define B2063_TX_BB_CTL4 B43_LP_RADIO(0x11B) /* TX BB Control 4 */
516#define B2063_GPIO_CTL1 B43_LP_RADIO(0x11C) /* GPIO Control 1 */
517#define B2063_VREG_CTL1 B43_LP_RADIO(0x11D) /* VREG Control 1 */
518#define B2063_AMUX_CTL1 B43_LP_RADIO(0x11E) /* AMUX Control 1 */
519#define B2063_IQ_CALIB_GVAR B43_LP_RADIO(0x11F) /* IQ Calibration GVAR */
520#define B2063_IQ_CALIB_CTL1 B43_LP_RADIO(0x120) /* IQ Calibration Control 1 */
521#define B2063_IQ_CALIB_CTL2 B43_LP_RADIO(0x121) /* IQ Calibration Control 2 */
522#define B2063_TEMPSENSE_CTL1 B43_LP_RADIO(0x122) /* TEMPSENSE Control 1 */
523#define B2063_TEMPSENSE_CTL2 B43_LP_RADIO(0x123) /* TEMPSENSE Control 2 */
524#define B2063_TX_RX_LOOPBACK1 B43_LP_RADIO(0x124) /* TX/RX LOOPBACK 1 */
525#define B2063_TX_RX_LOOPBACK2 B43_LP_RADIO(0x125) /* TX/RX LOOPBACK 2 */
526#define B2063_EXT_TSSI_CTL1 B43_LP_RADIO(0x126) /* EXT TSSI Control 1 */
527#define B2063_EXT_TSSI_CTL2 B43_LP_RADIO(0x127) /* EXT TSSI Control 2 */
528#define B2063_AFE_CTL B43_LP_RADIO(0x128) /* AFE Control */
529
530
531
532struct b43_phy_lp {
533 //TODO
534};
535
536
537struct b43_phy_operations;
538extern const struct b43_phy_operations b43_phyops_lp;
539
540#endif /* LINUX_B43_PHY_LP_H_ */
diff --git a/drivers/net/wireless/b43/nphy.c b/drivers/net/wireless/b43/phy_n.c
index 644eed993bea..8bcfda5f3f07 100644
--- a/drivers/net/wireless/b43/nphy.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -26,7 +26,7 @@
26#include <linux/types.h> 26#include <linux/types.h>
27 27
28#include "b43.h" 28#include "b43.h"
29#include "nphy.h" 29#include "phy_n.h"
30#include "tables_nphy.h" 30#include "tables_nphy.h"
31 31
32 32
@@ -34,10 +34,16 @@ void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
34{//TODO 34{//TODO
35} 35}
36 36
37void b43_nphy_xmitpower(struct b43_wldev *dev) 37static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
38{//TODO 38{//TODO
39} 39}
40 40
41static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
42 bool ignore_tssi)
43{//TODO
44 return B43_TXPWR_RES_DONE;
45}
46
41static void b43_chantab_radio_upload(struct b43_wldev *dev, 47static void b43_chantab_radio_upload(struct b43_wldev *dev,
42 const struct b43_nphy_channeltab_entry *e) 48 const struct b43_nphy_channeltab_entry *e)
43{ 49{
@@ -81,9 +87,8 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
81 //TODO 87 //TODO
82} 88}
83 89
84/* Tune the hardware to a new channel. Don't call this directly. 90/* Tune the hardware to a new channel. */
85 * Use b43_radio_selectchannel() */ 91static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
86int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel)
87{ 92{
88 const struct b43_nphy_channeltab_entry *tabent; 93 const struct b43_nphy_channeltab_entry *tabent;
89 94
@@ -162,7 +167,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
162 msleep(1); 167 msleep(1);
163 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); 168 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
164 msleep(1); 169 msleep(1);
165 b43_radio_selectchannel(dev, dev->phy.channel, 0); 170 nphy_channel_switch(dev, dev->phy.channel);
166 b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9); 171 b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9);
167 b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9); 172 b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9);
168 b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83); 173 b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
@@ -484,3 +489,140 @@ int b43_phy_initn(struct b43_wldev *dev)
484 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n"); 489 b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
485 return 0; 490 return 0;
486} 491}
492
493static int b43_nphy_op_allocate(struct b43_wldev *dev)
494{
495 struct b43_phy_n *nphy;
496
497 nphy = kzalloc(sizeof(*nphy), GFP_KERNEL);
498 if (!nphy)
499 return -ENOMEM;
500 dev->phy.n = nphy;
501
502 return 0;
503}
504
505static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
506{
507 struct b43_phy *phy = &dev->phy;
508 struct b43_phy_n *nphy = phy->n;
509
510 memset(nphy, 0, sizeof(*nphy));
511
512 //TODO init struct b43_phy_n
513}
514
515static void b43_nphy_op_free(struct b43_wldev *dev)
516{
517 struct b43_phy *phy = &dev->phy;
518 struct b43_phy_n *nphy = phy->n;
519
520 kfree(nphy);
521 phy->n = NULL;
522}
523
524static int b43_nphy_op_init(struct b43_wldev *dev)
525{
526 return b43_phy_initn(dev);
527}
528
529static inline void check_phyreg(struct b43_wldev *dev, u16 offset)
530{
531#if B43_DEBUG
532 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
533 /* OFDM registers are onnly available on A/G-PHYs */
534 b43err(dev->wl, "Invalid OFDM PHY access at "
535 "0x%04X on N-PHY\n", offset);
536 dump_stack();
537 }
538 if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
539 /* Ext-G registers are only available on G-PHYs */
540 b43err(dev->wl, "Invalid EXT-G PHY access at "
541 "0x%04X on N-PHY\n", offset);
542 dump_stack();
543 }
544#endif /* B43_DEBUG */
545}
546
547static u16 b43_nphy_op_read(struct b43_wldev *dev, u16 reg)
548{
549 check_phyreg(dev, reg);
550 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
551 return b43_read16(dev, B43_MMIO_PHY_DATA);
552}
553
554static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
555{
556 check_phyreg(dev, reg);
557 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
558 b43_write16(dev, B43_MMIO_PHY_DATA, value);
559}
560
561static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
562{
563 /* Register 1 is a 32-bit register. */
564 B43_WARN_ON(reg == 1);
565 /* N-PHY needs 0x100 for read access */
566 reg |= 0x100;
567
568 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
569 return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
570}
571
572static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
573{
574 /* Register 1 is a 32-bit register. */
575 B43_WARN_ON(reg == 1);
576
577 b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
578 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
579}
580
581static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
582 enum rfkill_state state)
583{//TODO
584}
585
586static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
587{
588 b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
589 on ? 0 : 0x7FFF);
590}
591
592static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
593 unsigned int new_channel)
594{
595 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
596 if ((new_channel < 1) || (new_channel > 14))
597 return -EINVAL;
598 } else {
599 if (new_channel > 200)
600 return -EINVAL;
601 }
602
603 return nphy_channel_switch(dev, new_channel);
604}
605
606static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
607{
608 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
609 return 1;
610 return 36;
611}
612
613const struct b43_phy_operations b43_phyops_n = {
614 .allocate = b43_nphy_op_allocate,
615 .free = b43_nphy_op_free,
616 .prepare_structs = b43_nphy_op_prepare_structs,
617 .init = b43_nphy_op_init,
618 .phy_read = b43_nphy_op_read,
619 .phy_write = b43_nphy_op_write,
620 .radio_read = b43_nphy_op_radio_read,
621 .radio_write = b43_nphy_op_radio_write,
622 .software_rfkill = b43_nphy_op_software_rfkill,
623 .switch_analog = b43_nphy_op_switch_analog,
624 .switch_channel = b43_nphy_op_switch_channel,
625 .get_default_chan = b43_nphy_op_get_default_chan,
626 .recalc_txpower = b43_nphy_op_recalc_txpower,
627 .adjust_txpower = b43_nphy_op_adjust_txpower,
628};
diff --git a/drivers/net/wireless/b43/nphy.h b/drivers/net/wireless/b43/phy_n.h
index faf46b9cbf1b..1749aef4147d 100644
--- a/drivers/net/wireless/b43/nphy.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -1,7 +1,7 @@
1#ifndef B43_NPHY_H_ 1#ifndef B43_NPHY_H_
2#define B43_NPHY_H_ 2#define B43_NPHY_H_
3 3
4#include "phy.h" 4#include "phy_common.h"
5 5
6 6
7/* N-PHY registers. */ 7/* N-PHY registers. */
@@ -919,54 +919,12 @@
919 919
920struct b43_wldev; 920struct b43_wldev;
921 921
922struct b43_phy_n {
923 //TODO lots of missing stuff
924};
922 925
923#ifdef CONFIG_B43_NPHY
924/* N-PHY support enabled */
925 926
926int b43_phy_initn(struct b43_wldev *dev); 927struct b43_phy_operations;
928extern const struct b43_phy_operations b43_phyops_n;
927 929
928void b43_nphy_radio_turn_on(struct b43_wldev *dev);
929void b43_nphy_radio_turn_off(struct b43_wldev *dev);
930
931int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel);
932
933void b43_nphy_xmitpower(struct b43_wldev *dev);
934void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna);
935
936
937#else /* CONFIG_B43_NPHY */
938/* N-PHY support disabled */
939
940
941static inline
942int b43_phy_initn(struct b43_wldev *dev)
943{
944 return -EOPNOTSUPP;
945}
946
947static inline
948void b43_nphy_radio_turn_on(struct b43_wldev *dev)
949{
950}
951static inline
952void b43_nphy_radio_turn_off(struct b43_wldev *dev)
953{
954}
955
956static inline
957int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel)
958{
959 return -ENOSYS;
960}
961
962static inline
963void b43_nphy_xmitpower(struct b43_wldev *dev)
964{
965}
966static inline
967void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
968{
969}
970
971#endif /* CONFIG_B43_NPHY */
972#endif /* B43_NPHY_H_ */ 930#endif /* B43_NPHY_H_ */
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 34ae125d5384..713753781f40 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -24,6 +24,7 @@
24 24
25#include "rfkill.h" 25#include "rfkill.h"
26#include "b43.h" 26#include "b43.h"
27#include "phy_common.h"
27 28
28#include <linux/kmod.h> 29#include <linux/kmod.h>
29 30
@@ -96,11 +97,11 @@ static int b43_rfkill_soft_toggle(void *data, enum rfkill_state state)
96 goto out_unlock; 97 goto out_unlock;
97 } 98 }
98 if (!dev->phy.radio_on) 99 if (!dev->phy.radio_on)
99 b43_radio_turn_on(dev); 100 b43_software_rfkill(dev, state);
100 break; 101 break;
101 case RFKILL_STATE_SOFT_BLOCKED: 102 case RFKILL_STATE_SOFT_BLOCKED:
102 if (dev->phy.radio_on) 103 if (dev->phy.radio_on)
103 b43_radio_turn_off(dev, 0); 104 b43_software_rfkill(dev, state);
104 break; 105 break;
105 default: 106 default:
106 b43warn(wl, "Received unexpected rfkill state %d.\n", state); 107 b43warn(wl, "Received unexpected rfkill state %d.\n", state);
@@ -169,6 +170,11 @@ void b43_rfkill_init(struct b43_wldev *dev)
169 "The built-in radio LED will not work.\n"); 170 "The built-in radio LED will not work.\n");
170#endif /* CONFIG_RFKILL_INPUT */ 171#endif /* CONFIG_RFKILL_INPUT */
171 172
173#if !defined(CONFIG_RFKILL_INPUT) && !defined(CONFIG_RFKILL_INPUT_MODULE)
174 b43warn(wl, "The rfkill-input subsystem is not available. "
175 "The built-in radio LED will not work.\n");
176#endif
177
172 err = input_register_polled_device(rfk->poll_dev); 178 err = input_register_polled_device(rfk->poll_dev);
173 if (err) 179 if (err)
174 goto err_unreg_rfk; 180 goto err_unreg_rfk;
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index 275095b8cbe7..5adaa3692d75 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -29,7 +29,7 @@
29#include "b43.h" 29#include "b43.h"
30#include "sysfs.h" 30#include "sysfs.h"
31#include "main.h" 31#include "main.h"
32#include "phy.h" 32#include "phy_common.h"
33 33
34#define GENERIC_FILESIZE 64 34#define GENERIC_FILESIZE 64
35 35
@@ -59,7 +59,12 @@ static ssize_t b43_attr_interfmode_show(struct device *dev,
59 59
60 mutex_lock(&wldev->wl->mutex); 60 mutex_lock(&wldev->wl->mutex);
61 61
62 switch (wldev->phy.interfmode) { 62 if (wldev->phy.type != B43_PHYTYPE_G) {
63 mutex_unlock(&wldev->wl->mutex);
64 return -ENOSYS;
65 }
66
67 switch (wldev->phy.g->interfmode) {
63 case B43_INTERFMODE_NONE: 68 case B43_INTERFMODE_NONE:
64 count = 69 count =
65 snprintf(buf, PAGE_SIZE, 70 snprintf(buf, PAGE_SIZE,
@@ -117,11 +122,15 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
117 mutex_lock(&wldev->wl->mutex); 122 mutex_lock(&wldev->wl->mutex);
118 spin_lock_irqsave(&wldev->wl->irq_lock, flags); 123 spin_lock_irqsave(&wldev->wl->irq_lock, flags);
119 124
120 err = b43_radio_set_interference_mitigation(wldev, mode); 125 if (wldev->phy.ops->interf_mitigation) {
121 if (err) { 126 err = wldev->phy.ops->interf_mitigation(wldev, mode);
122 b43err(wldev->wl, "Interference Mitigation not " 127 if (err) {
123 "supported by device\n"); 128 b43err(wldev->wl, "Interference Mitigation not "
124 } 129 "supported by device\n");
130 }
131 } else
132 err = -ENOSYS;
133
125 mmiowb(); 134 mmiowb();
126 spin_unlock_irqrestore(&wldev->wl->irq_lock, flags); 135 spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
127 mutex_unlock(&wldev->wl->mutex); 136 mutex_unlock(&wldev->wl->mutex);
diff --git a/drivers/net/wireless/b43/tables.c b/drivers/net/wireless/b43/tables.c
index 3f5ea06bf13c..1ef9a6463ec6 100644
--- a/drivers/net/wireless/b43/tables.c
+++ b/drivers/net/wireless/b43/tables.c
@@ -27,7 +27,8 @@
27 27
28#include "b43.h" 28#include "b43.h"
29#include "tables.h" 29#include "tables.h"
30#include "phy.h" 30#include "phy_g.h"
31
31 32
32const u32 b43_tab_rotor[] = { 33const u32 b43_tab_rotor[] = {
33 0xFEB93FFD, 0xFEC63FFD, /* 0 */ 34 0xFEB93FFD, 0xFEC63FFD, /* 0 */
@@ -377,17 +378,17 @@ static inline void assert_sizes(void)
377 378
378u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset) 379u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset)
379{ 380{
380 struct b43_phy *phy = &dev->phy; 381 struct b43_phy_g *gphy = dev->phy.g;
381 u16 addr; 382 u16 addr;
382 383
383 addr = table + offset; 384 addr = table + offset;
384 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || 385 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
385 (addr - 1 != phy->ofdmtab_addr)) { 386 (addr - 1 != gphy->ofdmtab_addr)) {
386 /* The hardware has a different address in memory. Update it. */ 387 /* The hardware has a different address in memory. Update it. */
387 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 388 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
388 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; 389 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
389 } 390 }
390 phy->ofdmtab_addr = addr; 391 gphy->ofdmtab_addr = addr;
391 392
392 return b43_phy_read(dev, B43_PHY_OTABLEI); 393 return b43_phy_read(dev, B43_PHY_OTABLEI);
393 394
@@ -398,34 +399,34 @@ u16 b43_ofdmtab_read16(struct b43_wldev *dev, u16 table, u16 offset)
398void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table, 399void b43_ofdmtab_write16(struct b43_wldev *dev, u16 table,
399 u16 offset, u16 value) 400 u16 offset, u16 value)
400{ 401{
401 struct b43_phy *phy = &dev->phy; 402 struct b43_phy_g *gphy = dev->phy.g;
402 u16 addr; 403 u16 addr;
403 404
404 addr = table + offset; 405 addr = table + offset;
405 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || 406 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
406 (addr - 1 != phy->ofdmtab_addr)) { 407 (addr - 1 != gphy->ofdmtab_addr)) {
407 /* The hardware has a different address in memory. Update it. */ 408 /* The hardware has a different address in memory. Update it. */
408 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 409 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
409 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; 410 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
410 } 411 }
411 phy->ofdmtab_addr = addr; 412 gphy->ofdmtab_addr = addr;
412 b43_phy_write(dev, B43_PHY_OTABLEI, value); 413 b43_phy_write(dev, B43_PHY_OTABLEI, value);
413} 414}
414 415
415u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset) 416u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset)
416{ 417{
417 struct b43_phy *phy = &dev->phy; 418 struct b43_phy_g *gphy = dev->phy.g;
418 u32 ret; 419 u32 ret;
419 u16 addr; 420 u16 addr;
420 421
421 addr = table + offset; 422 addr = table + offset;
422 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) || 423 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_READ) ||
423 (addr - 1 != phy->ofdmtab_addr)) { 424 (addr - 1 != gphy->ofdmtab_addr)) {
424 /* The hardware has a different address in memory. Update it. */ 425 /* The hardware has a different address in memory. Update it. */
425 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 426 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
426 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ; 427 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_READ;
427 } 428 }
428 phy->ofdmtab_addr = addr; 429 gphy->ofdmtab_addr = addr;
429 ret = b43_phy_read(dev, B43_PHY_OTABLEQ); 430 ret = b43_phy_read(dev, B43_PHY_OTABLEQ);
430 ret <<= 16; 431 ret <<= 16;
431 ret |= b43_phy_read(dev, B43_PHY_OTABLEI); 432 ret |= b43_phy_read(dev, B43_PHY_OTABLEI);
@@ -436,17 +437,17 @@ u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset)
436void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, 437void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table,
437 u16 offset, u32 value) 438 u16 offset, u32 value)
438{ 439{
439 struct b43_phy *phy = &dev->phy; 440 struct b43_phy_g *gphy = dev->phy.g;
440 u16 addr; 441 u16 addr;
441 442
442 addr = table + offset; 443 addr = table + offset;
443 if ((phy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) || 444 if ((gphy->ofdmtab_addr_direction != B43_OFDMTAB_DIRECTION_WRITE) ||
444 (addr - 1 != phy->ofdmtab_addr)) { 445 (addr - 1 != gphy->ofdmtab_addr)) {
445 /* The hardware has a different address in memory. Update it. */ 446 /* The hardware has a different address in memory. Update it. */
446 b43_phy_write(dev, B43_PHY_OTABLECTL, addr); 447 b43_phy_write(dev, B43_PHY_OTABLECTL, addr);
447 phy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE; 448 gphy->ofdmtab_addr_direction = B43_OFDMTAB_DIRECTION_WRITE;
448 } 449 }
449 phy->ofdmtab_addr = addr; 450 gphy->ofdmtab_addr = addr;
450 451
451 b43_phy_write(dev, B43_PHY_OTABLEI, value); 452 b43_phy_write(dev, B43_PHY_OTABLEI, value);
452 b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16)); 453 b43_phy_write(dev, B43_PHY_OTABLEQ, (value >> 16));
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 2aa57551786a..4e2336315545 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -24,8 +24,8 @@
24 24
25#include "b43.h" 25#include "b43.h"
26#include "tables_nphy.h" 26#include "tables_nphy.h"
27#include "phy.h" 27#include "phy_common.h"
28#include "nphy.h" 28#include "phy_n.h"
29 29
30 30
31struct b2055_inittab_entry { 31struct b2055_inittab_entry {
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index daa94211f838..0c0fb15abb9f 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -27,7 +27,7 @@
27#include "b43.h" 27#include "b43.h"
28#include "main.h" 28#include "main.h"
29#include "tables.h" 29#include "tables.h"
30#include "phy.h" 30#include "phy_common.h"
31#include "wa.h" 31#include "wa.h"
32 32
33static void b43_wa_papd(struct b43_wldev *dev) 33static void b43_wa_papd(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 9dda8169f7cc..5e0b71c3ad02 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -28,7 +28,7 @@
28*/ 28*/
29 29
30#include "xmit.h" 30#include "xmit.h"
31#include "phy.h" 31#include "phy_common.h"
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
34 34
@@ -431,6 +431,7 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
431 int adjust_2053, int adjust_2050) 431 int adjust_2053, int adjust_2050)
432{ 432{
433 struct b43_phy *phy = &dev->phy; 433 struct b43_phy *phy = &dev->phy;
434 struct b43_phy_g *gphy = phy->g;
434 s32 tmp; 435 s32 tmp;
435 436
436 switch (phy->radio_ver) { 437 switch (phy->radio_ver) {
@@ -450,7 +451,8 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev,
450 boardflags_lo & B43_BFL_RSSI) { 451 boardflags_lo & B43_BFL_RSSI) {
451 if (in_rssi > 63) 452 if (in_rssi > 63)
452 in_rssi = 63; 453 in_rssi = 63;
453 tmp = phy->nrssi_lt[in_rssi]; 454 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
455 tmp = gphy->nrssi_lt[in_rssi];
454 tmp = 31 - tmp; 456 tmp = 31 - tmp;
455 tmp *= -131; 457 tmp *= -131;
456 tmp /= 128; 458 tmp /= 128;
@@ -678,6 +680,8 @@ void b43_handle_txstatus(struct b43_wldev *dev,
678 b43_pio_handle_txstatus(dev, status); 680 b43_pio_handle_txstatus(dev, status);
679 else 681 else
680 b43_dma_handle_txstatus(dev, status); 682 b43_dma_handle_txstatus(dev, status);
683
684 b43_phy_txpower_check(dev, 0);
681} 685}
682 686
683/* Fill out the mac80211 TXstatus report based on the b43-specific 687/* Fill out the mac80211 TXstatus report based on the b43-specific
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1cb77db5c292..9fb1421cbec2 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -888,13 +888,13 @@ generate_new:
888 888
889static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev) 889static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
890{ 890{
891 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) { 891 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP)) {
892 /* TODO: PS TBTT */ 892 /* TODO: PS TBTT */
893 } else { 893 } else {
894 if (1/*FIXME: the last PSpoll frame was sent successfully */) 894 if (1/*FIXME: the last PSpoll frame was sent successfully */)
895 b43legacy_power_saving_ctl_bits(dev, -1, -1); 895 b43legacy_power_saving_ctl_bits(dev, -1, -1);
896 } 896 }
897 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) 897 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
898 dev->dfq_valid = 1; 898 dev->dfq_valid = 1;
899} 899}
900 900
@@ -1201,7 +1201,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
1201 struct b43legacy_wl *wl = dev->wl; 1201 struct b43legacy_wl *wl = dev->wl;
1202 u32 cmd; 1202 u32 cmd;
1203 1203
1204 if (!b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) 1204 if (!b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
1205 return; 1205 return;
1206 1206
1207 /* This is the bottom half of the asynchronous beacon update. */ 1207 /* This is the bottom half of the asynchronous beacon update. */
@@ -1936,9 +1936,9 @@ static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev)
1936 ctl &= ~B43legacy_MACCTL_BEACPROMISC; 1936 ctl &= ~B43legacy_MACCTL_BEACPROMISC;
1937 ctl |= B43legacy_MACCTL_INFRA; 1937 ctl |= B43legacy_MACCTL_INFRA;
1938 1938
1939 if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) 1939 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
1940 ctl |= B43legacy_MACCTL_AP; 1940 ctl |= B43legacy_MACCTL_AP;
1941 else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) 1941 else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC))
1942 ctl &= ~B43legacy_MACCTL_INFRA; 1942 ctl &= ~B43legacy_MACCTL_INFRA;
1943 1943
1944 if (wl->filter_flags & FIF_CONTROL) 1944 if (wl->filter_flags & FIF_CONTROL)
@@ -2646,7 +2646,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2646 b43legacy_mgmtframe_txantenna(dev, antenna_tx); 2646 b43legacy_mgmtframe_txantenna(dev, antenna_tx);
2647 2647
2648 /* Update templates for AP mode. */ 2648 /* Update templates for AP mode. */
2649 if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) 2649 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP))
2650 b43legacy_set_beacon_int(dev, conf->beacon_int); 2650 b43legacy_set_beacon_int(dev, conf->beacon_int);
2651 2651
2652 2652
@@ -2733,12 +2733,12 @@ static int b43legacy_op_config_interface(struct ieee80211_hw *hw,
2733 else 2733 else
2734 memset(wl->bssid, 0, ETH_ALEN); 2734 memset(wl->bssid, 0, ETH_ALEN);
2735 if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { 2735 if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
2736 if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP)) { 2736 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) {
2737 B43legacy_WARN_ON(vif->type != IEEE80211_IF_TYPE_AP); 2737 B43legacy_WARN_ON(vif->type != NL80211_IFTYPE_AP);
2738 b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len); 2738 b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len);
2739 if (conf->changed & IEEE80211_IFCC_BEACON) 2739 if (conf->changed & IEEE80211_IFCC_BEACON)
2740 b43legacy_update_templates(wl); 2740 b43legacy_update_templates(wl);
2741 } else if (b43legacy_is_mode(wl, IEEE80211_IF_TYPE_IBSS)) { 2741 } else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
2742 if (conf->changed & IEEE80211_IFCC_BEACON) 2742 if (conf->changed & IEEE80211_IFCC_BEACON)
2743 b43legacy_update_templates(wl); 2743 b43legacy_update_templates(wl);
2744 } 2744 }
@@ -3020,7 +3020,7 @@ static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev,
3020 bool idle) { 3020 bool idle) {
3021 u16 pu_delay = 1050; 3021 u16 pu_delay = 1050;
3022 3022
3023 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle) 3023 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle)
3024 pu_delay = 500; 3024 pu_delay = 500;
3025 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) 3025 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
3026 pu_delay = max(pu_delay, (u16)2400); 3026 pu_delay = max(pu_delay, (u16)2400);
@@ -3035,7 +3035,7 @@ static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev)
3035 u16 pretbtt; 3035 u16 pretbtt;
3036 3036
3037 /* The time value is in microseconds. */ 3037 /* The time value is in microseconds. */
3038 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) 3038 if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
3039 pretbtt = 2; 3039 pretbtt = 2;
3040 else 3040 else
3041 pretbtt = 250; 3041 pretbtt = 250;
@@ -3259,10 +3259,10 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3259 3259
3260 /* TODO: allow WDS/AP devices to coexist */ 3260 /* TODO: allow WDS/AP devices to coexist */
3261 3261
3262 if (conf->type != IEEE80211_IF_TYPE_AP && 3262 if (conf->type != NL80211_IFTYPE_AP &&
3263 conf->type != IEEE80211_IF_TYPE_STA && 3263 conf->type != NL80211_IFTYPE_STATION &&
3264 conf->type != IEEE80211_IF_TYPE_WDS && 3264 conf->type != NL80211_IFTYPE_WDS &&
3265 conf->type != IEEE80211_IF_TYPE_IBSS) 3265 conf->type != NL80211_IFTYPE_ADHOC)
3266 return -EOPNOTSUPP; 3266 return -EOPNOTSUPP;
3267 3267
3268 mutex_lock(&wl->mutex); 3268 mutex_lock(&wl->mutex);
@@ -3403,7 +3403,7 @@ out_unlock:
3403} 3403}
3404 3404
3405static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw, 3405static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3406 int aid, int set) 3406 struct ieee80211_sta *sta, bool set)
3407{ 3407{
3408 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 3408 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3409 unsigned long flags; 3409 unsigned long flags;
@@ -3704,6 +3704,11 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3704 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 3704 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3705 IEEE80211_HW_SIGNAL_DBM | 3705 IEEE80211_HW_SIGNAL_DBM |
3706 IEEE80211_HW_NOISE_DBM; 3706 IEEE80211_HW_NOISE_DBM;
3707 hw->wiphy->interface_modes =
3708 BIT(NL80211_IFTYPE_AP) |
3709 BIT(NL80211_IFTYPE_STATION) |
3710 BIT(NL80211_IFTYPE_WDS) |
3711 BIT(NL80211_IFTYPE_ADHOC);
3707 hw->queues = 1; /* FIXME: hardware has more queues */ 3712 hw->queues = 1; /* FIXME: hardware has more queues */
3708 SET_IEEE80211_DEV(hw, dev->dev); 3713 SET_IEEE80211_DEV(hw, dev->dev);
3709 if (is_valid_ether_addr(sprom->et1mac)) 3714 if (is_valid_ether_addr(sprom->et1mac))
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 768cccb9b1ba..4c9442b16f3f 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -103,7 +103,7 @@ void b43legacy_phy_lock(struct b43legacy_wldev *dev)
103 if (dev->dev->id.revision < 3) { 103 if (dev->dev->id.revision < 3) {
104 b43legacy_mac_suspend(dev); 104 b43legacy_mac_suspend(dev);
105 } else { 105 } else {
106 if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) 106 if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP))
107 b43legacy_power_saving_ctl_bits(dev, -1, 1); 107 b43legacy_power_saving_ctl_bits(dev, -1, 1);
108 } 108 }
109} 109}
@@ -118,7 +118,7 @@ void b43legacy_phy_unlock(struct b43legacy_wldev *dev)
118 if (dev->dev->id.revision < 3) { 118 if (dev->dev->id.revision < 3) {
119 b43legacy_mac_enable(dev); 119 b43legacy_mac_enable(dev);
120 } else { 120 } else {
121 if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) 121 if (!b43legacy_is_mode(dev->wl, NL80211_IFTYPE_AP))
122 b43legacy_power_saving_ctl_bits(dev, -1, -1); 122 b43legacy_power_saving_ctl_bits(dev, -1, -1);
123 } 123 }
124} 124}
@@ -595,12 +595,14 @@ static void b43legacy_phy_initb5(struct b43legacy_wldev *dev)
595 0x0035) & 0xFFC0) | 0x0064); 595 0x0035) & 0xFFC0) | 0x0064);
596 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, 596 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev,
597 0x005D) & 0xFF80) | 0x000A); 597 0x005D) & 0xFF80) | 0x000A);
598 b43legacy_phy_write(dev, 0x5B, 0x0000);
599 b43legacy_phy_write(dev, 0x5C, 0x0000);
598 } 600 }
599 601
600 if (dev->bad_frames_preempt) 602 if (dev->bad_frames_preempt)
601 b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD, 603 b43legacy_phy_write(dev, B43legacy_PHY_RADIO_BITFIELD,
602 b43legacy_phy_read(dev, 604 b43legacy_phy_read(dev,
603 B43legacy_PHY_RADIO_BITFIELD) | (1 << 11)); 605 B43legacy_PHY_RADIO_BITFIELD) | (1 << 12));
604 606
605 if (phy->analog == 1) { 607 if (phy->analog == 1) {
606 b43legacy_phy_write(dev, 0x0026, 0xCE00); 608 b43legacy_phy_write(dev, 0x0026, 0xCE00);
@@ -753,7 +755,7 @@ static void b43legacy_phy_initb6(struct b43legacy_wldev *dev)
753 b43legacy_radio_write16(dev, 0x0050, 0x0020); 755 b43legacy_radio_write16(dev, 0x0050, 0x0020);
754 } 756 }
755 if (phy->radio_rev <= 2) { 757 if (phy->radio_rev <= 2) {
756 b43legacy_radio_write16(dev, 0x007C, 0x0020); 758 b43legacy_radio_write16(dev, 0x0050, 0x0020);
757 b43legacy_radio_write16(dev, 0x005A, 0x0070); 759 b43legacy_radio_write16(dev, 0x005A, 0x0070);
758 b43legacy_radio_write16(dev, 0x005B, 0x007B); 760 b43legacy_radio_write16(dev, 0x005B, 0x007B);
759 b43legacy_radio_write16(dev, 0x005C, 0x00B0); 761 b43legacy_radio_write16(dev, 0x005C, 0x00B0);
@@ -771,7 +773,7 @@ static void b43legacy_phy_initb6(struct b43legacy_wldev *dev)
771 b43legacy_phy_write(dev, 0x002A, 0x8AC0); 773 b43legacy_phy_write(dev, 0x002A, 0x8AC0);
772 b43legacy_phy_write(dev, 0x0038, 0x0668); 774 b43legacy_phy_write(dev, 0x0038, 0x0668);
773 b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF); 775 b43legacy_radio_set_txpower_bg(dev, 0xFFFF, 0xFFFF, 0xFFFF);
774 if (phy->radio_rev <= 5) 776 if (phy->radio_rev == 4 || phy->radio_rev == 5)
775 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev, 777 b43legacy_phy_write(dev, 0x005D, (b43legacy_phy_read(dev,
776 0x005D) & 0xFF80) | 0x0003); 778 0x005D) & 0xFF80) | 0x0003);
777 if (phy->radio_rev <= 2) 779 if (phy->radio_rev <= 2)
@@ -1010,7 +1012,7 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1010 b43legacy_phy_initb5(dev); 1012 b43legacy_phy_initb5(dev);
1011 else 1013 else
1012 b43legacy_phy_initb6(dev); 1014 b43legacy_phy_initb6(dev);
1013 if (phy->rev >= 2 || phy->gmode) 1015 if (phy->rev >= 2 && phy->gmode)
1014 b43legacy_phy_inita(dev); 1016 b43legacy_phy_inita(dev);
1015 1017
1016 if (phy->rev >= 2) { 1018 if (phy->rev >= 2) {
@@ -1025,18 +1027,22 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1025 b43legacy_phy_write(dev, 0x0811, 0x0400); 1027 b43legacy_phy_write(dev, 0x0811, 0x0400);
1026 b43legacy_phy_write(dev, 0x0015, 0x00C0); 1028 b43legacy_phy_write(dev, 0x0015, 0x00C0);
1027 } 1029 }
1028 if (phy->rev >= 2 || phy->gmode) { 1030 if (phy->gmode) {
1029 tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF; 1031 tmp = b43legacy_phy_read(dev, 0x0400) & 0xFF;
1030 if (tmp == 3 || tmp == 5) { 1032 if (tmp == 3) {
1033 b43legacy_phy_write(dev, 0x04C2, 0x1816);
1034 b43legacy_phy_write(dev, 0x04C3, 0x8606);
1035 }
1036 if (tmp == 4 || tmp == 5) {
1031 b43legacy_phy_write(dev, 0x04C2, 0x1816); 1037 b43legacy_phy_write(dev, 0x04C2, 0x1816);
1032 b43legacy_phy_write(dev, 0x04C3, 0x8006); 1038 b43legacy_phy_write(dev, 0x04C3, 0x8006);
1033 if (tmp == 5) 1039 b43legacy_phy_write(dev, 0x04CC,
1034 b43legacy_phy_write(dev, 0x04CC, 1040 (b43legacy_phy_read(dev,
1035 (b43legacy_phy_read(dev, 1041 0x04CC) & 0x00FF) |
1036 0x04CC) & 0x00FF) | 1042 0x1F00);
1037 0x1F00);
1038 } 1043 }
1039 b43legacy_phy_write(dev, 0x047E, 0x0078); 1044 if (phy->rev >= 2)
1045 b43legacy_phy_write(dev, 0x047E, 0x0078);
1040 } 1046 }
1041 if (phy->radio_rev == 8) { 1047 if (phy->radio_rev == 8) {
1042 b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801) 1048 b43legacy_phy_write(dev, 0x0801, b43legacy_phy_read(dev, 0x0801)
@@ -1078,7 +1084,7 @@ static void b43legacy_phy_initg(struct b43legacy_wldev *dev)
1078 else 1084 else
1079 b43legacy_phy_write(dev, 0x002F, 0x0202); 1085 b43legacy_phy_write(dev, 0x002F, 0x0202);
1080 } 1086 }
1081 if (phy->gmode || phy->rev >= 2) { 1087 if (phy->gmode) {
1082 b43legacy_phy_lo_adjust(dev, 0); 1088 b43legacy_phy_lo_adjust(dev, 0);
1083 b43legacy_phy_write(dev, 0x080F, 0x8078); 1089 b43legacy_phy_write(dev, 0x080F, 0x8078);
1084 } 1090 }
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 68e1f8c78727..6835064758fb 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -193,7 +193,6 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
193{ 193{
194 const struct ieee80211_hdr *wlhdr; 194 const struct ieee80211_hdr *wlhdr;
195 int use_encryption = !!info->control.hw_key; 195 int use_encryption = !!info->control.hw_key;
196 u16 fctl;
197 u8 rate; 196 u8 rate;
198 struct ieee80211_rate *rate_fb; 197 struct ieee80211_rate *rate_fb;
199 int rate_ofdm; 198 int rate_ofdm;
@@ -204,7 +203,6 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
204 struct ieee80211_rate *tx_rate; 203 struct ieee80211_rate *tx_rate;
205 204
206 wlhdr = (const struct ieee80211_hdr *)fragment_data; 205 wlhdr = (const struct ieee80211_hdr *)fragment_data;
207 fctl = le16_to_cpu(wlhdr->frame_control);
208 206
209 memset(txhdr, 0, sizeof(*txhdr)); 207 memset(txhdr, 0, sizeof(*txhdr));
210 208
@@ -253,7 +251,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
253 mac_ctl |= (key->algorithm << 251 mac_ctl |= (key->algorithm <<
254 B43legacy_TX4_MAC_KEYALG_SHIFT) & 252 B43legacy_TX4_MAC_KEYALG_SHIFT) &
255 B43legacy_TX4_MAC_KEYALG; 253 B43legacy_TX4_MAC_KEYALG;
256 wlhdr_len = ieee80211_get_hdrlen(fctl); 254 wlhdr_len = ieee80211_hdrlen(wlhdr->frame_control);
257 iv_len = min((size_t)info->control.iv_len, 255 iv_len = min((size_t)info->control.iv_len,
258 ARRAY_SIZE(txhdr->iv)); 256 ARRAY_SIZE(txhdr->iv));
259 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len); 257 memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
@@ -626,7 +624,7 @@ void b43legacy_handle_hwtxstatus(struct b43legacy_wldev *dev,
626 tmp = hw->count; 624 tmp = hw->count;
627 status.frame_count = (tmp >> 4); 625 status.frame_count = (tmp >> 4);
628 status.rts_count = (tmp & 0x0F); 626 status.rts_count = (tmp & 0x0F);
629 tmp = hw->flags; 627 tmp = hw->flags << 1;
630 status.supp_reason = ((tmp & 0x1C) >> 2); 628 status.supp_reason = ((tmp & 0x1C) >> 2);
631 status.pm_indicated = !!(tmp & 0x80); 629 status.pm_indicated = !!(tmp & 0x80);
632 status.intermediate = !!(tmp & 0x40); 630 status.intermediate = !!(tmp & 0x40);
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index 29d39105f5b8..bfa375369df3 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -87,7 +87,8 @@ MODULE_LICENSE("Dual MPL/GPL");
87 87
88 Callable from any context. 88 Callable from any context.
89*/ 89*/
90static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0) 90static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
91 u16 param1, u16 param2)
91{ 92{
92 int k = CMD_BUSY_TIMEOUT; 93 int k = CMD_BUSY_TIMEOUT;
93 u16 reg; 94 u16 reg;
@@ -103,8 +104,8 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
103 return -EBUSY; 104 return -EBUSY;
104 } 105 }
105 106
106 hermes_write_regn(hw, PARAM2, 0); 107 hermes_write_regn(hw, PARAM2, param2);
107 hermes_write_regn(hw, PARAM1, 0); 108 hermes_write_regn(hw, PARAM1, param1);
108 hermes_write_regn(hw, PARAM0, param0); 109 hermes_write_regn(hw, PARAM0, param0);
109 hermes_write_regn(hw, CMD, cmd); 110 hermes_write_regn(hw, CMD, cmd);
110 111
@@ -115,16 +116,72 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0)
115 * Function definitions 116 * Function definitions
116 */ 117 */
117 118
119/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
120int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
121 u16 parm0, u16 parm1, u16 parm2,
122 struct hermes_response *resp)
123{
124 int err = 0;
125 int k;
126 u16 status, reg;
127
128 err = hermes_issue_cmd(hw, cmd, parm0, parm1, parm2);
129 if (err)
130 return err;
131
132 reg = hermes_read_regn(hw, EVSTAT);
133 k = CMD_INIT_TIMEOUT;
134 while ((!(reg & HERMES_EV_CMD)) && k) {
135 k--;
136 udelay(10);
137 reg = hermes_read_regn(hw, EVSTAT);
138 }
139
140 hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
141
142 if (!hermes_present(hw)) {
143 DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
144 hw->iobase);
145 err = -ENODEV;
146 goto out;
147 }
148
149 if (!(reg & HERMES_EV_CMD)) {
150 printk(KERN_ERR "hermes @ %p: "
151 "Timeout waiting for card to reset (reg=0x%04x)!\n",
152 hw->iobase, reg);
153 err = -ETIMEDOUT;
154 goto out;
155 }
156
157 status = hermes_read_regn(hw, STATUS);
158 if (resp) {
159 resp->status = status;
160 resp->resp0 = hermes_read_regn(hw, RESP0);
161 resp->resp1 = hermes_read_regn(hw, RESP1);
162 resp->resp2 = hermes_read_regn(hw, RESP2);
163 }
164
165 hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
166
167 if (status & HERMES_STATUS_RESULT)
168 err = -EIO;
169out:
170 return err;
171}
172EXPORT_SYMBOL(hermes_doicmd_wait);
173
118void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing) 174void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
119{ 175{
120 hw->iobase = address; 176 hw->iobase = address;
121 hw->reg_spacing = reg_spacing; 177 hw->reg_spacing = reg_spacing;
122 hw->inten = 0x0; 178 hw->inten = 0x0;
123} 179}
180EXPORT_SYMBOL(hermes_struct_init);
124 181
125int hermes_init(hermes_t *hw) 182int hermes_init(hermes_t *hw)
126{ 183{
127 u16 status, reg; 184 u16 reg;
128 int err = 0; 185 int err = 0;
129 int k; 186 int k;
130 187
@@ -162,45 +219,11 @@ int hermes_init(hermes_t *hw)
162 219
163 /* We don't use hermes_docmd_wait here, because the reset wipes 220 /* We don't use hermes_docmd_wait here, because the reset wipes
164 the magic constant in SWSUPPORT0 away, and it gets confused */ 221 the magic constant in SWSUPPORT0 away, and it gets confused */
165 err = hermes_issue_cmd(hw, HERMES_CMD_INIT, 0); 222 err = hermes_doicmd_wait(hw, HERMES_CMD_INIT, 0, 0, 0, NULL);
166 if (err)
167 return err;
168
169 reg = hermes_read_regn(hw, EVSTAT);
170 k = CMD_INIT_TIMEOUT;
171 while ( (! (reg & HERMES_EV_CMD)) && k) {
172 k--;
173 udelay(10);
174 reg = hermes_read_regn(hw, EVSTAT);
175 }
176
177 hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
178
179 if (! hermes_present(hw)) {
180 DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n",
181 hw->iobase);
182 err = -ENODEV;
183 goto out;
184 }
185
186 if (! (reg & HERMES_EV_CMD)) {
187 printk(KERN_ERR "hermes @ %p: "
188 "Timeout waiting for card to reset (reg=0x%04x)!\n",
189 hw->iobase, reg);
190 err = -ETIMEDOUT;
191 goto out;
192 }
193 223
194 status = hermes_read_regn(hw, STATUS);
195
196 hermes_write_regn(hw, EVACK, HERMES_EV_CMD);
197
198 if (status & HERMES_STATUS_RESULT)
199 err = -EIO;
200
201 out:
202 return err; 224 return err;
203} 225}
226EXPORT_SYMBOL(hermes_init);
204 227
205/* Issue a command to the chip, and (busy!) wait for it to 228/* Issue a command to the chip, and (busy!) wait for it to
206 * complete. 229 * complete.
@@ -216,7 +239,7 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
216 u16 reg; 239 u16 reg;
217 u16 status; 240 u16 status;
218 241
219 err = hermes_issue_cmd(hw, cmd, parm0); 242 err = hermes_issue_cmd(hw, cmd, parm0, 0, 0);
220 if (err) { 243 if (err) {
221 if (! hermes_present(hw)) { 244 if (! hermes_present(hw)) {
222 if (net_ratelimit()) 245 if (net_ratelimit())
@@ -271,6 +294,7 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
271 out: 294 out:
272 return err; 295 return err;
273} 296}
297EXPORT_SYMBOL(hermes_docmd_wait);
274 298
275int hermes_allocate(hermes_t *hw, u16 size, u16 *fid) 299int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
276{ 300{
@@ -313,7 +337,7 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
313 337
314 return 0; 338 return 0;
315} 339}
316 340EXPORT_SYMBOL(hermes_allocate);
317 341
318/* Set up a BAP to read a particular chunk of data from card's internal buffer. 342/* Set up a BAP to read a particular chunk of data from card's internal buffer.
319 * 343 *
@@ -397,6 +421,7 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
397 out: 421 out:
398 return err; 422 return err;
399} 423}
424EXPORT_SYMBOL(hermes_bap_pread);
400 425
401/* Write a block of data to the chip's buffer, via the 426/* Write a block of data to the chip's buffer, via the
402 * BAP. Synchronization/serialization is the caller's problem. 427 * BAP. Synchronization/serialization is the caller's problem.
@@ -422,6 +447,7 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
422 out: 447 out:
423 return err; 448 return err;
424} 449}
450EXPORT_SYMBOL(hermes_bap_pwrite);
425 451
426/* Read a Length-Type-Value record from the card. 452/* Read a Length-Type-Value record from the card.
427 * 453 *
@@ -463,7 +489,7 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
463 if (rtype != rid) 489 if (rtype != rid)
464 printk(KERN_WARNING "hermes @ %p: %s(): " 490 printk(KERN_WARNING "hermes @ %p: %s(): "
465 "rid (0x%04x) does not match type (0x%04x)\n", 491 "rid (0x%04x) does not match type (0x%04x)\n",
466 hw->iobase, __FUNCTION__, rid, rtype); 492 hw->iobase, __func__, rid, rtype);
467 if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize) 493 if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize)
468 printk(KERN_WARNING "hermes @ %p: " 494 printk(KERN_WARNING "hermes @ %p: "
469 "Truncating LTV record from %d to %d bytes. " 495 "Truncating LTV record from %d to %d bytes. "
@@ -475,6 +501,7 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
475 501
476 return 0; 502 return 0;
477} 503}
504EXPORT_SYMBOL(hermes_read_ltv);
478 505
479int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 506int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
480 u16 length, const void *value) 507 u16 length, const void *value)
@@ -497,20 +524,11 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
497 524
498 hermes_write_bytes(hw, dreg, value, count << 1); 525 hermes_write_bytes(hw, dreg, value, count << 1);
499 526
500 err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE, 527 err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE,
501 rid, NULL); 528 rid, NULL);
502 529
503 return err; 530 return err;
504} 531}
505
506EXPORT_SYMBOL(hermes_struct_init);
507EXPORT_SYMBOL(hermes_init);
508EXPORT_SYMBOL(hermes_docmd_wait);
509EXPORT_SYMBOL(hermes_allocate);
510
511EXPORT_SYMBOL(hermes_bap_pread);
512EXPORT_SYMBOL(hermes_bap_pwrite);
513EXPORT_SYMBOL(hermes_read_ltv);
514EXPORT_SYMBOL(hermes_write_ltv); 532EXPORT_SYMBOL(hermes_write_ltv);
515 533
516static int __init init_hermes(void) 534static int __init init_hermes(void)
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 8e3f0e3edb58..8b13c8fef3dc 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -179,17 +179,23 @@
179#define HERMES_802_11_OFFSET (14) 179#define HERMES_802_11_OFFSET (14)
180#define HERMES_802_3_OFFSET (14+32) 180#define HERMES_802_3_OFFSET (14+32)
181#define HERMES_802_2_OFFSET (14+32+14) 181#define HERMES_802_2_OFFSET (14+32+14)
182#define HERMES_TXCNTL2_OFFSET (HERMES_802_3_OFFSET - 2)
182 183
183#define HERMES_RXSTAT_ERR (0x0003) 184#define HERMES_RXSTAT_ERR (0x0003)
184#define HERMES_RXSTAT_BADCRC (0x0001) 185#define HERMES_RXSTAT_BADCRC (0x0001)
185#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002) 186#define HERMES_RXSTAT_UNDECRYPTABLE (0x0002)
187#define HERMES_RXSTAT_MIC (0x0010) /* Frame contains MIC */
186#define HERMES_RXSTAT_MACPORT (0x0700) 188#define HERMES_RXSTAT_MACPORT (0x0700)
187#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */ 189#define HERMES_RXSTAT_PCF (0x1000) /* Frame was received in CF period */
190#define HERMES_RXSTAT_MIC_KEY_ID (0x1800) /* MIC key used */
188#define HERMES_RXSTAT_MSGTYPE (0xE000) 191#define HERMES_RXSTAT_MSGTYPE (0xE000)
189#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */ 192#define HERMES_RXSTAT_1042 (0x2000) /* RFC-1042 frame */
190#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */ 193#define HERMES_RXSTAT_TUNNEL (0x4000) /* bridge-tunnel encoded frame */
191#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */ 194#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
192 195
196/* Shift amount for key ID in RXSTAT and TXCTRL */
197#define HERMES_MIC_KEY_ID_SHIFT 11
198
193struct hermes_tx_descriptor { 199struct hermes_tx_descriptor {
194 __le16 status; 200 __le16 status;
195 __le16 reserved1; 201 __le16 reserved1;
@@ -208,6 +214,8 @@ struct hermes_tx_descriptor {
208#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */ 214#define HERMES_TXCTRL_TX_OK (0x0002) /* ?? interrupt on Tx complete */
209#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */ 215#define HERMES_TXCTRL_TX_EX (0x0004) /* ?? interrupt on Tx exception */
210#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */ 216#define HERMES_TXCTRL_802_11 (0x0008) /* We supply 802.11 header */
217#define HERMES_TXCTRL_MIC (0x0010) /* 802.3 + TKIP */
218#define HERMES_TXCTRL_MIC_KEY_ID (0x1800) /* MIC Key ID mask */
211#define HERMES_TXCTRL_ALT_RTRY (0x0020) 219#define HERMES_TXCTRL_ALT_RTRY (0x0020)
212 220
213/* Inquiry constants and data types */ 221/* Inquiry constants and data types */
@@ -302,6 +310,40 @@ union hermes_scan_info {
302 struct symbol_scan_apinfo s; 310 struct symbol_scan_apinfo s;
303}; 311};
304 312
313/* Extended scan struct for HERMES_INQ_CHANNELINFO.
314 * wl_lkm calls this an ACS scan (Automatic Channel Select).
315 * Keep out of union hermes_scan_info because it is much bigger than
316 * the older scan structures. */
317struct agere_ext_scan_info {
318 __le16 reserved0;
319
320 u8 noise;
321 u8 level;
322 u8 rx_flow;
323 u8 rate;
324 __le16 reserved1[2];
325
326 __le16 frame_control;
327 __le16 dur_id;
328 u8 addr1[ETH_ALEN];
329 u8 addr2[ETH_ALEN];
330 u8 bssid[ETH_ALEN];
331 __le16 sequence;
332 u8 addr4[ETH_ALEN];
333
334 __le16 data_length;
335
336 /* Next 3 fields do not get filled in. */
337 u8 daddr[ETH_ALEN];
338 u8 saddr[ETH_ALEN];
339 __le16 len_type;
340
341 __le64 timestamp;
342 __le16 beacon_interval;
343 __le16 capabilities;
344 u8 data[316];
345} __attribute__ ((packed));
346
305#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000) 347#define HERMES_LINKSTATUS_NOT_CONNECTED (0x0000)
306#define HERMES_LINKSTATUS_CONNECTED (0x0001) 348#define HERMES_LINKSTATUS_CONNECTED (0x0001)
307#define HERMES_LINKSTATUS_DISCONNECTED (0x0002) 349#define HERMES_LINKSTATUS_DISCONNECTED (0x0002)
@@ -353,6 +395,9 @@ void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
353int hermes_init(hermes_t *hw); 395int hermes_init(hermes_t *hw);
354int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, 396int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
355 struct hermes_response *resp); 397 struct hermes_response *resp);
398int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
399 u16 parm0, u16 parm1, u16 parm2,
400 struct hermes_response *resp);
356int hermes_allocate(hermes_t *hw, u16 size, u16 *fid); 401int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
357 402
358int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len, 403int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
diff --git a/drivers/net/wireless/hermes_dld.c b/drivers/net/wireless/hermes_dld.c
new file mode 100644
index 000000000000..d8c626e61a3a
--- /dev/null
+++ b/drivers/net/wireless/hermes_dld.c
@@ -0,0 +1,730 @@
1/*
2 * Hermes download helper driver.
3 *
4 * This could be entirely merged into hermes.c.
5 *
6 * I'm keeping it separate to minimise the amount of merging between
7 * kernel upgrades. It also means the memory overhead for drivers that
8 * don't need firmware download low.
9 *
10 * This driver:
11 * - is capable of writing to the volatile area of the hermes device
12 * - is currently not capable of writing to non-volatile areas
13 * - provide helpers to identify and update plugin data
14 * - is not capable of interpreting a fw image directly. That is up to
15 * the main card driver.
16 * - deals with Hermes I devices. It can probably be modified to deal
17 * with Hermes II devices
18 *
19 * Copyright (C) 2007, David Kilroy
20 *
21 * Plug data code slightly modified from spectrum_cs driver
22 * Copyright (C) 2002-2005 Pavel Roskin <proski@gnu.org>
23 * Portions based on information in wl_lkm_718 Agere driver
24 * COPYRIGHT (C) 2001-2004 by Agere Systems Inc. All Rights Reserved
25 *
26 * The contents of this file are subject to the Mozilla Public License
27 * Version 1.1 (the "License"); you may not use this file except in
28 * compliance with the License. You may obtain a copy of the License
29 * at http://www.mozilla.org/MPL/
30 *
31 * Software distributed under the License is distributed on an "AS IS"
32 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
33 * the License for the specific language governing rights and
34 * limitations under the License.
35 *
36 * Alternatively, the contents of this file may be used under the
37 * terms of the GNU General Public License version 2 (the "GPL"), in
38 * which case the provisions of the GPL are applicable instead of the
39 * above. If you wish to allow the use of your version of this file
40 * only under the terms of the GPL and not to allow others to use your
41 * version of this file under the MPL, indicate your decision by
42 * deleting the provisions above and replace them with the notice and
43 * other provisions required by the GPL. If you do not delete the
44 * provisions above, a recipient may use your version of this file
45 * under either the MPL or the GPL.
46 */
47
48#include <linux/module.h>
49#include <linux/delay.h>
50#include "hermes.h"
51#include "hermes_dld.h"
52
53MODULE_DESCRIPTION("Download helper for Lucent Hermes chipset");
54MODULE_AUTHOR("David Kilroy <kilroyd@gmail.com>");
55MODULE_LICENSE("Dual MPL/GPL");
56
57#define PFX "hermes_dld: "
58
59/*
60 * AUX port access. To unlock the AUX port write the access keys to the
61 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
62 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
63 */
64#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
65#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
66#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
67#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
68
69#define HERMES_AUX_PW0 0xFE01
70#define HERMES_AUX_PW1 0xDC23
71#define HERMES_AUX_PW2 0xBA45
72
73/* HERMES_CMD_DOWNLD */
74#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
75#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
76#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
77#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
78
79/* End markers used in dblocks */
80#define PDI_END 0x00000000 /* End of PDA */
81#define BLOCK_END 0xFFFFFFFF /* Last image block */
82#define TEXT_END 0x1A /* End of text header */
83
84/*
85 * PDA == Production Data Area
86 *
87 * In principle, the max. size of the PDA is is 4096 words. Currently,
88 * however, only about 500 bytes of this area are used.
89 *
90 * Some USB implementations can't handle sizes in excess of 1016. Note
91 * that PDA is not actually used in those USB environments, but may be
92 * retrieved by common code.
93 */
94#define MAX_PDA_SIZE 1000
95
96/* Limit the amout we try to download in a single shot.
97 * Size is in bytes.
98 */
99#define MAX_DL_SIZE 1024
100#define LIMIT_PROGRAM_SIZE 0
101
102/*
103 * The following structures have little-endian fields denoted by
104 * the leading underscore. Don't access them directly - use inline
105 * functions defined below.
106 */
107
108/*
109 * The binary image to be downloaded consists of series of data blocks.
110 * Each block has the following structure.
111 */
112struct dblock {
113 __le32 addr; /* adapter address where to write the block */
114 __le16 len; /* length of the data only, in bytes */
115 char data[0]; /* data to be written */
116} __attribute__ ((packed));
117
118/*
119 * Plug Data References are located in in the image after the last data
120 * block. They refer to areas in the adapter memory where the plug data
121 * items with matching ID should be written.
122 */
123struct pdr {
124 __le32 id; /* record ID */
125 __le32 addr; /* adapter address where to write the data */
126 __le32 len; /* expected length of the data, in bytes */
127 char next[0]; /* next PDR starts here */
128} __attribute__ ((packed));
129
130/*
131 * Plug Data Items are located in the EEPROM read from the adapter by
132 * primary firmware. They refer to the device-specific data that should
133 * be plugged into the secondary firmware.
134 */
135struct pdi {
136 __le16 len; /* length of ID and data, in words */
137 __le16 id; /* record ID */
138 char data[0]; /* plug data */
139} __attribute__ ((packed));
140
141/*** FW data block access functions ***/
142
143static inline u32
144dblock_addr(const struct dblock *blk)
145{
146 return le32_to_cpu(blk->addr);
147}
148
149static inline u32
150dblock_len(const struct dblock *blk)
151{
152 return le16_to_cpu(blk->len);
153}
154
155/*** PDR Access functions ***/
156
157static inline u32
158pdr_id(const struct pdr *pdr)
159{
160 return le32_to_cpu(pdr->id);
161}
162
163static inline u32
164pdr_addr(const struct pdr *pdr)
165{
166 return le32_to_cpu(pdr->addr);
167}
168
169static inline u32
170pdr_len(const struct pdr *pdr)
171{
172 return le32_to_cpu(pdr->len);
173}
174
175/*** PDI Access functions ***/
176
177static inline u32
178pdi_id(const struct pdi *pdi)
179{
180 return le16_to_cpu(pdi->id);
181}
182
183/* Return length of the data only, in bytes */
184static inline u32
185pdi_len(const struct pdi *pdi)
186{
187 return 2 * (le16_to_cpu(pdi->len) - 1);
188}
189
190/*** Hermes AUX control ***/
191
192static inline void
193hermes_aux_setaddr(hermes_t *hw, u32 addr)
194{
195 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
196 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
197}
198
199static inline int
200hermes_aux_control(hermes_t *hw, int enabled)
201{
202 int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
203 int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
204 int i;
205
206 /* Already open? */
207 if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
208 return 0;
209
210 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
211 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
212 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
213 hermes_write_reg(hw, HERMES_CONTROL, action);
214
215 for (i = 0; i < 20; i++) {
216 udelay(10);
217 if (hermes_read_reg(hw, HERMES_CONTROL) ==
218 desired_state)
219 return 0;
220 }
221
222 return -EBUSY;
223}
224
225/*** Plug Data Functions ***/
226
227/*
228 * Scan PDR for the record with the specified RECORD_ID.
229 * If it's not found, return NULL.
230 */
231static struct pdr *
232hermes_find_pdr(struct pdr *first_pdr, u32 record_id)
233{
234 struct pdr *pdr = first_pdr;
235 void *end = (void *)first_pdr + MAX_PDA_SIZE;
236
237 while (((void *)pdr < end) &&
238 (pdr_id(pdr) != PDI_END)) {
239 /*
240 * PDR area is currently not terminated by PDI_END.
241 * It's followed by CRC records, which have the type
242 * field where PDR has length. The type can be 0 or 1.
243 */
244 if (pdr_len(pdr) < 2)
245 return NULL;
246
247 /* If the record ID matches, we are done */
248 if (pdr_id(pdr) == record_id)
249 return pdr;
250
251 pdr = (struct pdr *) pdr->next;
252 }
253 return NULL;
254}
255
256/* Scan production data items for a particular entry */
257static struct pdi *
258hermes_find_pdi(struct pdi *first_pdi, u32 record_id)
259{
260 struct pdi *pdi = first_pdi;
261
262 while (pdi_id(pdi) != PDI_END) {
263
264 /* If the record ID matches, we are done */
265 if (pdi_id(pdi) == record_id)
266 return pdi;
267
268 pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
269 }
270 return NULL;
271}
272
273/* Process one Plug Data Item - find corresponding PDR and plug it */
274static int
275hermes_plug_pdi(hermes_t *hw, struct pdr *first_pdr, const struct pdi *pdi)
276{
277 struct pdr *pdr;
278
279 /* Find the PDR corresponding to this PDI */
280 pdr = hermes_find_pdr(first_pdr, pdi_id(pdi));
281
282 /* No match is found, safe to ignore */
283 if (!pdr)
284 return 0;
285
286 /* Lengths of the data in PDI and PDR must match */
287 if (pdi_len(pdi) != pdr_len(pdr))
288 return -EINVAL;
289
290 /* do the actual plugging */
291 hermes_aux_setaddr(hw, pdr_addr(pdr));
292 hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
293
294 return 0;
295}
296
297/* Read PDA from the adapter */
298int hermes_read_pda(hermes_t *hw,
299 __le16 *pda,
300 u32 pda_addr,
301 u16 pda_len,
302 int use_eeprom) /* can we get this into hw? */
303{
304 int ret;
305 u16 pda_size;
306 u16 data_len = pda_len;
307 __le16 *data = pda;
308
309 if (use_eeprom) {
310 /* PDA of spectrum symbol is in eeprom */
311
312 /* Issue command to read EEPROM */
313 ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
314 if (ret)
315 return ret;
316 } else {
317 /* wl_lkm does not include PDA size in the PDA area.
318 * We will pad the information into pda, so other routines
319 * don't have to be modified */
320 pda[0] = cpu_to_le16(pda_len - 2);
321 /* Includes CFG_PROD_DATA but not itself */
322 pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
323 data_len = pda_len - 4;
324 data = pda + 2;
325 }
326
327 /* Open auxiliary port */
328 ret = hermes_aux_control(hw, 1);
329 printk(KERN_DEBUG PFX "AUX enable returned %d\n", ret);
330 if (ret)
331 return ret;
332
333 /* read PDA from EEPROM */
334 hermes_aux_setaddr(hw, pda_addr);
335 hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
336
337 /* Close aux port */
338 ret = hermes_aux_control(hw, 0);
339 printk(KERN_DEBUG PFX "AUX disable returned %d\n", ret);
340
341 /* Check PDA length */
342 pda_size = le16_to_cpu(pda[0]);
343 printk(KERN_DEBUG PFX "Actual PDA length %d, Max allowed %d\n",
344 pda_size, pda_len);
345 if (pda_size > pda_len)
346 return -EINVAL;
347
348 return 0;
349}
350EXPORT_SYMBOL(hermes_read_pda);
351
/* Parse PDA and write the records into the adapter
 *
 * Attempt to write every record that is in the specified pda
 * which also has a valid production data record for the firmware.
 *
 * @first_pdr points at the firmware's PDR table (searched by
 * hermes_plug_pdi); @pda is the PDA as filled in by
 * hermes_read_pda(), so the PDIs start at pda + 2 after the
 * length/type header words.
 *
 * NOTE(review): the loop relies on the PDA being terminated by a
 * PDI_END record — there is no explicit upper bound on how far it
 * walks; verify callers always pass a well-formed PDA.
 */
int hermes_apply_pda(hermes_t *hw,
		     const char *first_pdr,
		     const __le16 *pda)
{
	int ret;
	const struct pdi *pdi;
	struct pdr *pdr;

	pdr = (struct pdr *) first_pdr;

	/* Go through every PDI and plug them into the adapter */
	pdi = (const struct pdi *) (pda + 2);
	while (pdi_id(pdi) != PDI_END) {
		ret = hermes_plug_pdi(hw, pdr, pdi);
		if (ret)
			return ret;

		/* Increment to the next PDI: step over this PDI's
		 * payload, whose length comes from its own header */
		pdi = (const struct pdi *) &pdi->data[pdi_len(pdi)];
	}
	return 0;
}
EXPORT_SYMBOL(hermes_apply_pda);
380
381/* Identify the total number of bytes in all blocks
382 * including the header data.
383 */
384size_t
385hermes_blocks_length(const char *first_block)
386{
387 const struct dblock *blk = (const struct dblock *) first_block;
388 int total_len = 0;
389 int len;
390
391 /* Skip all blocks to locate Plug Data References
392 * (Spectrum CS) */
393 while (dblock_addr(blk) != BLOCK_END) {
394 len = dblock_len(blk);
395 total_len += sizeof(*blk) + len;
396 blk = (struct dblock *) &blk->data[len];
397 }
398
399 return total_len;
400}
401EXPORT_SYMBOL(hermes_blocks_length);
402
403/*** Hermes programming ***/
404
/* About to start programming data (Hermes I)
 * offset is the entry point
 *
 * Spectrum_cs' Symbol fw does not require this
 * wl_lkm Agere fw does
 * Don't know about intersil
 *
 * Sequence: ack any pending event, issue INIT twice (first with the
 * 0x0100 flag, then without), open the AUX port, then enable volatile
 * programming with the 32-bit entry point split across two 16-bit
 * command parameters.
 *
 * Returns 0 on success, or the error from the first failing step.
 */
int hermesi_program_init(hermes_t *hw, u32 offset)
{
	int err;

	/* Disable interrupts?*/
	/*hw->inten = 0x0;*/
	/*hermes_write_regn(hw, INTEN, 0);*/
	/*hermes_set_irqmask(hw, 0);*/

	/* Acknowledge any outstanding command */
	hermes_write_regn(hw, EVACK, 0xFFFF);

	/* Using doicmd_wait rather than docmd_wait */
	err = hermes_doicmd_wait(hw,
				 0x0100 | HERMES_CMD_INIT,
				 0, 0, 0, NULL);
	if (err)
		return err;

	err = hermes_doicmd_wait(hw,
				 0x0000 | HERMES_CMD_INIT,
				 0, 0, 0, NULL);
	if (err)
		return err;

	/* Open the auxiliary port for the download writes to come */
	err = hermes_aux_control(hw, 1);
	printk(KERN_DEBUG PFX "AUX enable returned %d\n", err);

	if (err)
		return err;

	/* Entry point is passed low word / high word in params 0 and 1 */
	printk(KERN_DEBUG PFX "Enabling volatile, EP 0x%08x\n", offset);
	err = hermes_doicmd_wait(hw,
				 HERMES_PROGRAM_ENABLE_VOLATILE,
				 offset & 0xFFFFu,
				 offset >> 16,
				 0,
				 NULL);
	printk(KERN_DEBUG PFX "PROGRAM_ENABLE returned %d\n",
	       err);

	return err;
}
EXPORT_SYMBOL(hermesi_program_init);
456
/* Done programming data (Hermes I)
 *
 * Spectrum_cs' Symbol fw does not require this
 * wl_lkm Agere fw does
 * Don't know about intersil
 *
 * Issues PROGRAM_DISABLE, checks the response really came from the
 * download engine, closes the AUX port, and re-issues INIT ignoring
 * its result.  The PROGRAM_DISABLE result takes precedence over any
 * AUX-disable error in the return value.
 */
int hermesi_program_end(hermes_t *hw)
{
	struct hermes_response resp;
	int rc = 0;
	int err;

	rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);

	/* NOTE(review): resp is printed even when rc != 0, in which
	 * case its fields may not have been filled in — confirm
	 * hermes_docmd_wait initialises resp on failure. */
	printk(KERN_DEBUG PFX "PROGRAM_DISABLE returned %d, "
	       "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
	       rc, resp.resp0, resp.resp1, resp.resp2);

	/* The command must have been handled by the download engine */
	if ((rc == 0) &&
	    ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
		rc = -EIO;

	err = hermes_aux_control(hw, 0);
	printk(KERN_DEBUG PFX "AUX disable returned %d\n", err);

	/* Acknowledge any outstanding command */
	hermes_write_regn(hw, EVACK, 0xFFFF);

	/* Reinitialise, ignoring return */
	(void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
				  0, 0, 0, NULL);

	return rc ? rc : err;
}
EXPORT_SYMBOL(hermesi_program_end);
492
/* Program the data blocks
 *
 * Walks the list of download blocks starting at @first_block and
 * writes each one to the card through the AUX port.  @end points one
 * past the last valid byte of the firmware image; any block header
 * that would sit past it aborts the download with -EIO.  The list is
 * terminated by a block whose address is BLOCK_END.
 *
 * When LIMIT_PROGRAM_SIZE is set, each block is written in chunks of
 * at most MAX_DL_SIZE bytes (mirroring the wl_lkm driver's chunked
 * writes); otherwise each block goes out in a single transfer.
 */
int hermes_program(hermes_t *hw, const char *first_block, const char *end)
{
	const struct dblock *blk;
	u32 blkaddr;
	u32 blklen;
#if LIMIT_PROGRAM_SIZE
	u32 addr;
	u32 len;
#endif

	blk = (const struct dblock *) first_block;

	/* Make sure a full block header fits inside the image */
	if ((const char *) blk > (end - sizeof(*blk)))
		return -EIO;

	blkaddr = dblock_addr(blk);
	blklen = dblock_len(blk);

	while ((blkaddr != BLOCK_END) &&
	       (((const char *) blk + blklen) <= end)) {
		printk(KERN_DEBUG PFX
		       "Programming block of length %d to address 0x%08x\n",
		       blklen, blkaddr);

#if !LIMIT_PROGRAM_SIZE
		/* wl_lkm driver splits this into writes of 2000 bytes */
		hermes_aux_setaddr(hw, blkaddr);
		hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
				   blklen);
#else
		/* First chunk: whole block or MAX_DL_SIZE, whichever
		 * is smaller */
		len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
		addr = blkaddr;

		while (addr < (blkaddr + blklen)) {
			printk(KERN_DEBUG PFX
			       "Programming subblock of length %d "
			       "to address 0x%08x. Data @ %p\n",
			       len, addr, &blk->data[addr - blkaddr]);

			hermes_aux_setaddr(hw, addr);
			hermes_write_bytes(hw, HERMES_AUXDATA,
					   &blk->data[addr - blkaddr],
					   len);

			addr += len;
			/* Remaining bytes in this block, clamped to
			 * the chunk size */
			len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
				(blkaddr + blklen - addr) : MAX_DL_SIZE;
		}
#endif
		/* Step over this block's payload to the next header */
		blk = (const struct dblock *) &blk->data[blklen];

		if ((const char *) blk > (end - sizeof(*blk)))
			return -EIO;

		blkaddr = dblock_addr(blk);
		blklen = dblock_len(blk);
	}
	return 0;
}
EXPORT_SYMBOL(hermes_program);
554
/* Module init: nothing to set up — hermes_dld only exports helper
 * functions used by the orinoco/spectrum drivers. */
static int __init init_hermes_dld(void)
{
	return 0;
}

/* Module exit: no state to tear down. */
static void __exit exit_hermes_dld(void)
{
}

module_init(init_hermes_dld);
module_exit(exit_hermes_dld);
566
/*** Default plugging data for Hermes I ***/
/* Values from wl_lkm_718/hcf/dhf.c */

/* Define a static, pre-encoded default record: a little-endian length
 * word (in 16-bit units, excluding itself), the record id, and
 * `length' bytes of payload.  The layout lets these records be used
 * as fallback PDIs by hermes_apply_pda_with_defaults(). */
#define DEFINE_DEFAULT_PDR(pid, length, data) \
static const struct { \
	__le16 len; \
	__le16 id; \
	u8 val[length]; \
} __attribute__ ((packed)) default_pdr_data_##pid = { \
	__constant_cpu_to_le16((sizeof(default_pdr_data_##pid)/ \
				sizeof(__le16)) - 1), \
	__constant_cpu_to_le16(pid), \
	data \
}

/* Name of the record defined above for a given production-data id */
#define DEFAULT_PDR(pid) default_pdr_data_##pid

/* HWIF Compatibility */
DEFINE_DEFAULT_PDR(0x0005, 10, "\x00\x00\x06\x00\x01\x00\x01\x00\x01\x00");

/* PPPPSign */
DEFINE_DEFAULT_PDR(0x0108, 4, "\x00\x00\x00\x00");

/* PPPPProf */
DEFINE_DEFAULT_PDR(0x0109, 10, "\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00");

/* Antenna diversity */
DEFINE_DEFAULT_PDR(0x0150, 2, "\x00\x3F");

/* Modem VCO band Set-up */
DEFINE_DEFAULT_PDR(0x0160, 28,
		   "\x00\x00\x00\x00\x00\x00\x00\x00"
		   "\x00\x00\x00\x00\x00\x00\x00\x00"
		   "\x00\x00\x00\x00\x00\x00\x00\x00"
		   "\x00\x00\x00\x00");

/* Modem Rx Gain Table Values
 *
 * NOTE(review): several rows below use the octal escape "\01" rather
 * than "\x01"; both encode the same byte (0x01) so behaviour is
 * unaffected, but normalising to "\x01" would be clearer.  Bytes kept
 * exactly as in the reference wl_lkm source. */
DEFINE_DEFAULT_PDR(0x0161, 256,
		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
		   "\x3F\x01\x3F\01\x3F\x01\x3F\x01"
		   "\x3F\x01\x3E\01\x3E\x01\x3D\x01"
		   "\x3D\x01\x3C\01\x3C\x01\x3B\x01"
		   "\x3B\x01\x3A\01\x3A\x01\x39\x01"
		   "\x39\x01\x38\01\x38\x01\x37\x01"
		   "\x37\x01\x36\01\x36\x01\x35\x01"
		   "\x35\x01\x34\01\x34\x01\x33\x01"
		   "\x33\x01\x32\x01\x32\x01\x31\x01"
		   "\x31\x01\x30\x01\x30\x01\x7B\x01"
		   "\x7B\x01\x7A\x01\x7A\x01\x79\x01"
		   "\x79\x01\x78\x01\x78\x01\x77\x01"
		   "\x77\x01\x76\x01\x76\x01\x75\x01"
		   "\x75\x01\x74\x01\x74\x01\x73\x01"
		   "\x73\x01\x72\x01\x72\x01\x71\x01"
		   "\x71\x01\x70\x01\x70\x01\x68\x01"
		   "\x68\x01\x67\x01\x67\x01\x66\x01"
		   "\x66\x01\x65\x01\x65\x01\x57\x01"
		   "\x57\x01\x56\x01\x56\x01\x55\x01"
		   "\x55\x01\x54\x01\x54\x01\x53\x01"
		   "\x53\x01\x52\x01\x52\x01\x51\x01"
		   "\x51\x01\x50\x01\x50\x01\x48\x01"
		   "\x48\x01\x47\x01\x47\x01\x46\x01"
		   "\x46\x01\x45\x01\x45\x01\x44\x01"
		   "\x44\x01\x43\x01\x43\x01\x42\x01"
		   "\x42\x01\x41\x01\x41\x01\x40\x01"
		   "\x40\x01\x40\x01\x40\x01\x40\x01"
		   "\x40\x01\x40\x01\x40\x01\x40\x01"
		   "\x40\x01\x40\x01\x40\x01\x40\x01"
		   "\x40\x01\x40\x01\x40\x01\x40\x01");
637
/* Write PDA according to certain rules.
 *
 * For every production data record, look for a previous setting in
 * the pda, and use that.
 *
 * For certain records, use defaults if they are not found in pda.
 */
int hermes_apply_pda_with_defaults(hermes_t *hw,
				   const char *first_pdr,
				   const __le16 *pda)
{
	const struct pdr *pdr = (const struct pdr *) first_pdr;
	/* NOTE(review): this cast drops the const qualifier on @pda;
	 * the PDIs appear to be only read below, so it looks safe but
	 * could be tightened. */
	struct pdi *first_pdi = (struct pdi *) &pda[2];
	struct pdi *pdi;
	struct pdi *default_pdi = NULL;
	struct pdi *outdoor_pdi;
	/* Hard upper bound on how far we walk the PDR table */
	void *end = (void *)first_pdr + MAX_PDA_SIZE;
	int record_id;

	while (((void *)pdr < end) &&
	       (pdr_id(pdr) != PDI_END)) {
		/*
		 * For spectrum_cs firmwares,
		 * PDR area is currently not terminated by PDI_END.
		 * It's followed by CRC records, which have the type
		 * field where PDR has length. The type can be 0 or 1.
		 */
		if (pdr_len(pdr) < 2)
			break;
		record_id = pdr_id(pdr);

		/* Prefer a value the card supplied in its own PDA */
		pdi = hermes_find_pdi(first_pdi, record_id);
		if (pdi)
			printk(KERN_DEBUG PFX "Found record 0x%04x at %p\n",
			       record_id, pdi);

		switch (record_id) {
		case 0x110: /* Modem REFDAC values */
		case 0x120: /* Modem VGDAC values */
			/* The "outdoor" variant (id + 1) takes
			 * precedence over the indoor record when the
			 * PDA provides one; no built-in default */
			outdoor_pdi = hermes_find_pdi(first_pdi, record_id + 1);
			default_pdi = NULL;
			if (outdoor_pdi) {
				pdi = outdoor_pdi;
				printk(KERN_DEBUG PFX
				       "Using outdoor record 0x%04x at %p\n",
				       record_id + 1, pdi);
			}
			break;
		case 0x5: /* HWIF Compatibility */
			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0005);
			break;
		case 0x108: /* PPPPSign */
			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0108);
			break;
		case 0x109: /* PPPPProf */
			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0109);
			break;
		case 0x150: /* Antenna diversity */
			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0150);
			break;
		case 0x160: /* Modem VCO band Set-up */
			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0160);
			break;
		case 0x161: /* Modem Rx Gain Table Values */
			default_pdi = (struct pdi *) &DEFAULT_PDR(0x0161);
			break;
		default:
			default_pdi = NULL;
			break;
		}
		if (!pdi && default_pdi) {
			/* Use default */
			pdi = default_pdi;
			printk(KERN_DEBUG PFX
			       "Using default record 0x%04x at %p\n",
			       record_id, pdi);
		}

		if (pdi) {
			/* Lengths of the data in PDI and PDR must match */
			if (pdi_len(pdi) == pdr_len(pdr)) {
				/* do the actual plugging */
				hermes_aux_setaddr(hw, pdr_addr(pdr));
				hermes_write_bytes(hw, HERMES_AUXDATA,
						   pdi->data, pdi_len(pdi));
			}
			/* Mismatched lengths are silently skipped */
		}

		/* NOTE(review): pdr++ advances by sizeof(struct pdr),
		 * i.e. assumes fixed-size PDR entries — confirm against
		 * the struct pdr definition in the header. */
		pdr++;
	}
	return 0;
}
EXPORT_SYMBOL(hermes_apply_pda_with_defaults);
diff --git a/drivers/net/wireless/hermes_dld.h b/drivers/net/wireless/hermes_dld.h
new file mode 100644
index 000000000000..6fcb26277999
--- /dev/null
+++ b/drivers/net/wireless/hermes_dld.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2007, David Kilroy
3 *
4 * The contents of this file are subject to the Mozilla Public License
5 * Version 1.1 (the "License"); you may not use this file except in
6 * compliance with the License. You may obtain a copy of the License
7 * at http://www.mozilla.org/MPL/
8 *
9 * Software distributed under the License is distributed on an "AS IS"
10 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
11 * the License for the specific language governing rights and
12 * limitations under the License.
13 *
14 * Alternatively, the contents of this file may be used under the
15 * terms of the GNU General Public License version 2 (the "GPL"), in
16 * which case the provisions of the GPL are applicable instead of the
17 * above. If you wish to allow the use of your version of this file
18 * only under the terms of the GPL and not to allow others to use your
19 * version of this file under the MPL, indicate your decision by
20 * deleting the provisions above and replace them with the notice and
21 * other provisions required by the GPL. If you do not delete the
22 * provisions above, a recipient may use your version of this file
23 * under either the MPL or the GPL.
24 */
25#ifndef _HERMES_DLD_H
26#define _HERMES_DLD_H
27
28#include "hermes.h"
29
30int hermesi_program_init(hermes_t *hw, u32 offset);
31int hermesi_program_end(hermes_t *hw);
32int hermes_program(hermes_t *hw, const char *first_block, const char *end);
33
34int hermes_read_pda(hermes_t *hw,
35 __le16 *pda,
36 u32 pda_addr,
37 u16 pda_len,
38 int use_eeprom);
39int hermes_apply_pda(hermes_t *hw,
40 const char *first_pdr,
41 const __le16 *pda);
42int hermes_apply_pda_with_defaults(hermes_t *hw,
43 const char *first_pdr,
44 const __le16 *pda);
45
46size_t hermes_blocks_length(const char *first_block);
47
48#endif /* _HERMES_DLD_H */
diff --git a/drivers/net/wireless/hermes_rid.h b/drivers/net/wireless/hermes_rid.h
index 4f46b4809e55..42eb67dea1df 100644
--- a/drivers/net/wireless/hermes_rid.h
+++ b/drivers/net/wireless/hermes_rid.h
@@ -30,6 +30,7 @@
30#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20 30#define HERMES_RID_CNFWEPENABLED_AGERE 0xFC20
31#define HERMES_RID_CNFAUTHENTICATION_AGERE 0xFC21 31#define HERMES_RID_CNFAUTHENTICATION_AGERE 0xFC21
32#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21 32#define HERMES_RID_CNFMANDATORYBSSID_SYMBOL 0xFC21
33#define HERMES_RID_CNFDROPUNENCRYPTED 0xFC22
33#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23 34#define HERMES_RID_CNFWEPDEFAULTKEYID 0xFC23
34#define HERMES_RID_CNFDEFAULTKEY0 0xFC24 35#define HERMES_RID_CNFDEFAULTKEY0 0xFC24
35#define HERMES_RID_CNFDEFAULTKEY1 0xFC25 36#define HERMES_RID_CNFDEFAULTKEY1 0xFC25
@@ -85,6 +86,16 @@
85#define HERMES_RID_CNFSCANSSID_AGERE 0xFCB2 86#define HERMES_RID_CNFSCANSSID_AGERE 0xFCB2
86#define HERMES_RID_CNFBASICRATES 0xFCB3 87#define HERMES_RID_CNFBASICRATES 0xFCB3
87#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4 88#define HERMES_RID_CNFSUPPORTEDRATES 0xFCB4
89#define HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE 0xFCB4
90#define HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE 0xFCB5
91#define HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE 0xFCB6
92#define HERMES_RID_CNFADDMAPPEDTKIPKEY_AGERE 0xFCB7
93#define HERMES_RID_CNFREMMAPPEDTKIPKEY_AGERE 0xFCB8
94#define HERMES_RID_CNFSETWPACAPABILITIES_AGERE 0xFCB9
95#define HERMES_RID_CNFCACHEDPMKADDRESS 0xFCBA
96#define HERMES_RID_CNFREMOVEPMKADDRESS 0xFCBB
97#define HERMES_RID_CNFSCANCHANNELS2GHZ 0xFCC2
98#define HERMES_RID_CNFDISASSOCIATE 0xFCC8
88#define HERMES_RID_CNFTICKTIME 0xFCE0 99#define HERMES_RID_CNFTICKTIME 0xFCE0
89#define HERMES_RID_CNFSCANREQUEST 0xFCE1 100#define HERMES_RID_CNFSCANREQUEST 0xFCE1
90#define HERMES_RID_CNFJOINREQUEST 0xFCE2 101#define HERMES_RID_CNFJOINREQUEST 0xFCE2
@@ -137,6 +148,12 @@
137#define HERMES_RID_CURRENTTXRATE6 0xFD85 148#define HERMES_RID_CURRENTTXRATE6 0xFD85
138#define HERMES_RID_OWNMACADDR 0xFD86 149#define HERMES_RID_OWNMACADDR 0xFD86
139#define HERMES_RID_SCANRESULTSTABLE 0xFD88 150#define HERMES_RID_SCANRESULTSTABLE 0xFD88
151#define HERMES_RID_CURRENT_COUNTRY_INFO 0xFD89
152#define HERMES_RID_CURRENT_WPA_IE 0xFD8A
153#define HERMES_RID_CURRENT_TKIP_IV 0xFD8B
154#define HERMES_RID_CURRENT_ASSOC_REQ_INFO 0xFD8C
155#define HERMES_RID_CURRENT_ASSOC_RESP_INFO 0xFD8D
156#define HERMES_RID_TXQUEUEEMPTY 0xFD91
140#define HERMES_RID_PHYTYPE 0xFDC0 157#define HERMES_RID_PHYTYPE 0xFDC0
141#define HERMES_RID_CURRENTCHANNEL 0xFDC1 158#define HERMES_RID_CURRENTCHANNEL 0xFDC1
142#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2 159#define HERMES_RID_CURRENTPOWERSTATE 0xFDC2
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 19a401c4a0dc..bca74811bc7f 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -211,7 +211,7 @@ static u32 ipw2100_debug_level = IPW_DL_NONE;
211do { \ 211do { \
212 if (ipw2100_debug_level & (level)) { \ 212 if (ipw2100_debug_level & (level)) { \
213 printk(KERN_DEBUG "ipw2100: %c %s ", \ 213 printk(KERN_DEBUG "ipw2100: %c %s ", \
214 in_interrupt() ? 'I' : 'U', __FUNCTION__); \ 214 in_interrupt() ? 'I' : 'U', __func__); \
215 printk(message); \ 215 printk(message); \
216 } \ 216 } \
217} while (0) 217} while (0)
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index d4ab28b73b32..0bad1ec3e7e0 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1394,13 +1394,13 @@ BIT_ARG16(x)
1394#define IPW_DEBUG(level, fmt, args...) \ 1394#define IPW_DEBUG(level, fmt, args...) \
1395do { if (ipw_debug_level & (level)) \ 1395do { if (ipw_debug_level & (level)) \
1396 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ 1396 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1397 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 1397 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
1398 1398
1399#ifdef CONFIG_IPW2200_DEBUG 1399#ifdef CONFIG_IPW2200_DEBUG
1400#define IPW_LL_DEBUG(level, fmt, args...) \ 1400#define IPW_LL_DEBUG(level, fmt, args...) \
1401do { if (ipw_debug_level & (level)) \ 1401do { if (ipw_debug_level & (level)) \
1402 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ 1402 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1403 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 1403 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
1404#else 1404#else
1405#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0) 1405#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
1406#endif /* CONFIG_IPW2200_DEBUG */ 1406#endif /* CONFIG_IPW2200_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
index f1d002f7b790..33016fb5e9b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
@@ -34,12 +34,12 @@ extern u32 iwl3945_debug_level;
34#define IWL_DEBUG(level, fmt, args...) \ 34#define IWL_DEBUG(level, fmt, args...) \
35do { if (iwl3945_debug_level & (level)) \ 35do { if (iwl3945_debug_level & (level)) \
36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 36 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
37 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 37 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
38 38
39#define IWL_DEBUG_LIMIT(level, fmt, args...) \ 39#define IWL_DEBUG_LIMIT(level, fmt, args...) \
40do { if ((iwl3945_debug_level & (level)) && net_ratelimit()) \ 40do { if ((iwl3945_debug_level & (level)) && net_ratelimit()) \
41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \ 41 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
42 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 42 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
43 43
44static inline void iwl3945_print_hex_dump(int level, void *p, u32 len) 44static inline void iwl3945_print_hex_dump(int level, void *p, u32 len)
45{ 45{
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-io.h b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
index 0b9475114618..b3fe48de3ae7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
@@ -59,7 +59,7 @@
59 * 59 *
60 */ 60 */
61 61
62#define _iwl3945_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs)) 62#define _iwl3945_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
63#ifdef CONFIG_IWL3945_DEBUG 63#ifdef CONFIG_IWL3945_DEBUG
64static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *priv, 64static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *priv,
65 u32 ofs, u32 val) 65 u32 ofs, u32 val)
@@ -73,14 +73,14 @@ static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *
73#define iwl3945_write32(priv, ofs, val) _iwl3945_write32(priv, ofs, val) 73#define iwl3945_write32(priv, ofs, val) _iwl3945_write32(priv, ofs, val)
74#endif 74#endif
75 75
76#define _iwl3945_read32(priv, ofs) readl((priv)->hw_base + (ofs)) 76#define _iwl3945_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
77#ifdef CONFIG_IWL3945_DEBUG 77#ifdef CONFIG_IWL3945_DEBUG
78static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *priv, u32 ofs) 78static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *priv, u32 ofs)
79{ 79{
80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l); 80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
81 return _iwl3945_read32(priv, ofs); 81 return _iwl3945_read32(priv, ofs);
82} 82}
83#define iwl3945_read32(priv, ofs) __iwl3945_read32(__FILE__, __LINE__, priv, ofs) 83#define iwl3945_read32(priv, ofs)__iwl3945_read32(__FILE__, __LINE__, priv, ofs)
84#else 84#else
85#define iwl3945_read32(p, o) _iwl3945_read32(p, o) 85#define iwl3945_read32(p, o) _iwl3945_read32(p, o)
86#endif 86#endif
@@ -153,28 +153,10 @@ static inline void __iwl3945_clear_bit(const char *f, u32 l,
153static inline int _iwl3945_grab_nic_access(struct iwl3945_priv *priv) 153static inline int _iwl3945_grab_nic_access(struct iwl3945_priv *priv)
154{ 154{
155 int ret; 155 int ret;
156 u32 gp_ctl;
157
158#ifdef CONFIG_IWL3945_DEBUG 156#ifdef CONFIG_IWL3945_DEBUG
159 if (atomic_read(&priv->restrict_refcnt)) 157 if (atomic_read(&priv->restrict_refcnt))
160 return 0; 158 return 0;
161#endif 159#endif
162 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
163 test_bit(STATUS_RF_KILL_SW, &priv->status)) {
164 IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
165 "wakes up NIC\n");
166
167 /* 10 msec allows time for NIC to complete its data save */
168 gp_ctl = _iwl3945_read32(priv, CSR_GP_CNTRL);
169 if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
170 IWL_DEBUG_RF_KILL("Wait for complete power-down, "
171 "gpctl = 0x%08x\n", gp_ctl);
172 mdelay(10);
173 } else
174 IWL_DEBUG_RF_KILL("power-down complete, "
175 "gpctl = 0x%08x\n", gp_ctl);
176 }
177
178 /* this bit wakes up the NIC */ 160 /* this bit wakes up the NIC */
179 _iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 161 _iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
180 ret = _iwl3945_poll_bit(priv, CSR_GP_CNTRL, 162 ret = _iwl3945_poll_bit(priv, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 10c64bdb314c..6fc5e7361f26 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -36,8 +36,6 @@
36 36
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38 38
39#include "../net/mac80211/rate.h"
40
41#include "iwl-3945.h" 39#include "iwl-3945.h"
42 40
43#define RS_NAME "iwl-3945-rs" 41#define RS_NAME "iwl-3945-rs"
@@ -65,6 +63,9 @@ struct iwl3945_rs_sta {
65 u8 ibss_sta_added; 63 u8 ibss_sta_added;
66 struct timer_list rate_scale_flush; 64 struct timer_list rate_scale_flush;
67 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT]; 65 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT];
66
67 /* used to be in sta_info */
68 int last_txrate_idx;
68}; 69};
69 70
70static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT] = { 71static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT] = {
@@ -316,9 +317,10 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
316 } 317 }
317} 318}
318 319
319static void rs_rate_init(void *priv_rate, void *priv_sta, 320static void rs_rate_init(void *priv, struct ieee80211_supported_band *sband,
320 struct ieee80211_local *local, struct sta_info *sta) 321 struct ieee80211_sta *sta, void *priv_sta)
321{ 322{
323 struct iwl3945_rs_sta *rs_sta = priv_sta;
322 int i; 324 int i;
323 325
324 IWL_DEBUG_RATE("enter\n"); 326 IWL_DEBUG_RATE("enter\n");
@@ -329,24 +331,22 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
329 * after assoc.. */ 331 * after assoc.. */
330 332
331 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) { 333 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) {
332 if (sta->supp_rates[local->hw.conf.channel->band] & (1 << i)) { 334 if (sta->supp_rates[sband->band] & (1 << i)) {
333 sta->txrate_idx = i; 335 rs_sta->last_txrate_idx = i;
334 break; 336 break;
335 } 337 }
336 } 338 }
337 339
338 sta->last_txrate_idx = sta->txrate_idx;
339
340 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ 340 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
341 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) 341 if (sband->band == IEEE80211_BAND_5GHZ)
342 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 342 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
343 343
344 IWL_DEBUG_RATE("leave\n"); 344 IWL_DEBUG_RATE("leave\n");
345} 345}
346 346
347static void *rs_alloc(struct ieee80211_local *local) 347static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
348{ 348{
349 return local->hw.priv; 349 return hw->priv;
350} 350}
351 351
352/* rate scale requires free function to be implemented */ 352/* rate scale requires free function to be implemented */
@@ -354,17 +354,24 @@ static void rs_free(void *priv)
354{ 354{
355 return; 355 return;
356} 356}
357
357static void rs_clear(void *priv) 358static void rs_clear(void *priv)
358{ 359{
359 return; 360 return;
360} 361}
361 362
362 363
363static void *rs_alloc_sta(void *priv, gfp_t gfp) 364static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
364{ 365{
365 struct iwl3945_rs_sta *rs_sta; 366 struct iwl3945_rs_sta *rs_sta;
367 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
366 int i; 368 int i;
367 369
370 /*
371 * XXX: If it's using sta->drv_priv anyway, it might
372 * as well just put all the information there.
373 */
374
368 IWL_DEBUG_RATE("enter\n"); 375 IWL_DEBUG_RATE("enter\n");
369 376
370 rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp); 377 rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp);
@@ -373,6 +380,8 @@ static void *rs_alloc_sta(void *priv, gfp_t gfp)
373 return NULL; 380 return NULL;
374 } 381 }
375 382
383 psta->rs_sta = rs_sta;
384
376 spin_lock_init(&rs_sta->lock); 385 spin_lock_init(&rs_sta->lock);
377 386
378 rs_sta->start_rate = IWL_RATE_INVALID; 387 rs_sta->start_rate = IWL_RATE_INVALID;
@@ -398,10 +407,14 @@ static void *rs_alloc_sta(void *priv, gfp_t gfp)
398 return rs_sta; 407 return rs_sta;
399} 408}
400 409
401static void rs_free_sta(void *priv, void *priv_sta) 410static void rs_free_sta(void *priv, struct ieee80211_sta *sta,
411 void *priv_sta)
402{ 412{
413 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
403 struct iwl3945_rs_sta *rs_sta = priv_sta; 414 struct iwl3945_rs_sta *rs_sta = priv_sta;
404 415
416 psta->rs_sta = NULL;
417
405 IWL_DEBUG_RATE("enter\n"); 418 IWL_DEBUG_RATE("enter\n");
406 del_timer_sync(&rs_sta->rate_scale_flush); 419 del_timer_sync(&rs_sta->rate_scale_flush);
407 kfree(rs_sta); 420 kfree(rs_sta);
@@ -443,26 +456,19 @@ static int rs_adjust_next_rate(struct iwl3945_priv *priv, int rate)
443 * NOTE: Uses iwl3945_priv->retry_rate for the # of retries attempted by 456 * NOTE: Uses iwl3945_priv->retry_rate for the # of retries attempted by
444 * the hardware for each rate. 457 * the hardware for each rate.
445 */ 458 */
446static void rs_tx_status(void *priv_rate, 459static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
447 struct net_device *dev, 460 struct ieee80211_sta *sta, void *priv_sta,
448 struct sk_buff *skb) 461 struct sk_buff *skb)
449{ 462{
450 u8 retries, current_count; 463 u8 retries, current_count;
451 int scale_rate_index, first_index, last_index; 464 int scale_rate_index, first_index, last_index;
452 unsigned long flags; 465 unsigned long flags;
453 struct sta_info *sta;
454 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
455 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate; 466 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate;
456 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 467 struct iwl3945_rs_sta *rs_sta = priv_sta;
457 struct iwl3945_rs_sta *rs_sta;
458 struct ieee80211_supported_band *sband;
459 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
460 469
461 IWL_DEBUG_RATE("enter\n"); 470 IWL_DEBUG_RATE("enter\n");
462 471
463 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
464
465
466 retries = info->status.retry_count; 472 retries = info->status.retry_count;
467 first_index = sband->bitrates[info->tx_rate_idx].hw_value; 473 first_index = sband->bitrates[info->tx_rate_idx].hw_value;
468 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { 474 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
@@ -470,17 +476,11 @@ static void rs_tx_status(void *priv_rate,
470 return; 476 return;
471 } 477 }
472 478
473 rcu_read_lock(); 479 if (!priv_sta) {
474
475 sta = sta_info_get(local, hdr->addr1);
476 if (!sta || !sta->rate_ctrl_priv) {
477 rcu_read_unlock();
478 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 480 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
479 return; 481 return;
480 } 482 }
481 483
482 rs_sta = (void *)sta->rate_ctrl_priv;
483
484 rs_sta->tx_packets++; 484 rs_sta->tx_packets++;
485 485
486 scale_rate_index = first_index; 486 scale_rate_index = first_index;
@@ -547,8 +547,6 @@ static void rs_tx_status(void *priv_rate,
547 547
548 spin_unlock_irqrestore(&rs_sta->lock, flags); 548 spin_unlock_irqrestore(&rs_sta->lock, flags);
549 549
550 rcu_read_unlock();
551
552 IWL_DEBUG_RATE("leave\n"); 550 IWL_DEBUG_RATE("leave\n");
553 551
554 return; 552 return;
@@ -632,16 +630,15 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
632 * rate table and must reference the driver allocated rate table 630 * rate table and must reference the driver allocated rate table
633 * 631 *
634 */ 632 */
635static void rs_get_rate(void *priv_rate, struct net_device *dev, 633static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
636 struct ieee80211_supported_band *sband, 634 struct ieee80211_sta *sta, void *priv_sta,
637 struct sk_buff *skb, 635 struct sk_buff *skb, struct rate_selection *sel)
638 struct rate_selection *sel)
639{ 636{
640 u8 low = IWL_RATE_INVALID; 637 u8 low = IWL_RATE_INVALID;
641 u8 high = IWL_RATE_INVALID; 638 u8 high = IWL_RATE_INVALID;
642 u16 high_low; 639 u16 high_low;
643 int index; 640 int index;
644 struct iwl3945_rs_sta *rs_sta; 641 struct iwl3945_rs_sta *rs_sta = priv_sta;
645 struct iwl3945_rate_scale_data *window = NULL; 642 struct iwl3945_rate_scale_data *window = NULL;
646 int current_tpt = IWL_INV_TPT; 643 int current_tpt = IWL_INV_TPT;
647 int low_tpt = IWL_INV_TPT; 644 int low_tpt = IWL_INV_TPT;
@@ -649,40 +646,31 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
649 u32 fail_count; 646 u32 fail_count;
650 s8 scale_action = 0; 647 s8 scale_action = 0;
651 unsigned long flags; 648 unsigned long flags;
652 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
653 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 649 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
654 struct sta_info *sta;
655 u16 fc, rate_mask; 650 u16 fc, rate_mask;
656 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate; 651 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
657 DECLARE_MAC_BUF(mac); 652 DECLARE_MAC_BUF(mac);
658 653
659 IWL_DEBUG_RATE("enter\n"); 654 IWL_DEBUG_RATE("enter\n");
660 655
661 rcu_read_lock();
662
663 sta = sta_info_get(local, hdr->addr1);
664
665 /* Send management frames and broadcast/multicast data using lowest 656 /* Send management frames and broadcast/multicast data using lowest
666 * rate. */ 657 * rate. */
667 fc = le16_to_cpu(hdr->frame_control); 658 fc = le16_to_cpu(hdr->frame_control);
668 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 659 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
669 is_multicast_ether_addr(hdr->addr1) || 660 is_multicast_ether_addr(hdr->addr1) ||
670 !sta || !sta->rate_ctrl_priv) { 661 !sta || !priv_sta) {
671 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 662 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
672 sel->rate_idx = rate_lowest_index(local, sband, sta); 663 sel->rate_idx = rate_lowest_index(sband, sta);
673 rcu_read_unlock();
674 return; 664 return;
675 } 665 }
676 666
677 rate_mask = sta->supp_rates[sband->band]; 667 rate_mask = sta->supp_rates[sband->band];
678 index = min(sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1); 668 index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
679 669
680 if (sband->band == IEEE80211_BAND_5GHZ) 670 if (sband->band == IEEE80211_BAND_5GHZ)
681 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; 671 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
682 672
683 rs_sta = (void *)sta->rate_ctrl_priv; 673 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
684
685 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
686 !rs_sta->ibss_sta_added) { 674 !rs_sta->ibss_sta_added) {
687 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 675 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
688 676
@@ -803,17 +791,13 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
803 791
804 out: 792 out:
805 793
806 sta->last_txrate_idx = index; 794 rs_sta->last_txrate_idx = index;
807 if (sband->band == IEEE80211_BAND_5GHZ) 795 if (sband->band == IEEE80211_BAND_5GHZ)
808 sta->txrate_idx = sta->last_txrate_idx - IWL_FIRST_OFDM_RATE; 796 sel->rate_idx = rs_sta->last_txrate_idx - IWL_FIRST_OFDM_RATE;
809 else 797 else
810 sta->txrate_idx = sta->last_txrate_idx; 798 sel->rate_idx = rs_sta->last_txrate_idx;
811
812 rcu_read_unlock();
813 799
814 IWL_DEBUG_RATE("leave: %d\n", index); 800 IWL_DEBUG_RATE("leave: %d\n", index);
815
816 sel->rate_idx = sta->txrate_idx;
817} 801}
818 802
819static struct rate_control_ops rs_ops = { 803static struct rate_control_ops rs_ops = {
@@ -829,114 +813,28 @@ static struct rate_control_ops rs_ops = {
829 .free_sta = rs_free_sta, 813 .free_sta = rs_free_sta,
830}; 814};
831 815
832int iwl3945_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
833{
834 struct ieee80211_local *local = hw_to_local(hw);
835 struct iwl3945_priv *priv = hw->priv;
836 struct iwl3945_rs_sta *rs_sta;
837 struct sta_info *sta;
838 unsigned long flags;
839 int count = 0, i;
840 u32 samples = 0, success = 0, good = 0;
841 unsigned long now = jiffies;
842 u32 max_time = 0;
843
844 rcu_read_lock();
845
846 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
847 if (!sta || !sta->rate_ctrl_priv) {
848 if (sta)
849 IWL_DEBUG_RATE("leave - no private rate data!\n");
850 else
851 IWL_DEBUG_RATE("leave - no station!\n");
852 rcu_read_unlock();
853 return sprintf(buf, "station %d not found\n", sta_id);
854 }
855
856 rs_sta = (void *)sta->rate_ctrl_priv;
857 spin_lock_irqsave(&rs_sta->lock, flags);
858 i = IWL_RATE_54M_INDEX;
859 while (1) {
860 u64 mask;
861 int j;
862
863 count +=
864 sprintf(&buf[count], " %2dMbs: ", iwl3945_rates[i].ieee / 2);
865
866 mask = (1ULL << (IWL_RATE_MAX_WINDOW - 1));
867 for (j = 0; j < IWL_RATE_MAX_WINDOW; j++, mask >>= 1)
868 buf[count++] =
869 (rs_sta->win[i].data & mask) ? '1' : '0';
870
871 samples += rs_sta->win[i].counter;
872 good += rs_sta->win[i].success_counter;
873 success += rs_sta->win[i].success_counter *
874 iwl3945_rates[i].ieee;
875
876 if (rs_sta->win[i].stamp) {
877 int delta =
878 jiffies_to_msecs(now - rs_sta->win[i].stamp);
879
880 if (delta > max_time)
881 max_time = delta;
882
883 count += sprintf(&buf[count], "%5dms\n", delta);
884 } else
885 buf[count++] = '\n';
886
887 j = iwl3945_get_prev_ieee_rate(i);
888 if (j == i)
889 break;
890 i = j;
891 }
892 spin_unlock_irqrestore(&rs_sta->lock, flags);
893 rcu_read_unlock();
894
895 /* Display the average rate of all samples taken.
896 *
897 * NOTE: We multiple # of samples by 2 since the IEEE measurement
898 * added from iwl3945_rates is actually 2X the rate */
899 if (samples)
900 count += sprintf(
901 &buf[count],
902 "\nAverage rate is %3d.%02dMbs over last %4dms\n"
903 "%3d%% success (%d good packets over %d tries)\n",
904 success / (2 * samples), (success * 5 / samples) % 10,
905 max_time, good * 100 / samples, good, samples);
906 else
907 count += sprintf(&buf[count], "\nAverage rate: 0Mbs\n");
908
909 return count;
910}
911
912void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) 816void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
913{ 817{
914 struct iwl3945_priv *priv = hw->priv; 818 struct iwl3945_priv *priv = hw->priv;
915 s32 rssi = 0; 819 s32 rssi = 0;
916 unsigned long flags; 820 unsigned long flags;
917 struct ieee80211_local *local = hw_to_local(hw);
918 struct iwl3945_rs_sta *rs_sta; 821 struct iwl3945_rs_sta *rs_sta;
919 struct sta_info *sta; 822 struct ieee80211_sta *sta;
823 struct iwl3945_sta_priv *psta;
920 824
921 IWL_DEBUG_RATE("enter\n"); 825 IWL_DEBUG_RATE("enter\n");
922 826
923 if (!local->rate_ctrl->ops->name ||
924 strcmp(local->rate_ctrl->ops->name, RS_NAME)) {
925 IWL_WARNING("iwl-3945-rs not selected as rate control algo!\n");
926 IWL_DEBUG_RATE("leave - mac80211 picked the wrong RC algo.\n");
927 return;
928 }
929
930 rcu_read_lock(); 827 rcu_read_lock();
931 828
932 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); 829 sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr);
933 if (!sta || !sta->rate_ctrl_priv) { 830 psta = (void *) sta->drv_priv;
831 if (!sta || !psta) {
934 IWL_DEBUG_RATE("leave - no private rate data!\n"); 832 IWL_DEBUG_RATE("leave - no private rate data!\n");
935 rcu_read_unlock(); 833 rcu_read_unlock();
936 return; 834 return;
937 } 835 }
938 836
939 rs_sta = (void *)sta->rate_ctrl_priv; 837 rs_sta = psta->rs_sta;
940 838
941 spin_lock_irqsave(&rs_sta->lock, flags); 839 spin_lock_irqsave(&rs_sta->lock, flags);
942 840
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
index f085d330bdcf..98b17ae6ef24 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
@@ -176,15 +176,6 @@ static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
176} 176}
177 177
178/** 178/**
179 * iwl3945_fill_rs_info - Fill an output text buffer with the rate representation
180 *
181 * NOTE: This is provided as a quick mechanism for a user to visualize
182 * the performance of the rate control algorithm and is not meant to be
183 * parsed software.
184 */
185extern int iwl3945_fill_rs_info(struct ieee80211_hw *, char *buf, u8 sta_id);
186
187/**
188 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info 179 * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
189 * 180 *
190 * The specific throughput table used is based on the type of network 181 * The specific throughput table used is based on the type of network
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 3f51f3635344..7ca5627cc078 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -520,10 +520,10 @@ static int iwl3945_is_network_packet(struct iwl3945_priv *priv,
520 /* Filter incoming packets to determine if they are targeted toward 520 /* Filter incoming packets to determine if they are targeted toward
521 * this network, discarding packets coming from ourselves */ 521 * this network, discarding packets coming from ourselves */
522 switch (priv->iw_mode) { 522 switch (priv->iw_mode) {
523 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ 523 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
524 /* packets to our IBSS update information */ 524 /* packets to our IBSS update information */
525 return !compare_ether_addr(header->addr3, priv->bssid); 525 return !compare_ether_addr(header->addr3, priv->bssid);
526 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */ 526 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
527 /* packets to our IBSS update information */ 527 /* packets to our IBSS update information */
528 return !compare_ether_addr(header->addr2, priv->bssid); 528 return !compare_ether_addr(header->addr2, priv->bssid);
529 default: 529 default:
@@ -531,99 +531,6 @@ static int iwl3945_is_network_packet(struct iwl3945_priv *priv,
531 } 531 }
532} 532}
533 533
534static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
535 struct sk_buff *skb,
536 struct iwl3945_rx_frame_hdr *rx_hdr,
537 struct ieee80211_rx_status *stats)
538{
539 /* First cache any information we need before we overwrite
540 * the information provided in the skb from the hardware */
541 s8 signal = stats->signal;
542 s8 noise = 0;
543 int rate = stats->rate_idx;
544 u64 tsf = stats->mactime;
545 __le16 phy_flags_hw = rx_hdr->phy_flags, antenna;
546
547 struct iwl3945_rt_rx_hdr {
548 struct ieee80211_radiotap_header rt_hdr;
549 __le64 rt_tsf; /* TSF */
550 u8 rt_flags; /* radiotap packet flags */
551 u8 rt_rate; /* rate in 500kb/s */
552 __le16 rt_channelMHz; /* channel in MHz */
553 __le16 rt_chbitmask; /* channel bitfield */
554 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
555 s8 rt_dbmnoise;
556 u8 rt_antenna; /* antenna number */
557 } __attribute__ ((packed)) *iwl3945_rt;
558
559 if (skb_headroom(skb) < sizeof(*iwl3945_rt)) {
560 if (net_ratelimit())
561 printk(KERN_ERR "not enough headroom [%d] for "
562 "radiotap head [%zd]\n",
563 skb_headroom(skb), sizeof(*iwl3945_rt));
564 return;
565 }
566
567 /* put radiotap header in front of 802.11 header and data */
568 iwl3945_rt = (void *)skb_push(skb, sizeof(*iwl3945_rt));
569
570 /* initialise radiotap header */
571 iwl3945_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
572 iwl3945_rt->rt_hdr.it_pad = 0;
573
574 /* total header + data */
575 put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len);
576
577 /* Indicate all the fields we add to the radiotap header */
578 put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
579 (1 << IEEE80211_RADIOTAP_FLAGS) |
580 (1 << IEEE80211_RADIOTAP_RATE) |
581 (1 << IEEE80211_RADIOTAP_CHANNEL) |
582 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
583 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
584 (1 << IEEE80211_RADIOTAP_ANTENNA),
585 &iwl3945_rt->rt_hdr.it_present);
586
587 /* Zero the flags, we'll add to them as we go */
588 iwl3945_rt->rt_flags = 0;
589
590 put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf);
591
592 iwl3945_rt->rt_dbmsignal = signal;
593 iwl3945_rt->rt_dbmnoise = noise;
594
595 /* Convert the channel frequency and set the flags */
596 put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz);
597 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
598 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
599 &iwl3945_rt->rt_chbitmask);
600 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
601 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
602 &iwl3945_rt->rt_chbitmask);
603 else /* 802.11g */
604 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
605 &iwl3945_rt->rt_chbitmask);
606
607 if (rate == -1)
608 iwl3945_rt->rt_rate = 0;
609 else {
610 if (stats->band == IEEE80211_BAND_5GHZ)
611 rate += IWL_FIRST_OFDM_RATE;
612
613 iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee;
614 }
615
616 /* antenna number */
617 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
618 iwl3945_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
619
620 /* set the preamble flag if we have it */
621 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
622 iwl3945_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
623
624 stats->flag |= RX_FLAG_RADIOTAP;
625}
626
627static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv, 534static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
628 struct iwl3945_rx_mem_buffer *rxb, 535 struct iwl3945_rx_mem_buffer *rxb,
629 struct ieee80211_rx_status *stats) 536 struct ieee80211_rx_status *stats)
@@ -657,9 +564,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl3945_priv *priv,
657 iwl3945_set_decrypted_flag(priv, rxb->skb, 564 iwl3945_set_decrypted_flag(priv, rxb->skb,
658 le32_to_cpu(rx_end->status), stats); 565 le32_to_cpu(rx_end->status), stats);
659 566
660 if (priv->add_radiotap)
661 iwl3945_add_radiotap(priv, rxb->skb, rx_hdr, stats);
662
663#ifdef CONFIG_IWL3945_LEDS 567#ifdef CONFIG_IWL3945_LEDS
664 if (ieee80211_is_data(hdr->frame_control)) 568 if (ieee80211_is_data(hdr->frame_control))
665 priv->rxtxpackets += len; 569 priv->rxtxpackets += len;
@@ -684,7 +588,6 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
684 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff); 588 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
685 u8 network_packet; 589 u8 network_packet;
686 590
687 rx_status.antenna = 0;
688 rx_status.flag = 0; 591 rx_status.flag = 0;
689 rx_status.mactime = le64_to_cpu(rx_end->timestamp); 592 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
690 rx_status.freq = 593 rx_status.freq =
@@ -696,6 +599,13 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
696 if (rx_status.band == IEEE80211_BAND_5GHZ) 599 if (rx_status.band == IEEE80211_BAND_5GHZ)
697 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; 600 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
698 601
602 rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags &
603 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
604
605 /* set the preamble flag if appropriate */
606 if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
607 rx_status.flag |= RX_FLAG_SHORTPRE;
608
699 if ((unlikely(rx_stats->phy_count > 20))) { 609 if ((unlikely(rx_stats->phy_count > 20))) {
700 IWL_DEBUG_DROP 610 IWL_DEBUG_DROP
701 ("dsp size out of range [0,20]: " 611 ("dsp size out of range [0,20]: "
@@ -771,100 +681,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
771 priv->last_rx_noise = rx_status.noise; 681 priv->last_rx_noise = rx_status.noise;
772 } 682 }
773 683
774 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 684 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
775 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
776 return;
777 }
778
779 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
780 case IEEE80211_FTYPE_MGMT:
781 switch (le16_to_cpu(header->frame_control) &
782 IEEE80211_FCTL_STYPE) {
783 case IEEE80211_STYPE_PROBE_RESP:
784 case IEEE80211_STYPE_BEACON:{
785 /* If this is a beacon or probe response for
786 * our network then cache the beacon
787 * timestamp */
788 if ((((priv->iw_mode == IEEE80211_IF_TYPE_STA)
789 && !compare_ether_addr(header->addr2,
790 priv->bssid)) ||
791 ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
792 && !compare_ether_addr(header->addr3,
793 priv->bssid)))) {
794 struct ieee80211_mgmt *mgmt =
795 (struct ieee80211_mgmt *)header;
796 __le32 *pos;
797 pos = (__le32 *)&mgmt->u.beacon.
798 timestamp;
799 priv->timestamp0 = le32_to_cpu(pos[0]);
800 priv->timestamp1 = le32_to_cpu(pos[1]);
801 priv->beacon_int = le16_to_cpu(
802 mgmt->u.beacon.beacon_int);
803 if (priv->call_post_assoc_from_beacon &&
804 (priv->iw_mode ==
805 IEEE80211_IF_TYPE_STA))
806 queue_work(priv->workqueue,
807 &priv->post_associate.work);
808
809 priv->call_post_assoc_from_beacon = 0;
810 }
811
812 break;
813 }
814
815 case IEEE80211_STYPE_ACTION:
816 /* TODO: Parse 802.11h frames for CSA... */
817 break;
818
819 /*
820 * TODO: Use the new callback function from
821 * mac80211 instead of sniffing these packets.
822 */
823 case IEEE80211_STYPE_ASSOC_RESP:
824 case IEEE80211_STYPE_REASSOC_RESP:{
825 struct ieee80211_mgmt *mgnt =
826 (struct ieee80211_mgmt *)header;
827
828 /* We have just associated, give some
829 * time for the 4-way handshake if
830 * any. Don't start scan too early. */
831 priv->next_scan_jiffies = jiffies +
832 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
833
834 priv->assoc_id = (~((1 << 15) | (1 << 14)) &
835 le16_to_cpu(mgnt->u.
836 assoc_resp.aid));
837 priv->assoc_capability =
838 le16_to_cpu(mgnt->u.assoc_resp.capab_info);
839 if (priv->beacon_int)
840 queue_work(priv->workqueue,
841 &priv->post_associate.work);
842 else
843 priv->call_post_assoc_from_beacon = 1;
844 break;
845 }
846
847 case IEEE80211_STYPE_PROBE_REQ:{
848 DECLARE_MAC_BUF(mac1);
849 DECLARE_MAC_BUF(mac2);
850 DECLARE_MAC_BUF(mac3);
851 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
852 IWL_DEBUG_DROP
853 ("Dropping (non network): %s"
854 ", %s, %s\n",
855 print_mac(mac1, header->addr1),
856 print_mac(mac2, header->addr2),
857 print_mac(mac3, header->addr3));
858 return;
859 }
860 }
861
862 case IEEE80211_FTYPE_DATA:
863 /* fall through */
864 default:
865 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
866 break;
867 }
868} 685}
869 686
870int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr, 687int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl3945_priv *priv, void *ptr,
@@ -990,7 +807,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
990 807
991 priv->stations[sta_id].current_rate.rate_n_flags = rate; 808 priv->stations[sta_id].current_rate.rate_n_flags = rate;
992 809
993 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 810 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
994 (sta_id != priv->hw_setting.bcast_sta_id) && 811 (sta_id != priv->hw_setting.bcast_sta_id) &&
995 (sta_id != IWL_MULTICAST_ID)) 812 (sta_id != IWL_MULTICAST_ID))
996 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate; 813 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index fa81ba1af3d3..bdd32475b99c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -73,6 +73,10 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
73extern int iwl3945_param_hwcrypto; 73extern int iwl3945_param_hwcrypto;
74extern int iwl3945_param_queues_num; 74extern int iwl3945_param_queues_num;
75 75
76struct iwl3945_sta_priv {
77 struct iwl3945_rs_sta *rs_sta;
78};
79
76enum iwl3945_antenna { 80enum iwl3945_antenna {
77 IWL_ANTENNA_DIVERSITY, 81 IWL_ANTENNA_DIVERSITY,
78 IWL_ANTENNA_MAIN, 82 IWL_ANTENNA_MAIN,
@@ -707,7 +711,6 @@ struct iwl3945_priv {
707 711
708 enum ieee80211_band band; 712 enum ieee80211_band band;
709 int alloc_rxb_skb; 713 int alloc_rxb_skb;
710 bool add_radiotap;
711 714
712 void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv, 715 void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv,
713 struct iwl3945_rx_mem_buffer *rxb); 716 struct iwl3945_rx_mem_buffer *rxb);
@@ -852,7 +855,7 @@ struct iwl3945_priv {
852 /* eeprom */ 855 /* eeprom */
853 struct iwl3945_eeprom eeprom; 856 struct iwl3945_eeprom eeprom;
854 857
855 enum ieee80211_if_types iw_mode; 858 enum nl80211_iftype iw_mode;
856 859
857 struct sk_buff *ibss_beacon; 860 struct sk_buff *ibss_beacon;
858 861
@@ -895,7 +898,6 @@ struct iwl3945_priv {
895 struct delayed_work thermal_periodic; 898 struct delayed_work thermal_periodic;
896 struct delayed_work gather_stats; 899 struct delayed_work gather_stats;
897 struct delayed_work scan_check; 900 struct delayed_work scan_check;
898 struct delayed_work post_associate;
899 901
900#define IWL_DEFAULT_TX_POWER 0x0F 902#define IWL_DEFAULT_TX_POWER 0x0F
901 s8 user_txpower_limit; 903 s8 user_txpower_limit;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index fce950f4163c..f4793a609443 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -98,16 +98,17 @@
98#define IWL_RSSI_OFFSET 44 98#define IWL_RSSI_OFFSET 44
99 99
100 100
101#include "iwl-commands.h"
102 101
103/* PCI registers */ 102/* PCI registers */
104#define PCI_LINK_CTRL 0x0F0 /* 1 byte */ 103#define PCI_CFG_RETRY_TIMEOUT 0x041
105#define PCI_POWER_SOURCE 0x0C8 104#define PCI_CFG_POWER_SOURCE 0x0C8
106#define PCI_REG_WUM8 0x0E8 105#define PCI_REG_WUM8 0x0E8
106#define PCI_CFG_LINK_CTRL 0x0F0
107 107
108/* PCI register values */ 108/* PCI register values */
109#define PCI_LINK_VAL_L0S_EN 0x01 109#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
110#define PCI_LINK_VAL_L1_EN 0x02 110#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
111#define PCI_CFG_CMD_REG_INT_DIS_MSK 0x04
111#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 112#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
112 113
113#define TFD_QUEUE_SIZE_MAX (256) 114#define TFD_QUEUE_SIZE_MAX (256)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 23fed3298962..9838de5f4369 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -399,7 +399,7 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
399 unsigned long flags; 399 unsigned long flags;
400 u32 val; 400 u32 val;
401 u16 radio_cfg; 401 u16 radio_cfg;
402 u8 val_link; 402 u16 link;
403 403
404 spin_lock_irqsave(&priv->lock, flags); 404 spin_lock_irqsave(&priv->lock, flags);
405 405
@@ -410,10 +410,10 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
410 val & ~(1 << 11)); 410 val & ~(1 << 11));
411 } 411 }
412 412
413 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 413 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);
414 414
415 /* L1 is enabled by BIOS */ 415 /* L1 is enabled by BIOS */
416 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN) 416 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
417 /* diable L0S disabled L1A enabled */ 417 /* diable L0S disabled L1A enabled */
418 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 418 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
419 else 419 else
@@ -1607,8 +1607,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1607 return ret; 1607 return ret;
1608} 1608}
1609 1609
1610 1610#ifdef IEEE80211_CONF_CHANNEL_SWITCH
1611int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) 1611static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1612{ 1612{
1613 int rc; 1613 int rc;
1614 u8 band = 0; 1614 u8 band = 0;
@@ -1648,6 +1648,7 @@ int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1648 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1648 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1649 return rc; 1649 return rc;
1650} 1650}
1651#endif
1651 1652
1652static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv) 1653static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
1653{ 1654{
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 17d4f31c5934..c479ee211c5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -129,6 +129,13 @@ struct iwl5000_shared {
129 __le32 padding2; 129 __le32 padding2;
130} __attribute__ ((packed)); 130} __attribute__ ((packed));
131 131
132/* calibrations defined for 5000 */
133/* defines the order in which results should be sent to the runtime uCode */
134enum iwl5000_calib {
135 IWL5000_CALIB_LO,
136 IWL5000_CALIB_TX_IQ,
137 IWL5000_CALIB_TX_IQ_PERD,
138};
132 139
133#endif /* __iwl_5000_hw_h__ */ 140#endif /* __iwl_5000_hw_h__ */
134 141
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index b08036a9d894..f6003e7996af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -209,14 +209,14 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
209{ 209{
210 unsigned long flags; 210 unsigned long flags;
211 u16 radio_cfg; 211 u16 radio_cfg;
212 u8 val_link; 212 u16 link;
213 213
214 spin_lock_irqsave(&priv->lock, flags); 214 spin_lock_irqsave(&priv->lock, flags);
215 215
216 pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link); 216 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &link);
217 217
218 /* L1 is enabled by BIOS */ 218 /* L1 is enabled by BIOS */
219 if ((val_link & PCI_LINK_VAL_L1_EN) == PCI_LINK_VAL_L1_EN) 219 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
220 /* diable L0S disabled L1A enabled */ 220 /* diable L0S disabled L1A enabled */
221 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 221 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
222 else 222 else
@@ -445,48 +445,6 @@ static int iwl5000_send_Xtal_calib(struct iwl_priv *priv)
445 sizeof(cal_cmd), &cal_cmd); 445 sizeof(cal_cmd), &cal_cmd);
446} 446}
447 447
448static int iwl5000_send_calib_results(struct iwl_priv *priv)
449{
450 int ret = 0;
451
452 struct iwl_host_cmd hcmd = {
453 .id = REPLY_PHY_CALIBRATION_CMD,
454 .meta.flags = CMD_SIZE_HUGE,
455 };
456
457 if (priv->calib_results.lo_res) {
458 hcmd.len = priv->calib_results.lo_res_len;
459 hcmd.data = priv->calib_results.lo_res;
460 ret = iwl_send_cmd_sync(priv, &hcmd);
461
462 if (ret)
463 goto err;
464 }
465
466 if (priv->calib_results.tx_iq_res) {
467 hcmd.len = priv->calib_results.tx_iq_res_len;
468 hcmd.data = priv->calib_results.tx_iq_res;
469 ret = iwl_send_cmd_sync(priv, &hcmd);
470
471 if (ret)
472 goto err;
473 }
474
475 if (priv->calib_results.tx_iq_perd_res) {
476 hcmd.len = priv->calib_results.tx_iq_perd_res_len;
477 hcmd.data = priv->calib_results.tx_iq_perd_res;
478 ret = iwl_send_cmd_sync(priv, &hcmd);
479
480 if (ret)
481 goto err;
482 }
483
484 return 0;
485err:
486 IWL_ERROR("Error %d\n", ret);
487 return ret;
488}
489
490static int iwl5000_send_calib_cfg(struct iwl_priv *priv) 448static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
491{ 449{
492 struct iwl5000_calib_cfg_cmd calib_cfg_cmd; 450 struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
@@ -511,33 +469,30 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
511 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 469 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
512 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw; 470 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
513 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK; 471 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
514 472 int index;
515 iwl_free_calib_results(priv);
516 473
517 /* reduce the size of the length field itself */ 474 /* reduce the size of the length field itself */
518 len -= 4; 475 len -= 4;
519 476
477 /* Define the order in which the results will be sent to the runtime
478 * uCode. iwl_send_calib_results sends them in a row according to their
479 * index. We sort them here */
520 switch (hdr->op_code) { 480 switch (hdr->op_code) {
521 case IWL5000_PHY_CALIBRATE_LO_CMD: 481 case IWL5000_PHY_CALIBRATE_LO_CMD:
522 priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC); 482 index = IWL5000_CALIB_LO;
523 priv->calib_results.lo_res_len = len;
524 memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
525 break; 483 break;
526 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD: 484 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
527 priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC); 485 index = IWL5000_CALIB_TX_IQ;
528 priv->calib_results.tx_iq_res_len = len;
529 memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
530 break; 486 break;
531 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD: 487 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
532 priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC); 488 index = IWL5000_CALIB_TX_IQ_PERD;
533 priv->calib_results.tx_iq_perd_res_len = len;
534 memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
535 break; 489 break;
536 default: 490 default:
537 IWL_ERROR("Unknown calibration notification %d\n", 491 IWL_ERROR("Unknown calibration notification %d\n",
538 hdr->op_code); 492 hdr->op_code);
539 return; 493 return;
540 } 494 }
495 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
541} 496}
542 497
543static void iwl5000_rx_calib_complete(struct iwl_priv *priv, 498static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
@@ -832,7 +787,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
832 iwl5000_send_Xtal_calib(priv); 787 iwl5000_send_Xtal_calib(priv);
833 788
834 if (priv->ucode_type == UCODE_RT) 789 if (priv->ucode_type == UCODE_RT)
835 iwl5000_send_calib_results(priv); 790 iwl_send_calib_results(priv);
836 791
837 return 0; 792 return 0;
838} 793}
@@ -1614,6 +1569,8 @@ struct iwl_cfg iwl5350_agn_cfg = {
1614 .mod_params = &iwl50_mod_params, 1569 .mod_params = &iwl50_mod_params,
1615}; 1570};
1616 1571
1572MODULE_FIRMWARE("iwlwifi-5000" IWL5000_UCODE_API ".ucode");
1573
1617module_param_named(disable50, iwl50_mod_params.disable, int, 0444); 1574module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
1618MODULE_PARM_DESC(disable50, 1575MODULE_PARM_DESC(disable50,
1619 "manually disable the 50XX radio (default 0 [radio on])"); 1576 "manually disable the 50XX radio (default 0 [radio on])");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 90a2b6dee7c0..93944de923ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -35,8 +35,6 @@
35 35
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37 37
38#include "../net/mac80211/rate.h"
39
40#include "iwl-dev.h" 38#include "iwl-dev.h"
41#include "iwl-sta.h" 39#include "iwl-sta.h"
42#include "iwl-core.h" 40#include "iwl-core.h"
@@ -163,12 +161,15 @@ struct iwl_lq_sta {
163 u32 dbg_fixed_rate; 161 u32 dbg_fixed_rate;
164#endif 162#endif
165 struct iwl_priv *drv; 163 struct iwl_priv *drv;
164
165 /* used to be in sta_info */
166 int last_txrate_idx;
166}; 167};
167 168
168static void rs_rate_scale_perform(struct iwl_priv *priv, 169static void rs_rate_scale_perform(struct iwl_priv *priv,
169 struct net_device *dev,
170 struct ieee80211_hdr *hdr, 170 struct ieee80211_hdr *hdr,
171 struct sta_info *sta); 171 struct ieee80211_sta *sta,
172 struct iwl_lq_sta *lq_sta);
172static void rs_fill_link_cmd(const struct iwl_priv *priv, 173static void rs_fill_link_cmd(const struct iwl_priv *priv,
173 struct iwl_lq_sta *lq_sta, u32 rate_n_flags); 174 struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
174 175
@@ -354,17 +355,11 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
354 355
355static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, 356static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
356 struct iwl_lq_sta *lq_data, u8 tid, 357 struct iwl_lq_sta *lq_data, u8 tid,
357 struct sta_info *sta) 358 struct ieee80211_sta *sta)
358{ 359{
359 unsigned long state;
360 DECLARE_MAC_BUF(mac); 360 DECLARE_MAC_BUF(mac);
361 361
362 spin_lock_bh(&sta->lock); 362 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
363 state = sta->ampdu_mlme.tid_state_tx[tid];
364 spin_unlock_bh(&sta->lock);
365
366 if (state == HT_AGG_STATE_IDLE &&
367 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
368 IWL_DEBUG_HT("Starting Tx agg: STA: %s tid: %d\n", 363 IWL_DEBUG_HT("Starting Tx agg: STA: %s tid: %d\n",
369 print_mac(mac, sta->addr), tid); 364 print_mac(mac, sta->addr), tid);
370 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid); 365 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
@@ -373,7 +368,7 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
373 368
374static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, 369static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
375 struct iwl_lq_sta *lq_data, 370 struct iwl_lq_sta *lq_data,
376 struct sta_info *sta) 371 struct ieee80211_sta *sta)
377{ 372{
378 if ((tid < TID_MAX_LOAD_COUNT)) 373 if ((tid < TID_MAX_LOAD_COUNT))
379 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 374 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
@@ -436,7 +431,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
436 /* Shift bitmap by one frame (throw away oldest history), 431 /* Shift bitmap by one frame (throw away oldest history),
437 * OR in "1", and increment "success" if this 432 * OR in "1", and increment "success" if this
438 * frame was successful. */ 433 * frame was successful. */
439 window->data <<= 1;; 434 window->data <<= 1;
440 if (successes > 0) { 435 if (successes > 0) {
441 window->success_counter++; 436 window->success_counter++;
442 window->data |= 0x1; 437 window->data |= 0x1;
@@ -773,7 +768,8 @@ out:
773/* 768/*
774 * mac80211 sends us Tx status 769 * mac80211 sends us Tx status
775 */ 770 */
776static void rs_tx_status(void *priv_rate, struct net_device *dev, 771static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
772 struct ieee80211_sta *sta, void *priv_sta,
777 struct sk_buff *skb) 773 struct sk_buff *skb)
778{ 774{
779 int status; 775 int status;
@@ -781,11 +777,9 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
781 int rs_index, index = 0; 777 int rs_index, index = 0;
782 struct iwl_lq_sta *lq_sta; 778 struct iwl_lq_sta *lq_sta;
783 struct iwl_link_quality_cmd *table; 779 struct iwl_link_quality_cmd *table;
784 struct sta_info *sta;
785 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 780 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
786 struct iwl_priv *priv = (struct iwl_priv *)priv_rate; 781 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
787 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 782 struct ieee80211_hw *hw = priv->hw;
788 struct ieee80211_hw *hw = local_to_hw(local);
789 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 783 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
790 struct iwl_rate_scale_data *window = NULL; 784 struct iwl_rate_scale_data *window = NULL;
791 struct iwl_rate_scale_data *search_win = NULL; 785 struct iwl_rate_scale_data *search_win = NULL;
@@ -811,17 +805,9 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
811 if (retries > 15) 805 if (retries > 15)
812 retries = 15; 806 retries = 15;
813 807
814 rcu_read_lock(); 808 lq_sta = (struct iwl_lq_sta *)priv_sta;
815 809
816 sta = sta_info_get(local, hdr->addr1); 810 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
817
818 if (!sta || !sta->rate_ctrl_priv)
819 goto out;
820
821
822 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv;
823
824 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
825 !lq_sta->ibss_sta_added) 811 !lq_sta->ibss_sta_added)
826 goto out; 812 goto out;
827 813
@@ -965,9 +951,8 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
965 } 951 }
966 952
967 /* See if there's a better rate or modulation mode to try. */ 953 /* See if there's a better rate or modulation mode to try. */
968 rs_rate_scale_perform(priv, dev, hdr, sta); 954 rs_rate_scale_perform(priv, hdr, sta, lq_sta);
969out: 955out:
970 rcu_read_unlock();
971 return; 956 return;
972} 957}
973 958
@@ -1128,6 +1113,7 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1128 1113
1129 /* Higher rate not available, use the original */ 1114 /* Higher rate not available, use the original */
1130 } else { 1115 } else {
1116 new_rate = rate;
1131 break; 1117 break;
1132 } 1118 }
1133 } 1119 }
@@ -1142,7 +1128,7 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
1142static int rs_switch_to_mimo2(struct iwl_priv *priv, 1128static int rs_switch_to_mimo2(struct iwl_priv *priv,
1143 struct iwl_lq_sta *lq_sta, 1129 struct iwl_lq_sta *lq_sta,
1144 struct ieee80211_conf *conf, 1130 struct ieee80211_conf *conf,
1145 struct sta_info *sta, 1131 struct ieee80211_sta *sta,
1146 struct iwl_scale_tbl_info *tbl, int index) 1132 struct iwl_scale_tbl_info *tbl, int index)
1147{ 1133{
1148 u16 rate_mask; 1134 u16 rate_mask;
@@ -1153,8 +1139,8 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1153 !sta->ht_info.ht_supported) 1139 !sta->ht_info.ht_supported)
1154 return -1; 1140 return -1;
1155 1141
1156 if (((sta->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS) >> 2) 1142 if (((sta->ht_info.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1157 == IWL_MIMO_PS_STATIC) 1143 == WLAN_HT_CAP_SM_PS_STATIC)
1158 return -1; 1144 return -1;
1159 1145
1160 /* Need both Tx chains/antennas to support MIMO */ 1146 /* Need both Tx chains/antennas to support MIMO */
@@ -1210,7 +1196,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1210static int rs_switch_to_siso(struct iwl_priv *priv, 1196static int rs_switch_to_siso(struct iwl_priv *priv,
1211 struct iwl_lq_sta *lq_sta, 1197 struct iwl_lq_sta *lq_sta,
1212 struct ieee80211_conf *conf, 1198 struct ieee80211_conf *conf,
1213 struct sta_info *sta, 1199 struct ieee80211_sta *sta,
1214 struct iwl_scale_tbl_info *tbl, int index) 1200 struct iwl_scale_tbl_info *tbl, int index)
1215{ 1201{
1216 u16 rate_mask; 1202 u16 rate_mask;
@@ -1270,7 +1256,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1270static int rs_move_legacy_other(struct iwl_priv *priv, 1256static int rs_move_legacy_other(struct iwl_priv *priv,
1271 struct iwl_lq_sta *lq_sta, 1257 struct iwl_lq_sta *lq_sta,
1272 struct ieee80211_conf *conf, 1258 struct ieee80211_conf *conf,
1273 struct sta_info *sta, 1259 struct ieee80211_sta *sta,
1274 int index) 1260 int index)
1275{ 1261{
1276 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1262 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1281,15 +1267,23 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1281 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1267 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1282 u8 start_action = tbl->action; 1268 u8 start_action = tbl->action;
1283 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1269 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1270 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1284 int ret = 0; 1271 int ret = 0;
1285 1272
1286 for (; ;) { 1273 for (; ;) {
1287 switch (tbl->action) { 1274 switch (tbl->action) {
1288 case IWL_LEGACY_SWITCH_ANTENNA: 1275 case IWL_LEGACY_SWITCH_ANTENNA1:
1276 case IWL_LEGACY_SWITCH_ANTENNA2:
1289 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n"); 1277 IWL_DEBUG_RATE("LQ: Legacy toggle Antenna\n");
1290 1278
1291 lq_sta->action_counter++; 1279 lq_sta->action_counter++;
1292 1280
1281 if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
1282 tx_chains_num <= 1) ||
1283 (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
1284 tx_chains_num <= 2))
1285 break;
1286
1293 /* Don't change antenna if success has been great */ 1287 /* Don't change antenna if success has been great */
1294 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1288 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1295 break; 1289 break;
@@ -1299,7 +1293,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1299 1293
1300 if (rs_toggle_antenna(valid_tx_ant, 1294 if (rs_toggle_antenna(valid_tx_ant,
1301 &search_tbl->current_rate, search_tbl)) { 1295 &search_tbl->current_rate, search_tbl)) {
1302 lq_sta->search_better_tbl = 1; 1296 rs_set_expected_tpt_table(lq_sta, search_tbl);
1303 goto out; 1297 goto out;
1304 } 1298 }
1305 break; 1299 break;
@@ -1312,43 +1306,54 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1312 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1306 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1313 search_tbl, index); 1307 search_tbl, index);
1314 if (!ret) { 1308 if (!ret) {
1315 lq_sta->search_better_tbl = 1;
1316 lq_sta->action_counter = 0; 1309 lq_sta->action_counter = 0;
1317 goto out; 1310 goto out;
1318 } 1311 }
1319 1312
1320 break; 1313 break;
1321 case IWL_LEGACY_SWITCH_MIMO2: 1314 case IWL_LEGACY_SWITCH_MIMO2_AB:
1315 case IWL_LEGACY_SWITCH_MIMO2_AC:
1316 case IWL_LEGACY_SWITCH_MIMO2_BC:
1322 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n"); 1317 IWL_DEBUG_RATE("LQ: Legacy switch to MIMO2\n");
1323 1318
1324 /* Set up search table to try MIMO */ 1319 /* Set up search table to try MIMO */
1325 memcpy(search_tbl, tbl, sz); 1320 memcpy(search_tbl, tbl, sz);
1326 search_tbl->is_SGI = 0; 1321 search_tbl->is_SGI = 0;
1327 search_tbl->ant_type = ANT_AB;/*FIXME:RS*/ 1322
1328 /*FIXME:RS:need to check ant validity*/ 1323 if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
1324 search_tbl->ant_type = ANT_AB;
1325 else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
1326 search_tbl->ant_type = ANT_AC;
1327 else
1328 search_tbl->ant_type = ANT_BC;
1329
1330 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1331 break;
1332
1329 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, 1333 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1330 search_tbl, index); 1334 search_tbl, index);
1331 if (!ret) { 1335 if (!ret) {
1332 lq_sta->search_better_tbl = 1;
1333 lq_sta->action_counter = 0; 1336 lq_sta->action_counter = 0;
1334 goto out; 1337 goto out;
1335 } 1338 }
1336 break; 1339 break;
1337 } 1340 }
1338 tbl->action++; 1341 tbl->action++;
1339 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2) 1342 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1340 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1343 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1341 1344
1342 if (tbl->action == start_action) 1345 if (tbl->action == start_action)
1343 break; 1346 break;
1344 1347
1345 } 1348 }
1349 search_tbl->lq_type = LQ_NONE;
1346 return 0; 1350 return 0;
1347 1351
1348 out: 1352out:
1353 lq_sta->search_better_tbl = 1;
1349 tbl->action++; 1354 tbl->action++;
1350 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2) 1355 if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
1351 tbl->action = IWL_LEGACY_SWITCH_ANTENNA; 1356 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1352 return 0; 1357 return 0;
1353 1358
1354} 1359}
@@ -1359,7 +1364,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1359static int rs_move_siso_to_other(struct iwl_priv *priv, 1364static int rs_move_siso_to_other(struct iwl_priv *priv,
1360 struct iwl_lq_sta *lq_sta, 1365 struct iwl_lq_sta *lq_sta,
1361 struct ieee80211_conf *conf, 1366 struct ieee80211_conf *conf,
1362 struct sta_info *sta, int index) 1367 struct ieee80211_sta *sta, int index)
1363{ 1368{
1364 u8 is_green = lq_sta->is_green; 1369 u8 is_green = lq_sta->is_green;
1365 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1370 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1370,34 +1375,51 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1370 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1375 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1371 u8 start_action = tbl->action; 1376 u8 start_action = tbl->action;
1372 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1377 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1378 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1373 int ret; 1379 int ret;
1374 1380
1375 for (;;) { 1381 for (;;) {
1376 lq_sta->action_counter++; 1382 lq_sta->action_counter++;
1377 switch (tbl->action) { 1383 switch (tbl->action) {
1378 case IWL_SISO_SWITCH_ANTENNA: 1384 case IWL_SISO_SWITCH_ANTENNA1:
1385 case IWL_SISO_SWITCH_ANTENNA2:
1379 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n"); 1386 IWL_DEBUG_RATE("LQ: SISO toggle Antenna\n");
1387
1388 if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
1389 tx_chains_num <= 1) ||
1390 (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
1391 tx_chains_num <= 2))
1392 break;
1393
1380 if (window->success_ratio >= IWL_RS_GOOD_RATIO) 1394 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1381 break; 1395 break;
1382 1396
1383 memcpy(search_tbl, tbl, sz); 1397 memcpy(search_tbl, tbl, sz);
1384 if (rs_toggle_antenna(valid_tx_ant, 1398 if (rs_toggle_antenna(valid_tx_ant,
1385 &search_tbl->current_rate, search_tbl)) { 1399 &search_tbl->current_rate, search_tbl))
1386 lq_sta->search_better_tbl = 1;
1387 goto out; 1400 goto out;
1388 }
1389 break; 1401 break;
1390 case IWL_SISO_SWITCH_MIMO2: 1402 case IWL_SISO_SWITCH_MIMO2_AB:
1403 case IWL_SISO_SWITCH_MIMO2_AC:
1404 case IWL_SISO_SWITCH_MIMO2_BC:
1391 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n"); 1405 IWL_DEBUG_RATE("LQ: SISO switch to MIMO2\n");
1392 memcpy(search_tbl, tbl, sz); 1406 memcpy(search_tbl, tbl, sz);
1393 search_tbl->is_SGI = 0; 1407 search_tbl->is_SGI = 0;
1394 search_tbl->ant_type = ANT_AB; /*FIXME:RS*/ 1408
1409 if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
1410 search_tbl->ant_type = ANT_AB;
1411 else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
1412 search_tbl->ant_type = ANT_AC;
1413 else
1414 search_tbl->ant_type = ANT_BC;
1415
1416 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1417 break;
1418
1395 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta, 1419 ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
1396 search_tbl, index); 1420 search_tbl, index);
1397 if (!ret) { 1421 if (!ret)
1398 lq_sta->search_better_tbl = 1;
1399 goto out; 1422 goto out;
1400 }
1401 break; 1423 break;
1402 case IWL_SISO_SWITCH_GI: 1424 case IWL_SISO_SWITCH_GI:
1403 if (!tbl->is_fat && 1425 if (!tbl->is_fat &&
@@ -1427,22 +1449,23 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1427 } 1449 }
1428 search_tbl->current_rate = rate_n_flags_from_tbl( 1450 search_tbl->current_rate = rate_n_flags_from_tbl(
1429 search_tbl, index, is_green); 1451 search_tbl, index, is_green);
1430 lq_sta->search_better_tbl = 1;
1431 goto out; 1452 goto out;
1432 } 1453 }
1433 tbl->action++; 1454 tbl->action++;
1434 if (tbl->action > IWL_SISO_SWITCH_GI) 1455 if (tbl->action > IWL_SISO_SWITCH_GI)
1435 tbl->action = IWL_SISO_SWITCH_ANTENNA; 1456 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1436 1457
1437 if (tbl->action == start_action) 1458 if (tbl->action == start_action)
1438 break; 1459 break;
1439 } 1460 }
1461 search_tbl->lq_type = LQ_NONE;
1440 return 0; 1462 return 0;
1441 1463
1442 out: 1464 out:
1465 lq_sta->search_better_tbl = 1;
1443 tbl->action++; 1466 tbl->action++;
1444 if (tbl->action > IWL_SISO_SWITCH_GI) 1467 if (tbl->action > IWL_SISO_SWITCH_GI)
1445 tbl->action = IWL_SISO_SWITCH_ANTENNA; 1468 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1446 return 0; 1469 return 0;
1447} 1470}
1448 1471
@@ -1452,43 +1475,64 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1452static int rs_move_mimo_to_other(struct iwl_priv *priv, 1475static int rs_move_mimo_to_other(struct iwl_priv *priv,
1453 struct iwl_lq_sta *lq_sta, 1476 struct iwl_lq_sta *lq_sta,
1454 struct ieee80211_conf *conf, 1477 struct ieee80211_conf *conf,
1455 struct sta_info *sta, int index) 1478 struct ieee80211_sta *sta, int index)
1456{ 1479{
1457 s8 is_green = lq_sta->is_green; 1480 s8 is_green = lq_sta->is_green;
1458 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 1481 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1459 struct iwl_scale_tbl_info *search_tbl = 1482 struct iwl_scale_tbl_info *search_tbl =
1460 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 1483 &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
1484 struct iwl_rate_scale_data *window = &(tbl->win[index]);
1461 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1485 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1462 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1486 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1463 u8 start_action = tbl->action; 1487 u8 start_action = tbl->action;
1464 /*u8 valid_tx_ant = priv->hw_params.valid_tx_ant;*/ 1488 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1489 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1465 int ret; 1490 int ret;
1466 1491
1467 for (;;) { 1492 for (;;) {
1468 lq_sta->action_counter++; 1493 lq_sta->action_counter++;
1469 switch (tbl->action) { 1494 switch (tbl->action) {
1470 case IWL_MIMO_SWITCH_ANTENNA_A: 1495 case IWL_MIMO2_SWITCH_ANTENNA1:
1471 case IWL_MIMO_SWITCH_ANTENNA_B: 1496 case IWL_MIMO2_SWITCH_ANTENNA2:
1497 IWL_DEBUG_RATE("LQ: MIMO toggle Antennas\n");
1498
1499 if (tx_chains_num <= 2)
1500 break;
1501
1502 if (window->success_ratio >= IWL_RS_GOOD_RATIO)
1503 break;
1504
1505 memcpy(search_tbl, tbl, sz);
1506 if (rs_toggle_antenna(valid_tx_ant,
1507 &search_tbl->current_rate, search_tbl))
1508 goto out;
1509 break;
1510 case IWL_MIMO2_SWITCH_SISO_A:
1511 case IWL_MIMO2_SWITCH_SISO_B:
1512 case IWL_MIMO2_SWITCH_SISO_C:
1472 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n"); 1513 IWL_DEBUG_RATE("LQ: MIMO2 switch to SISO\n");
1473 1514
1474 /* Set up new search table for SISO */ 1515 /* Set up new search table for SISO */
1475 memcpy(search_tbl, tbl, sz); 1516 memcpy(search_tbl, tbl, sz);
1476 1517
1477 /*FIXME:RS:need to check ant validity + C*/ 1518 if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
1478 if (tbl->action == IWL_MIMO_SWITCH_ANTENNA_A)
1479 search_tbl->ant_type = ANT_A; 1519 search_tbl->ant_type = ANT_A;
1480 else 1520 else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
1481 search_tbl->ant_type = ANT_B; 1521 search_tbl->ant_type = ANT_B;
1522 else
1523 search_tbl->ant_type = ANT_C;
1524
1525 if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
1526 break;
1482 1527
1483 ret = rs_switch_to_siso(priv, lq_sta, conf, sta, 1528 ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
1484 search_tbl, index); 1529 search_tbl, index);
1485 if (!ret) { 1530 if (!ret)
1486 lq_sta->search_better_tbl = 1;
1487 goto out; 1531 goto out;
1488 } 1532
1489 break; 1533 break;
1490 1534
1491 case IWL_MIMO_SWITCH_GI: 1535 case IWL_MIMO2_SWITCH_GI:
1492 if (!tbl->is_fat && 1536 if (!tbl->is_fat &&
1493 !(priv->current_ht_config.sgf & 1537 !(priv->current_ht_config.sgf &
1494 HT_SHORT_GI_20MHZ)) 1538 HT_SHORT_GI_20MHZ))
@@ -1517,23 +1561,23 @@ static int rs_move_mimo_to_other(struct iwl_priv *priv,
1517 } 1561 }
1518 search_tbl->current_rate = rate_n_flags_from_tbl( 1562 search_tbl->current_rate = rate_n_flags_from_tbl(
1519 search_tbl, index, is_green); 1563 search_tbl, index, is_green);
1520 lq_sta->search_better_tbl = 1;
1521 goto out; 1564 goto out;
1522 1565
1523 } 1566 }
1524 tbl->action++; 1567 tbl->action++;
1525 if (tbl->action > IWL_MIMO_SWITCH_GI) 1568 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1526 tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; 1569 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1527 1570
1528 if (tbl->action == start_action) 1571 if (tbl->action == start_action)
1529 break; 1572 break;
1530 } 1573 }
1531 1574 search_tbl->lq_type = LQ_NONE;
1532 return 0; 1575 return 0;
1533 out: 1576 out:
1577 lq_sta->search_better_tbl = 1;
1534 tbl->action++; 1578 tbl->action++;
1535 if (tbl->action > IWL_MIMO_SWITCH_GI) 1579 if (tbl->action > IWL_MIMO2_SWITCH_GI)
1536 tbl->action = IWL_MIMO_SWITCH_ANTENNA_A; 1580 tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
1537 return 0; 1581 return 0;
1538 1582
1539} 1583}
@@ -1624,12 +1668,11 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
1624 * Do rate scaling and search for new modulation mode. 1668 * Do rate scaling and search for new modulation mode.
1625 */ 1669 */
1626static void rs_rate_scale_perform(struct iwl_priv *priv, 1670static void rs_rate_scale_perform(struct iwl_priv *priv,
1627 struct net_device *dev,
1628 struct ieee80211_hdr *hdr, 1671 struct ieee80211_hdr *hdr,
1629 struct sta_info *sta) 1672 struct ieee80211_sta *sta,
1673 struct iwl_lq_sta *lq_sta)
1630{ 1674{
1631 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1675 struct ieee80211_hw *hw = priv->hw;
1632 struct ieee80211_hw *hw = local_to_hw(local);
1633 struct ieee80211_conf *conf = &hw->conf; 1676 struct ieee80211_conf *conf = &hw->conf;
1634 int low = IWL_RATE_INVALID; 1677 int low = IWL_RATE_INVALID;
1635 int high = IWL_RATE_INVALID; 1678 int high = IWL_RATE_INVALID;
@@ -1644,7 +1687,6 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1644 __le16 fc; 1687 __le16 fc;
1645 u16 rate_mask; 1688 u16 rate_mask;
1646 u8 update_lq = 0; 1689 u8 update_lq = 0;
1647 struct iwl_lq_sta *lq_sta;
1648 struct iwl_scale_tbl_info *tbl, *tbl1; 1690 struct iwl_scale_tbl_info *tbl, *tbl1;
1649 u16 rate_scale_index_msk = 0; 1691 u16 rate_scale_index_msk = 0;
1650 u32 rate; 1692 u32 rate;
@@ -1665,10 +1707,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1665 return; 1707 return;
1666 } 1708 }
1667 1709
1668 if (!sta || !sta->rate_ctrl_priv) 1710 if (!sta || !lq_sta)
1669 return; 1711 return;
1670 1712
1671 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv; 1713 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
1672 1714
1673 tid = rs_tl_add_packet(lq_sta, hdr); 1715 tid = rs_tl_add_packet(lq_sta, hdr);
1674 1716
@@ -1686,7 +1728,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1686 is_green = lq_sta->is_green; 1728 is_green = lq_sta->is_green;
1687 1729
1688 /* current tx rate */ 1730 /* current tx rate */
1689 index = sta->last_txrate_idx; 1731 index = lq_sta->last_txrate_idx;
1690 1732
1691 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index, 1733 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index,
1692 tbl->lq_type); 1734 tbl->lq_type);
@@ -1747,19 +1789,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1747 rs_stay_in_table(lq_sta); 1789 rs_stay_in_table(lq_sta);
1748 1790
1749 goto out; 1791 goto out;
1792 }
1750 1793
1751 /* Else we have enough samples; calculate estimate of 1794 /* Else we have enough samples; calculate estimate of
1752 * actual average throughput */ 1795 * actual average throughput */
1753 } else { 1796
1754 /*FIXME:RS remove this else if we don't get this error*/ 1797 BUG_ON(window->average_tpt != ((window->success_ratio *
1755 if (window->average_tpt != ((window->success_ratio * 1798 tbl->expected_tpt[index] + 64) / 128));
1756 tbl->expected_tpt[index] + 64) / 128)) {
1757 IWL_ERROR("expected_tpt should have been calculated"
1758 " by now\n");
1759 window->average_tpt = ((window->success_ratio *
1760 tbl->expected_tpt[index] + 64) / 128);
1761 }
1762 }
1763 1799
1764 /* If we are searching for better modulation mode, check success. */ 1800 /* If we are searching for better modulation mode, check success. */
1765 if (lq_sta->search_better_tbl) { 1801 if (lq_sta->search_better_tbl) {
@@ -1769,7 +1805,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1769 * continuing to use the setup that we've been trying. */ 1805 * continuing to use the setup that we've been trying. */
1770 if (window->average_tpt > lq_sta->last_tpt) { 1806 if (window->average_tpt > lq_sta->last_tpt) {
1771 1807
1772 IWL_DEBUG_RATE("LQ: SWITCHING TO CURRENT TABLE " 1808 IWL_DEBUG_RATE("LQ: SWITCHING TO NEW TABLE "
1773 "suc=%d cur-tpt=%d old-tpt=%d\n", 1809 "suc=%d cur-tpt=%d old-tpt=%d\n",
1774 window->success_ratio, 1810 window->success_ratio,
1775 window->average_tpt, 1811 window->average_tpt,
@@ -2005,15 +2041,7 @@ lq_update:
2005out: 2041out:
2006 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green); 2042 tbl->current_rate = rate_n_flags_from_tbl(tbl, index, is_green);
2007 i = index; 2043 i = index;
2008 sta->last_txrate_idx = i; 2044 lq_sta->last_txrate_idx = i;
2009
2010 /* sta->txrate_idx is an index to A mode rates which start
2011 * at IWL_FIRST_OFDM_RATE
2012 */
2013 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2014 sta->txrate_idx = i - IWL_FIRST_OFDM_RATE;
2015 else
2016 sta->txrate_idx = i;
2017 2045
2018 return; 2046 return;
2019} 2047}
@@ -2021,9 +2049,9 @@ out:
2021 2049
2022static void rs_initialize_lq(struct iwl_priv *priv, 2050static void rs_initialize_lq(struct iwl_priv *priv,
2023 struct ieee80211_conf *conf, 2051 struct ieee80211_conf *conf,
2024 struct sta_info *sta) 2052 struct ieee80211_sta *sta,
2053 struct iwl_lq_sta *lq_sta)
2025{ 2054{
2026 struct iwl_lq_sta *lq_sta;
2027 struct iwl_scale_tbl_info *tbl; 2055 struct iwl_scale_tbl_info *tbl;
2028 int rate_idx; 2056 int rate_idx;
2029 int i; 2057 int i;
@@ -2032,14 +2060,13 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2032 u8 active_tbl = 0; 2060 u8 active_tbl = 0;
2033 u8 valid_tx_ant; 2061 u8 valid_tx_ant;
2034 2062
2035 if (!sta || !sta->rate_ctrl_priv) 2063 if (!sta || !lq_sta)
2036 goto out; 2064 goto out;
2037 2065
2038 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv; 2066 i = lq_sta->last_txrate_idx;
2039 i = sta->last_txrate_idx;
2040 2067
2041 if ((lq_sta->lq.sta_id == 0xff) && 2068 if ((lq_sta->lq.sta_id == 0xff) &&
2042 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 2069 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
2043 goto out; 2070 goto out;
2044 2071
2045 valid_tx_ant = priv->hw_params.valid_tx_ant; 2072 valid_tx_ant = priv->hw_params.valid_tx_ant;
@@ -2076,40 +2103,33 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2076 return; 2103 return;
2077} 2104}
2078 2105
2079static void rs_get_rate(void *priv_rate, struct net_device *dev, 2106static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
2080 struct ieee80211_supported_band *sband, 2107 struct ieee80211_sta *sta, void *priv_sta,
2081 struct sk_buff *skb, 2108 struct sk_buff *skb, struct rate_selection *sel)
2082 struct rate_selection *sel)
2083{ 2109{
2084 2110
2085 int i; 2111 int i;
2086 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2112 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
2087 struct ieee80211_conf *conf = &local->hw.conf; 2113 struct ieee80211_conf *conf = &priv->hw->conf;
2088 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2114 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2089 struct sta_info *sta;
2090 __le16 fc; 2115 __le16 fc;
2091 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2092 struct iwl_lq_sta *lq_sta; 2116 struct iwl_lq_sta *lq_sta;
2093 2117
2094 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2118 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2095 2119
2096 rcu_read_lock();
2097
2098 sta = sta_info_get(local, hdr->addr1);
2099
2100 /* Send management frames and broadcast/multicast data using lowest 2120 /* Send management frames and broadcast/multicast data using lowest
2101 * rate. */ 2121 * rate. */
2102 fc = hdr->frame_control; 2122 fc = hdr->frame_control;
2103 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) || 2123 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
2104 !sta || !sta->rate_ctrl_priv) { 2124 !sta || !priv_sta) {
2105 sel->rate_idx = rate_lowest_index(local, sband, sta); 2125 sel->rate_idx = rate_lowest_index(sband, sta);
2106 goto out; 2126 return;
2107 } 2127 }
2108 2128
2109 lq_sta = (struct iwl_lq_sta *)sta->rate_ctrl_priv; 2129 lq_sta = (struct iwl_lq_sta *)priv_sta;
2110 i = sta->last_txrate_idx; 2130 i = lq_sta->last_txrate_idx;
2111 2131
2112 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2132 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
2113 !lq_sta->ibss_sta_added) { 2133 !lq_sta->ibss_sta_added) {
2114 u8 sta_id = iwl_find_station(priv, hdr->addr1); 2134 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2115 DECLARE_MAC_BUF(mac); 2135 DECLARE_MAC_BUF(mac);
@@ -2124,23 +2144,22 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2124 lq_sta->lq.sta_id = sta_id; 2144 lq_sta->lq.sta_id = sta_id;
2125 lq_sta->lq.rs_table[0].rate_n_flags = 0; 2145 lq_sta->lq.rs_table[0].rate_n_flags = 0;
2126 lq_sta->ibss_sta_added = 1; 2146 lq_sta->ibss_sta_added = 1;
2127 rs_initialize_lq(priv, conf, sta); 2147 rs_initialize_lq(priv, conf, sta, lq_sta);
2128 } 2148 }
2129 } 2149 }
2130 2150
2131 if ((i < 0) || (i > IWL_RATE_COUNT)) { 2151 if ((i < 0) || (i > IWL_RATE_COUNT)) {
2132 sel->rate_idx = rate_lowest_index(local, sband, sta); 2152 sel->rate_idx = rate_lowest_index(sband, sta);
2133 goto out; 2153 return;
2134 } 2154 }
2135 2155
2136 if (sband->band == IEEE80211_BAND_5GHZ) 2156 if (sband->band == IEEE80211_BAND_5GHZ)
2137 i -= IWL_FIRST_OFDM_RATE; 2157 i -= IWL_FIRST_OFDM_RATE;
2138 sel->rate_idx = i; 2158 sel->rate_idx = i;
2139out:
2140 rcu_read_unlock();
2141} 2159}
2142 2160
2143static void *rs_alloc_sta(void *priv_rate, gfp_t gfp) 2161static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2162 gfp_t gfp)
2144{ 2163{
2145 struct iwl_lq_sta *lq_sta; 2164 struct iwl_lq_sta *lq_sta;
2146 struct iwl_priv *priv; 2165 struct iwl_priv *priv;
@@ -2163,33 +2182,28 @@ static void *rs_alloc_sta(void *priv_rate, gfp_t gfp)
2163 return lq_sta; 2182 return lq_sta;
2164} 2183}
2165 2184
2166static void rs_rate_init(void *priv_rate, void *priv_sta, 2185static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2167 struct ieee80211_local *local, 2186 struct ieee80211_sta *sta, void *priv_sta)
2168 struct sta_info *sta)
2169{ 2187{
2170 int i, j; 2188 int i, j;
2171 struct ieee80211_conf *conf = &local->hw.conf; 2189 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
2172 struct ieee80211_supported_band *sband; 2190 struct ieee80211_conf *conf = &priv->hw->conf;
2173 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2174 struct iwl_lq_sta *lq_sta = priv_sta; 2191 struct iwl_lq_sta *lq_sta = priv_sta;
2175 2192
2176 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2177
2178 lq_sta->flush_timer = 0; 2193 lq_sta->flush_timer = 0;
2179 lq_sta->supp_rates = sta->supp_rates[sband->band]; 2194 lq_sta->supp_rates = sta->supp_rates[sband->band];
2180 sta->txrate_idx = 3;
2181 for (j = 0; j < LQ_SIZE; j++) 2195 for (j = 0; j < LQ_SIZE; j++)
2182 for (i = 0; i < IWL_RATE_COUNT; i++) 2196 for (i = 0; i < IWL_RATE_COUNT; i++)
2183 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); 2197 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2184 2198
2185 IWL_DEBUG_RATE("LQ: *** rate scale global init ***\n"); 2199 IWL_DEBUG_RATE("LQ: *** rate scale station global init ***\n");
2186 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2200 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2187 * the lowest or the highest rate.. Could consider using RSSI from 2201 * the lowest or the highest rate.. Could consider using RSSI from
2188 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2202 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2189 * after assoc.. */ 2203 * after assoc.. */
2190 2204
2191 lq_sta->ibss_sta_added = 0; 2205 lq_sta->ibss_sta_added = 0;
2192 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2206 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2193 u8 sta_id = iwl_find_station(priv, sta->addr); 2207 u8 sta_id = iwl_find_station(priv, sta->addr);
2194 DECLARE_MAC_BUF(mac); 2208 DECLARE_MAC_BUF(mac);
2195 2209
@@ -2212,15 +2226,14 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2212 } 2226 }
2213 2227
2214 /* Find highest tx rate supported by hardware and destination station */ 2228 /* Find highest tx rate supported by hardware and destination station */
2229 lq_sta->last_txrate_idx = 3;
2215 for (i = 0; i < sband->n_bitrates; i++) 2230 for (i = 0; i < sband->n_bitrates; i++)
2216 if (sta->supp_rates[sband->band] & BIT(i)) 2231 if (sta->supp_rates[sband->band] & BIT(i))
2217 sta->txrate_idx = i; 2232 lq_sta->last_txrate_idx = i;
2218 2233
2219 sta->last_txrate_idx = sta->txrate_idx; 2234 /* For MODE_IEEE80211A, skip over cck rates in global rate table */
2220 /* WTF is with this bogus comment? A doesn't have cck rates */ 2235 if (sband->band == IEEE80211_BAND_5GHZ)
2221 /* For MODE_IEEE80211A, cck rates are at end of rate table */ 2236 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2222 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
2223 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2224 2237
2225 lq_sta->is_dup = 0; 2238 lq_sta->is_dup = 0;
2226 lq_sta->is_green = rs_use_green(priv, conf); 2239 lq_sta->is_green = rs_use_green(priv, conf);
@@ -2260,7 +2273,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2260 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2273 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2261 lq_sta->drv = priv; 2274 lq_sta->drv = priv;
2262 2275
2263 rs_initialize_lq(priv, conf, sta); 2276 rs_initialize_lq(priv, conf, sta, lq_sta);
2264} 2277}
2265 2278
2266static void rs_fill_link_cmd(const struct iwl_priv *priv, 2279static void rs_fill_link_cmd(const struct iwl_priv *priv,
@@ -2382,9 +2395,9 @@ static void rs_fill_link_cmd(const struct iwl_priv *priv,
2382 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000); 2395 lq_cmd->agg_params.agg_time_limit = cpu_to_le16(4000);
2383} 2396}
2384 2397
2385static void *rs_alloc(struct ieee80211_local *local) 2398static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2386{ 2399{
2387 return local->hw.priv; 2400 return hw->priv;
2388} 2401}
2389/* rate scale requires free function to be implemented */ 2402/* rate scale requires free function to be implemented */
2390static void rs_free(void *priv_rate) 2403static void rs_free(void *priv_rate)
@@ -2405,12 +2418,12 @@ static void rs_clear(void *priv_rate)
2405#endif /* CONFIG_IWLWIFI_DEBUG */ 2418#endif /* CONFIG_IWLWIFI_DEBUG */
2406} 2419}
2407 2420
2408static void rs_free_sta(void *priv_rate, void *priv_sta) 2421static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2422 void *priv_sta)
2409{ 2423{
2410 struct iwl_lq_sta *lq_sta = priv_sta; 2424 struct iwl_lq_sta *lq_sta = priv_sta;
2411 struct iwl_priv *priv; 2425 struct iwl_priv *priv = priv_r;
2412 2426
2413 priv = (struct iwl_priv *)priv_rate;
2414 IWL_DEBUG_RATE("enter\n"); 2427 IWL_DEBUG_RATE("enter\n");
2415 kfree(lq_sta); 2428 kfree(lq_sta);
2416 IWL_DEBUG_RATE("leave\n"); 2429 IWL_DEBUG_RATE("leave\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 84d4d1e33755..d148d73635eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -206,21 +206,28 @@ enum {
206#define IWL_RATE_DECREASE_TH 1920 /* 15% */ 206#define IWL_RATE_DECREASE_TH 1920 /* 15% */
207 207
208/* possible actions when in legacy mode */ 208/* possible actions when in legacy mode */
209#define IWL_LEGACY_SWITCH_ANTENNA 0 209#define IWL_LEGACY_SWITCH_ANTENNA1 0
210#define IWL_LEGACY_SWITCH_SISO 1 210#define IWL_LEGACY_SWITCH_ANTENNA2 1
211#define IWL_LEGACY_SWITCH_MIMO2 2 211#define IWL_LEGACY_SWITCH_SISO 2
212#define IWL_LEGACY_SWITCH_MIMO2_AB 3
213#define IWL_LEGACY_SWITCH_MIMO2_AC 4
214#define IWL_LEGACY_SWITCH_MIMO2_BC 5
212 215
213/* possible actions when in siso mode */ 216/* possible actions when in siso mode */
214#define IWL_SISO_SWITCH_ANTENNA 0 217#define IWL_SISO_SWITCH_ANTENNA1 0
215#define IWL_SISO_SWITCH_MIMO2 1 218#define IWL_SISO_SWITCH_ANTENNA2 1
216#define IWL_SISO_SWITCH_GI 2 219#define IWL_SISO_SWITCH_MIMO2_AB 2
220#define IWL_SISO_SWITCH_MIMO2_AC 3
221#define IWL_SISO_SWITCH_MIMO2_BC 4
222#define IWL_SISO_SWITCH_GI 5
217 223
218/* possible actions when in mimo mode */ 224/* possible actions when in mimo mode */
219#define IWL_MIMO_SWITCH_ANTENNA_A 0 225#define IWL_MIMO2_SWITCH_ANTENNA1 0
220#define IWL_MIMO_SWITCH_ANTENNA_B 1 226#define IWL_MIMO2_SWITCH_ANTENNA2 1
221#define IWL_MIMO_SWITCH_GI 2 227#define IWL_MIMO2_SWITCH_SISO_A 2
222 228#define IWL_MIMO2_SWITCH_SISO_B 3
223/*FIXME:RS:separate MIMO2/3 transitions*/ 229#define IWL_MIMO2_SWITCH_SISO_C 4
230#define IWL_MIMO2_SWITCH_GI 5
224 231
225/*FIXME:RS:add posible acctions for MIMO3*/ 232/*FIXME:RS:add posible acctions for MIMO3*/
226 233
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e01f048a02dd..204abab76449 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -337,7 +337,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
337 /* If we have set the ASSOC_MSK and we are in BSS mode then 337 /* If we have set the ASSOC_MSK and we are in BSS mode then
338 * add the IWL_AP_ID to the station rate table */ 338 * add the IWL_AP_ID to the station rate table */
339 if (new_assoc) { 339 if (new_assoc) {
340 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 340 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
341 ret = iwl_rxon_add_station(priv, 341 ret = iwl_rxon_add_station(priv,
342 priv->active_rxon.bssid_addr, 1); 342 priv->active_rxon.bssid_addr, 1);
343 if (ret == IWL_INVALID_STATION) { 343 if (ret == IWL_INVALID_STATION) {
@@ -448,8 +448,8 @@ static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
448 const u8 *dest, int left) 448 const u8 *dest, int left)
449{ 449{
450 if (!iwl_is_associated(priv) || !priv->ibss_beacon || 450 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
451 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && 451 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
452 (priv->iw_mode != IEEE80211_IF_TYPE_AP))) 452 (priv->iw_mode != NL80211_IFTYPE_AP)))
453 return 0; 453 return 0;
454 454
455 if (priv->ibss_beacon->len > left) 455 if (priv->ibss_beacon->len > left)
@@ -485,7 +485,7 @@ static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
485 return IWL_RATE_6M_PLCP; 485 return IWL_RATE_6M_PLCP;
486} 486}
487 487
488unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 488static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
489 struct iwl_frame *frame, u8 rate) 489 struct iwl_frame *frame, u8 rate)
490{ 490{
491 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 491 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
@@ -564,8 +564,6 @@ static void iwl4965_ht_conf(struct iwl_priv *priv,
564 if (!iwl_conf->is_ht) 564 if (!iwl_conf->is_ht)
565 return; 565 return;
566 566
567 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
568
569 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20) 567 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
570 iwl_conf->sgf |= HT_SHORT_GI_20MHZ; 568 iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
571 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40) 569 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
@@ -586,6 +584,8 @@ static void iwl4965_ht_conf(struct iwl_priv *priv,
586 iwl_conf->supported_chan_width = 0; 584 iwl_conf->supported_chan_width = 0;
587 } 585 }
588 586
587 iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
588
589 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16); 589 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
590 590
591 iwl_conf->control_channel = ht_bss_conf->primary_channel; 591 iwl_conf->control_channel = ht_bss_conf->primary_channel;
@@ -672,7 +672,7 @@ static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
672 beacon_int = priv->beacon_int; 672 beacon_int = priv->beacon_int;
673 spin_unlock_irqrestore(&priv->lock, flags); 673 spin_unlock_irqrestore(&priv->lock, flags);
674 674
675 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 675 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
676 if (beacon_int == 0) { 676 if (beacon_int == 0) {
677 priv->rxon_timing.beacon_interval = cpu_to_le16(100); 677 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
678 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); 678 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
@@ -721,7 +721,7 @@ static void iwl_set_flags_for_band(struct iwl_priv *priv,
721 else 721 else
722 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 722 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
723 723
724 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 724 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
725 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 725 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
726 726
727 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; 727 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -740,23 +740,23 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
740 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 740 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
741 741
742 switch (priv->iw_mode) { 742 switch (priv->iw_mode) {
743 case IEEE80211_IF_TYPE_AP: 743 case NL80211_IFTYPE_AP:
744 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; 744 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
745 break; 745 break;
746 746
747 case IEEE80211_IF_TYPE_STA: 747 case NL80211_IFTYPE_STATION:
748 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; 748 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
749 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 749 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
750 break; 750 break;
751 751
752 case IEEE80211_IF_TYPE_IBSS: 752 case NL80211_IFTYPE_ADHOC:
753 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; 753 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
754 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 754 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
755 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | 755 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
756 RXON_FILTER_ACCEPT_GRP_MSK; 756 RXON_FILTER_ACCEPT_GRP_MSK;
757 break; 757 break;
758 758
759 case IEEE80211_IF_TYPE_MNTR: 759 case NL80211_IFTYPE_MONITOR:
760 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; 760 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
761 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | 761 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
762 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 762 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
@@ -785,7 +785,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
785 * in some case A channels are all non IBSS 785 * in some case A channels are all non IBSS
786 * in this case force B/G channel 786 * in this case force B/G channel
787 */ 787 */
788 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 788 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
789 !(is_channel_ibss(ch_info))) 789 !(is_channel_ibss(ch_info)))
790 ch_info = &priv->channel_info[0]; 790 ch_info = &priv->channel_info[0];
791 791
@@ -1182,7 +1182,7 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
1182 le32_to_cpu(beacon->low_tsf), rate); 1182 le32_to_cpu(beacon->low_tsf), rate);
1183#endif 1183#endif
1184 1184
1185 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 1185 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
1186 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 1186 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
1187 queue_work(priv->workqueue, &priv->beacon_update); 1187 queue_work(priv->workqueue, &priv->beacon_update);
1188} 1188}
@@ -1270,7 +1270,7 @@ int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
1270 1270
1271 if (src == IWL_PWR_SRC_VAUX) { 1271 if (src == IWL_PWR_SRC_VAUX) {
1272 u32 val; 1272 u32 val;
1273 ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE, 1273 ret = pci_read_config_dword(priv->pci_dev, PCI_CFG_POWER_SOURCE,
1274 &val); 1274 &val);
1275 1275
1276 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) 1276 if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
@@ -2388,7 +2388,7 @@ static void iwl4965_bg_set_monitor(struct work_struct *work)
2388 2388
2389 mutex_lock(&priv->mutex); 2389 mutex_lock(&priv->mutex);
2390 2390
2391 ret = iwl4965_set_mode(priv, IEEE80211_IF_TYPE_MNTR); 2391 ret = iwl4965_set_mode(priv, NL80211_IFTYPE_MONITOR);
2392 2392
2393 if (ret) { 2393 if (ret) {
2394 if (ret == -EAGAIN) 2394 if (ret == -EAGAIN)
@@ -2469,7 +2469,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2469 DECLARE_MAC_BUF(mac); 2469 DECLARE_MAC_BUF(mac);
2470 unsigned long flags; 2470 unsigned long flags;
2471 2471
2472 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2472 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2473 IWL_ERROR("%s Should not be called in AP mode\n", __func__); 2473 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
2474 return; 2474 return;
2475 } 2475 }
@@ -2486,6 +2486,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2486 if (!priv->vif || !priv->is_open) 2486 if (!priv->vif || !priv->is_open)
2487 return; 2487 return;
2488 2488
2489 iwl_power_cancel_timeout(priv);
2489 iwl_scan_cancel_timeout(priv, 200); 2490 iwl_scan_cancel_timeout(priv, 200);
2490 2491
2491 conf = ieee80211_get_hw_conf(priv->hw); 2492 conf = ieee80211_get_hw_conf(priv->hw);
@@ -2503,8 +2504,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2503 2504
2504 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2505 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
2505 2506
2506 if (priv->current_ht_config.is_ht) 2507 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2507 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2508 2508
2509 iwl_set_rxon_chain(priv); 2509 iwl_set_rxon_chain(priv);
2510 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 2510 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
@@ -2523,7 +2523,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2523 else 2523 else
2524 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2524 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2525 2525
2526 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 2526 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2527 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2527 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2528 2528
2529 } 2529 }
@@ -2531,10 +2531,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2531 iwl4965_commit_rxon(priv); 2531 iwl4965_commit_rxon(priv);
2532 2532
2533 switch (priv->iw_mode) { 2533 switch (priv->iw_mode) {
2534 case IEEE80211_IF_TYPE_STA: 2534 case NL80211_IFTYPE_STATION:
2535 break; 2535 break;
2536 2536
2537 case IEEE80211_IF_TYPE_IBSS: 2537 case NL80211_IFTYPE_ADHOC:
2538 2538
2539 /* assume default assoc id */ 2539 /* assume default assoc id */
2540 priv->assoc_id = 1; 2540 priv->assoc_id = 1;
@@ -2550,20 +2550,23 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2550 break; 2550 break;
2551 } 2551 }
2552 2552
2553 /* Enable Rx differential gain and sensitivity calibrations */ 2553 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2554 iwl_chain_noise_reset(priv);
2555 priv->start_calib = 1;
2556
2557 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
2558 priv->assoc_station_added = 1; 2554 priv->assoc_station_added = 1;
2559 2555
2560 spin_lock_irqsave(&priv->lock, flags); 2556 spin_lock_irqsave(&priv->lock, flags);
2561 iwl_activate_qos(priv, 0); 2557 iwl_activate_qos(priv, 0);
2562 spin_unlock_irqrestore(&priv->lock, flags); 2558 spin_unlock_irqrestore(&priv->lock, flags);
2563 2559
2564 iwl_power_update_mode(priv, 0); 2560 /* the chain noise calibration will enabled PM upon completion
2565 /* we have just associated, don't start scan too early */ 2561 * If chain noise has already been run, then we need to enable
2566 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 2562 * power management here */
2563 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2564 iwl_power_enable_management(priv);
2565
2566 /* Enable Rx differential gain and sensitivity calibrations */
2567 iwl_chain_noise_reset(priv);
2568 priv->start_calib = 1;
2569
2567} 2570}
2568 2571
2569static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf); 2572static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
@@ -2728,12 +2731,6 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2728 2731
2729 IWL_DEBUG_MACDUMP("enter\n"); 2732 IWL_DEBUG_MACDUMP("enter\n");
2730 2733
2731 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
2732 IWL_DEBUG_MAC80211("leave - monitor\n");
2733 dev_kfree_skb_any(skb);
2734 return 0;
2735 }
2736
2737 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2734 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2738 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2735 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2739 2736
@@ -2798,8 +2795,6 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2798 mutex_lock(&priv->mutex); 2795 mutex_lock(&priv->mutex);
2799 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 2796 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
2800 2797
2801 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
2802
2803 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 2798 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
2804 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n"); 2799 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n");
2805 goto out; 2800 goto out;
@@ -2830,7 +2825,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2830 goto out; 2825 goto out;
2831 } 2826 }
2832 2827
2833 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && 2828 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2834 !is_channel_ibss(ch_info)) { 2829 !is_channel_ibss(ch_info)) {
2835 IWL_ERROR("channel %d in band %d not IBSS channel\n", 2830 IWL_ERROR("channel %d in band %d not IBSS channel\n",
2836 conf->channel->hw_value, conf->channel->band); 2831 conf->channel->hw_value, conf->channel->band);
@@ -2851,7 +2846,7 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2851 ) 2846 )
2852 priv->staging_rxon.flags = 0; 2847 priv->staging_rxon.flags = 0;
2853 2848
2854 iwl_set_rxon_channel(priv, conf->channel->band, channel); 2849 iwl_set_rxon_channel(priv, conf->channel);
2855 2850
2856 iwl_set_flags_for_band(priv, conf->channel->band); 2851 iwl_set_flags_for_band(priv, conf->channel->band);
2857 2852
@@ -2880,6 +2875,13 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2880 goto out; 2875 goto out;
2881 } 2876 }
2882 2877
2878 if (conf->flags & IEEE80211_CONF_PS)
2879 ret = iwl_power_set_user_mode(priv, IWL_POWER_INDEX_3);
2880 else
2881 ret = iwl_power_set_user_mode(priv, IWL_POWER_MODE_CAM);
2882 if (ret)
2883 IWL_DEBUG_MAC80211("Error setting power level\n");
2884
2883 IWL_DEBUG_MAC80211("TX Power old=%d new=%d\n", 2885 IWL_DEBUG_MAC80211("TX Power old=%d new=%d\n",
2884 priv->tx_power_user_lmt, conf->power_level); 2886 priv->tx_power_user_lmt, conf->power_level);
2885 2887
@@ -2945,7 +2947,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2945 priv->staging_rxon.flags &= 2947 priv->staging_rxon.flags &=
2946 ~RXON_FLG_SHORT_SLOT_MSK; 2948 ~RXON_FLG_SHORT_SLOT_MSK;
2947 2949
2948 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 2950 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2949 priv->staging_rxon.flags &= 2951 priv->staging_rxon.flags &=
2950 ~RXON_FLG_SHORT_SLOT_MSK; 2952 ~RXON_FLG_SHORT_SLOT_MSK;
2951 } 2953 }
@@ -2984,7 +2986,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
2984 return 0; 2986 return 0;
2985 } 2987 }
2986 2988
2987 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && 2989 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2988 conf->changed & IEEE80211_IFCC_BEACON) { 2990 conf->changed & IEEE80211_IFCC_BEACON) {
2989 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2991 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2990 if (!beacon) 2992 if (!beacon)
@@ -2994,7 +2996,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
2994 return rc; 2996 return rc;
2995 } 2997 }
2996 2998
2997 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 2999 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
2998 (!conf->ssid_len)) { 3000 (!conf->ssid_len)) {
2999 IWL_DEBUG_MAC80211 3001 IWL_DEBUG_MAC80211
3000 ("Leaving in AP mode because HostAPD is not ready.\n"); 3002 ("Leaving in AP mode because HostAPD is not ready.\n");
@@ -3017,7 +3019,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
3017 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { 3019 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
3018 */ 3020 */
3019 3021
3020 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 3022 if (priv->iw_mode == NL80211_IFTYPE_AP) {
3021 if (!conf->bssid) { 3023 if (!conf->bssid) {
3022 conf->bssid = priv->mac_addr; 3024 conf->bssid = priv->mac_addr;
3023 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 3025 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
@@ -3052,11 +3054,11 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
3052 * to verify) - jpk */ 3054 * to verify) - jpk */
3053 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 3055 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
3054 3056
3055 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 3057 if (priv->iw_mode == NL80211_IFTYPE_AP)
3056 iwl4965_config_ap(priv); 3058 iwl4965_config_ap(priv);
3057 else { 3059 else {
3058 rc = iwl4965_commit_rxon(priv); 3060 rc = iwl4965_commit_rxon(priv);
3059 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) 3061 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
3060 iwl_rxon_add_station( 3062 iwl_rxon_add_station(
3061 priv, priv->active_rxon.bssid_addr, 1); 3063 priv, priv->active_rxon.bssid_addr, 1);
3062 } 3064 }
@@ -3092,7 +3094,7 @@ static void iwl4965_configure_filter(struct ieee80211_hw *hw,
3092 3094
3093 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) { 3095 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
3094 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n", 3096 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
3095 IEEE80211_IF_TYPE_MNTR, 3097 NL80211_IFTYPE_MONITOR,
3096 changed_flags, *total_flags); 3098 changed_flags, *total_flags);
3097 /* queue work 'cuz mac80211 is holding a lock which 3099 /* queue work 'cuz mac80211 is holding a lock which
3098 * prevents us from issuing (synchronous) f/w cmds */ 3100 * prevents us from issuing (synchronous) f/w cmds */
@@ -3173,6 +3175,10 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
3173 priv->power_data.dtim_period = bss_conf->dtim_period; 3175 priv->power_data.dtim_period = bss_conf->dtim_period;
3174 priv->timestamp = bss_conf->timestamp; 3176 priv->timestamp = bss_conf->timestamp;
3175 priv->assoc_capability = bss_conf->assoc_capability; 3177 priv->assoc_capability = bss_conf->assoc_capability;
3178
3179 /* we have just associated, don't start scan too early
3180 * leave time for EAPOL exchange to complete
3181 */
3176 priv->next_scan_jiffies = jiffies + 3182 priv->next_scan_jiffies = jiffies +
3177 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; 3183 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
3178 mutex_lock(&priv->mutex); 3184 mutex_lock(&priv->mutex);
@@ -3189,11 +3195,11 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
3189 3195
3190} 3196}
3191 3197
3192static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) 3198static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
3193{ 3199{
3194 int rc = 0;
3195 unsigned long flags; 3200 unsigned long flags;
3196 struct iwl_priv *priv = hw->priv; 3201 struct iwl_priv *priv = hw->priv;
3202 int ret;
3197 3203
3198 IWL_DEBUG_MAC80211("enter\n"); 3204 IWL_DEBUG_MAC80211("enter\n");
3199 3205
@@ -3201,41 +3207,47 @@ static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
3201 spin_lock_irqsave(&priv->lock, flags); 3207 spin_lock_irqsave(&priv->lock, flags);
3202 3208
3203 if (!iwl_is_ready_rf(priv)) { 3209 if (!iwl_is_ready_rf(priv)) {
3204 rc = -EIO; 3210 ret = -EIO;
3205 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); 3211 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
3206 goto out_unlock; 3212 goto out_unlock;
3207 } 3213 }
3208 3214
3209 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ 3215 if (priv->iw_mode == NL80211_IFTYPE_AP) { /* APs don't scan */
3210 rc = -EIO; 3216 ret = -EIO;
3211 IWL_ERROR("ERROR: APs don't scan\n"); 3217 IWL_ERROR("ERROR: APs don't scan\n");
3212 goto out_unlock; 3218 goto out_unlock;
3213 } 3219 }
3214 3220
3215 /* we don't schedule scan within next_scan_jiffies period */ 3221 /* We don't schedule scan within next_scan_jiffies period.
3222 * Avoid scanning during possible EAPOL exchange, return
3223 * success immediately.
3224 */
3216 if (priv->next_scan_jiffies && 3225 if (priv->next_scan_jiffies &&
3217 time_after(priv->next_scan_jiffies, jiffies)) { 3226 time_after(priv->next_scan_jiffies, jiffies)) {
3218 rc = -EAGAIN; 3227 IWL_DEBUG_SCAN("scan rejected: within next scan period\n");
3228 queue_work(priv->workqueue, &priv->scan_completed);
3229 ret = 0;
3219 goto out_unlock; 3230 goto out_unlock;
3220 } 3231 }
3232
3221 /* if we just finished scan ask for delay */ 3233 /* if we just finished scan ask for delay */
3222 if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies + 3234 if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
3223 IWL_DELAY_NEXT_SCAN, jiffies)) { 3235 time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
3224 rc = -EAGAIN; 3236 IWL_DEBUG_SCAN("scan rejected: within previous scan period\n");
3237 queue_work(priv->workqueue, &priv->scan_completed);
3238 ret = 0;
3225 goto out_unlock; 3239 goto out_unlock;
3226 } 3240 }
3227 if (len) {
3228 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
3229 iwl_escape_essid(ssid, len), (int)len);
3230 3241
3242 if (ssid_len) {
3231 priv->one_direct_scan = 1; 3243 priv->one_direct_scan = 1;
3232 priv->direct_ssid_len = (u8) 3244 priv->direct_ssid_len = min_t(u8, ssid_len, IW_ESSID_MAX_SIZE);
3233 min((u8) len, (u8) IW_ESSID_MAX_SIZE);
3234 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); 3245 memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len);
3235 } else 3246 } else {
3236 priv->one_direct_scan = 0; 3247 priv->one_direct_scan = 0;
3248 }
3237 3249
3238 rc = iwl_scan_initiate(priv); 3250 ret = iwl_scan_initiate(priv);
3239 3251
3240 IWL_DEBUG_MAC80211("leave\n"); 3252 IWL_DEBUG_MAC80211("leave\n");
3241 3253
@@ -3243,7 +3255,7 @@ out_unlock:
3243 spin_unlock_irqrestore(&priv->lock, flags); 3255 spin_unlock_irqrestore(&priv->lock, flags);
3244 mutex_unlock(&priv->mutex); 3256 mutex_unlock(&priv->mutex);
3245 3257
3246 return rc; 3258 return ret;
3247} 3259}
3248 3260
3249static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, 3261static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -3332,7 +3344,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3332 * in 1X mode. 3344 * in 1X mode.
3333 * In legacy wep mode, we use another host command to the uCode */ 3345 * In legacy wep mode, we use another host command to the uCode */
3334 if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && 3346 if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
3335 priv->iw_mode != IEEE80211_IF_TYPE_AP) { 3347 priv->iw_mode != NL80211_IFTYPE_AP) {
3336 if (cmd == SET_KEY) 3348 if (cmd == SET_KEY)
3337 is_default_wep_key = !priv->key_mapping_key; 3349 is_default_wep_key = !priv->key_mapping_key;
3338 else 3350 else
@@ -3403,7 +3415,7 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3403 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; 3415 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
3404 priv->qos_data.qos_active = 1; 3416 priv->qos_data.qos_active = 1;
3405 3417
3406 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 3418 if (priv->iw_mode == NL80211_IFTYPE_AP)
3407 iwl_activate_qos(priv, 1); 3419 iwl_activate_qos(priv, 1);
3408 else if (priv->assoc_id && iwl_is_associated(priv)) 3420 else if (priv->assoc_id && iwl_is_associated(priv))
3409 iwl_activate_qos(priv, 0); 3421 iwl_activate_qos(priv, 0);
@@ -3416,13 +3428,13 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3416 3428
3417static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 3429static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3418 enum ieee80211_ampdu_mlme_action action, 3430 enum ieee80211_ampdu_mlme_action action,
3419 const u8 *addr, u16 tid, u16 *ssn) 3431 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3420{ 3432{
3421 struct iwl_priv *priv = hw->priv; 3433 struct iwl_priv *priv = hw->priv;
3422 DECLARE_MAC_BUF(mac); 3434 DECLARE_MAC_BUF(mac);
3423 3435
3424 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n", 3436 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
3425 print_mac(mac, addr), tid); 3437 print_mac(mac, sta->addr), tid);
3426 3438
3427 if (!(priv->cfg->sku & IWL_SKU_N)) 3439 if (!(priv->cfg->sku & IWL_SKU_N))
3428 return -EACCES; 3440 return -EACCES;
@@ -3430,16 +3442,16 @@ static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3430 switch (action) { 3442 switch (action) {
3431 case IEEE80211_AMPDU_RX_START: 3443 case IEEE80211_AMPDU_RX_START:
3432 IWL_DEBUG_HT("start Rx\n"); 3444 IWL_DEBUG_HT("start Rx\n");
3433 return iwl_rx_agg_start(priv, addr, tid, *ssn); 3445 return iwl_rx_agg_start(priv, sta->addr, tid, *ssn);
3434 case IEEE80211_AMPDU_RX_STOP: 3446 case IEEE80211_AMPDU_RX_STOP:
3435 IWL_DEBUG_HT("stop Rx\n"); 3447 IWL_DEBUG_HT("stop Rx\n");
3436 return iwl_rx_agg_stop(priv, addr, tid); 3448 return iwl_rx_agg_stop(priv, sta->addr, tid);
3437 case IEEE80211_AMPDU_TX_START: 3449 case IEEE80211_AMPDU_TX_START:
3438 IWL_DEBUG_HT("start Tx\n"); 3450 IWL_DEBUG_HT("start Tx\n");
3439 return iwl_tx_agg_start(priv, addr, tid, ssn); 3451 return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
3440 case IEEE80211_AMPDU_TX_STOP: 3452 case IEEE80211_AMPDU_TX_STOP:
3441 IWL_DEBUG_HT("stop Tx\n"); 3453 IWL_DEBUG_HT("stop Tx\n");
3442 return iwl_tx_agg_stop(priv, addr, tid); 3454 return iwl_tx_agg_stop(priv, sta->addr, tid);
3443 default: 3455 default:
3444 IWL_DEBUG_HT("unknown\n"); 3456 IWL_DEBUG_HT("unknown\n");
3445 return -EINVAL; 3457 return -EINVAL;
@@ -3521,7 +3533,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3521 3533
3522 priv->beacon_int = priv->hw->conf.beacon_int; 3534 priv->beacon_int = priv->hw->conf.beacon_int;
3523 priv->timestamp = 0; 3535 priv->timestamp = 0;
3524 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) 3536 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
3525 priv->beacon_int = 0; 3537 priv->beacon_int = 0;
3526 3538
3527 spin_unlock_irqrestore(&priv->lock, flags); 3539 spin_unlock_irqrestore(&priv->lock, flags);
@@ -3535,7 +3547,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3535 /* we are restarting association process 3547 /* we are restarting association process
3536 * clear RXON_FILTER_ASSOC_MSK bit 3548 * clear RXON_FILTER_ASSOC_MSK bit
3537 */ 3549 */
3538 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 3550 if (priv->iw_mode != NL80211_IFTYPE_AP) {
3539 iwl_scan_cancel_timeout(priv, 100); 3551 iwl_scan_cancel_timeout(priv, 100);
3540 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3552 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3541 iwl4965_commit_rxon(priv); 3553 iwl4965_commit_rxon(priv);
@@ -3544,7 +3556,17 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3544 iwl_power_update_mode(priv, 0); 3556 iwl_power_update_mode(priv, 0);
3545 3557
3546 /* Per mac80211.h: This is only used in IBSS mode... */ 3558 /* Per mac80211.h: This is only used in IBSS mode... */
3547 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 3559 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
3560
3561 /* switch to CAM during association period.
3562 * the ucode will block any association/authentication
3563 * frome during assiciation period if it can not hear
3564 * the AP because of PM. the timer enable PM back is
3565 * association do not complete
3566 */
3567 if (priv->hw->conf.channel->flags & (IEEE80211_CHAN_PASSIVE_SCAN |
3568 IEEE80211_CHAN_RADAR))
3569 iwl_power_disable_management(priv, 3000);
3548 3570
3549 IWL_DEBUG_MAC80211("leave - not in IBSS\n"); 3571 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
3550 mutex_unlock(&priv->mutex); 3572 mutex_unlock(&priv->mutex);
@@ -3573,7 +3595,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
3573 return -EIO; 3595 return -EIO;
3574 } 3596 }
3575 3597
3576 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 3598 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
3577 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 3599 IWL_DEBUG_MAC80211("leave - not IBSS\n");
3578 mutex_unlock(&priv->mutex); 3600 mutex_unlock(&priv->mutex);
3579 return -EIO; 3601 return -EIO;
@@ -3630,11 +3652,11 @@ static ssize_t store_debug_level(struct device *d,
3630 const char *buf, size_t count) 3652 const char *buf, size_t count)
3631{ 3653{
3632 struct iwl_priv *priv = d->driver_data; 3654 struct iwl_priv *priv = d->driver_data;
3633 char *p = (char *)buf; 3655 unsigned long val;
3634 u32 val; 3656 int ret;
3635 3657
3636 val = simple_strtoul(p, &p, 0); 3658 ret = strict_strtoul(buf, 0, &val);
3637 if (p == buf) 3659 if (ret)
3638 printk(KERN_INFO DRV_NAME 3660 printk(KERN_INFO DRV_NAME
3639 ": %s is not in hex or decimal form.\n", buf); 3661 ": %s is not in hex or decimal form.\n", buf);
3640 else 3662 else
@@ -3706,11 +3728,11 @@ static ssize_t store_tx_power(struct device *d,
3706 const char *buf, size_t count) 3728 const char *buf, size_t count)
3707{ 3729{
3708 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3730 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3709 char *p = (char *)buf; 3731 unsigned long val;
3710 u32 val; 3732 int ret;
3711 3733
3712 val = simple_strtoul(p, &p, 10); 3734 ret = strict_strtoul(buf, 10, &val);
3713 if (p == buf) 3735 if (ret)
3714 printk(KERN_INFO DRV_NAME 3736 printk(KERN_INFO DRV_NAME
3715 ": %s is not in decimal form.\n", buf); 3737 ": %s is not in decimal form.\n", buf);
3716 else 3738 else
@@ -3734,7 +3756,12 @@ static ssize_t store_flags(struct device *d,
3734 const char *buf, size_t count) 3756 const char *buf, size_t count)
3735{ 3757{
3736 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3758 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3737 u32 flags = simple_strtoul(buf, NULL, 0); 3759 unsigned long val;
3760 u32 flags;
3761 int ret = strict_strtoul(buf, 0, &val);
3762 if (ret)
3763 return ret;
3764 flags = (u32)val;
3738 3765
3739 mutex_lock(&priv->mutex); 3766 mutex_lock(&priv->mutex);
3740 if (le32_to_cpu(priv->staging_rxon.flags) != flags) { 3767 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
@@ -3742,8 +3769,7 @@ static ssize_t store_flags(struct device *d,
3742 if (iwl_scan_cancel_timeout(priv, 100)) 3769 if (iwl_scan_cancel_timeout(priv, 100))
3743 IWL_WARNING("Could not cancel scan.\n"); 3770 IWL_WARNING("Could not cancel scan.\n");
3744 else { 3771 else {
3745 IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", 3772 IWL_DEBUG_INFO("Commit rxon.flags = 0x%04X\n", flags);
3746 flags);
3747 priv->staging_rxon.flags = cpu_to_le32(flags); 3773 priv->staging_rxon.flags = cpu_to_le32(flags);
3748 iwl4965_commit_rxon(priv); 3774 iwl4965_commit_rxon(priv);
3749 } 3775 }
@@ -3769,7 +3795,12 @@ static ssize_t store_filter_flags(struct device *d,
3769 const char *buf, size_t count) 3795 const char *buf, size_t count)
3770{ 3796{
3771 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3797 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3772 u32 filter_flags = simple_strtoul(buf, NULL, 0); 3798 unsigned long val;
3799 u32 filter_flags;
3800 int ret = strict_strtoul(buf, 0, &val);
3801 if (ret)
3802 return ret;
3803 filter_flags = (u32)val;
3773 3804
3774 mutex_lock(&priv->mutex); 3805 mutex_lock(&priv->mutex);
3775 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { 3806 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
@@ -3870,10 +3901,12 @@ static ssize_t store_retry_rate(struct device *d,
3870 const char *buf, size_t count) 3901 const char *buf, size_t count)
3871{ 3902{
3872 struct iwl_priv *priv = dev_get_drvdata(d); 3903 struct iwl_priv *priv = dev_get_drvdata(d);
3904 long val;
3905 int ret = strict_strtol(buf, 10, &val);
3906 if (!ret)
3907 return ret;
3873 3908
3874 priv->retry_rate = simple_strtoul(buf, NULL, 0); 3909 priv->retry_rate = (val > 0) ? val : 1;
3875 if (priv->retry_rate <= 0)
3876 priv->retry_rate = 1;
3877 3910
3878 return count; 3911 return count;
3879} 3912}
@@ -3894,9 +3927,9 @@ static ssize_t store_power_level(struct device *d,
3894{ 3927{
3895 struct iwl_priv *priv = dev_get_drvdata(d); 3928 struct iwl_priv *priv = dev_get_drvdata(d);
3896 int ret; 3929 int ret;
3897 int mode; 3930 unsigned long mode;
3931
3898 3932
3899 mode = simple_strtoul(buf, NULL, 0);
3900 mutex_lock(&priv->mutex); 3933 mutex_lock(&priv->mutex);
3901 3934
3902 if (!iwl_is_ready(priv)) { 3935 if (!iwl_is_ready(priv)) {
@@ -3904,6 +3937,10 @@ static ssize_t store_power_level(struct device *d,
3904 goto out; 3937 goto out;
3905 } 3938 }
3906 3939
3940 ret = strict_strtoul(buf, 10, &mode);
3941 if (ret)
3942 goto out;
3943
3907 ret = iwl_power_set_user_mode(priv, mode); 3944 ret = iwl_power_set_user_mode(priv, mode);
3908 if (ret) { 3945 if (ret) {
3909 IWL_DEBUG_MAC80211("failed setting power mode.\n"); 3946 IWL_DEBUG_MAC80211("failed setting power mode.\n");
@@ -4083,6 +4120,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
4083 /* FIXME : remove when resolved PENDING */ 4120 /* FIXME : remove when resolved PENDING */
4084 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 4121 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
4085 iwl_setup_scan_deferred_work(priv); 4122 iwl_setup_scan_deferred_work(priv);
4123 iwl_setup_power_deferred_work(priv);
4086 4124
4087 if (priv->cfg->ops->lib->setup_deferred_work) 4125 if (priv->cfg->ops->lib->setup_deferred_work)
4088 priv->cfg->ops->lib->setup_deferred_work(priv); 4126 priv->cfg->ops->lib->setup_deferred_work(priv);
@@ -4102,6 +4140,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
4102 4140
4103 cancel_delayed_work_sync(&priv->init_alive_start); 4141 cancel_delayed_work_sync(&priv->init_alive_start);
4104 cancel_delayed_work(&priv->scan_check); 4142 cancel_delayed_work(&priv->scan_check);
4143 cancel_delayed_work_sync(&priv->set_power_save);
4105 cancel_delayed_work(&priv->alive_start); 4144 cancel_delayed_work(&priv->alive_start);
4106 cancel_work_sync(&priv->beacon_update); 4145 cancel_work_sync(&priv->beacon_update);
4107 del_timer_sync(&priv->statistics_periodic); 4146 del_timer_sync(&priv->statistics_periodic);
@@ -4150,7 +4189,7 @@ static struct ieee80211_ops iwl4965_hw_ops = {
4150 .reset_tsf = iwl4965_mac_reset_tsf, 4189 .reset_tsf = iwl4965_mac_reset_tsf,
4151 .bss_info_changed = iwl4965_bss_info_changed, 4190 .bss_info_changed = iwl4965_bss_info_changed,
4152 .ampdu_action = iwl4965_mac_ampdu_action, 4191 .ampdu_action = iwl4965_mac_ampdu_action,
4153 .hw_scan = iwl4965_mac_hw_scan 4192 .hw_scan = iwl_mac_hw_scan
4154}; 4193};
4155 4194
4156static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 4195static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -4204,13 +4243,13 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4204 4243
4205 pci_set_master(pdev); 4244 pci_set_master(pdev);
4206 4245
4207 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); 4246 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
4208 if (!err) 4247 if (!err)
4209 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); 4248 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
4210 if (err) { 4249 if (err) {
4211 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 4250 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4212 if (!err) 4251 if (!err)
4213 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 4252 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4214 /* both attempts failed: */ 4253 /* both attempts failed: */
4215 if (err) { 4254 if (err) {
4216 printk(KERN_WARNING "%s: No suitable DMA available.\n", 4255 printk(KERN_WARNING "%s: No suitable DMA available.\n",
@@ -4225,9 +4264,6 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4225 4264
4226 pci_set_drvdata(pdev, priv); 4265 pci_set_drvdata(pdev, priv);
4227 4266
4228 /* We disable the RETRY_TIMEOUT register (0x41) to keep
4229 * PCI Tx retries from interfering with C3 CPU state */
4230 pci_write_config_byte(pdev, 0x41, 0x00);
4231 4267
4232 /*********************** 4268 /***********************
4233 * 3. Read REV register 4269 * 3. Read REV register
@@ -4247,6 +4283,10 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4247 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n", 4283 ": Detected Intel Wireless WiFi Link %s REV=0x%X\n",
4248 priv->cfg->name, priv->hw_rev); 4284 priv->cfg->name, priv->hw_rev);
4249 4285
4286 /* We disable the RETRY_TIMEOUT register (0x41) to keep
4287 * PCI Tx retries from interfering with C3 CPU state */
4288 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
4289
4250 /* amp init */ 4290 /* amp init */
4251 err = priv->cfg->ops->lib->apm_ops.init(priv); 4291 err = priv->cfg->ops->lib->apm_ops.init(priv);
4252 if (err < 0) { 4292 if (err < 0) {
@@ -4481,7 +4521,10 @@ static struct pci_device_id iwl_hw_card_ids[] = {
4481 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)}, 4521 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)},
4482 {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)}, 4522 {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)},
4483 {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)}, 4523 {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)},
4484 {IWL_PCI_DEVICE(0x423A, PCI_ANY_ID, iwl5350_agn_cfg)}, 4524/* 5350 WiFi/WiMax */
4525 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)},
4526 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)},
4527 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)},
4485#endif /* CONFIG_IWL5000 */ 4528#endif /* CONFIG_IWL5000 */
4486 {0} 4529 {0}
4487}; 4530};
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index ef49440bd7f6..72fbf47229db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -66,6 +66,66 @@
66#include "iwl-core.h" 66#include "iwl-core.h"
67#include "iwl-calib.h" 67#include "iwl-calib.h"
68 68
69/*****************************************************************************
70 * INIT calibrations framework
71 *****************************************************************************/
72
73 int iwl_send_calib_results(struct iwl_priv *priv)
74{
75 int ret = 0;
76 int i = 0;
77
78 struct iwl_host_cmd hcmd = {
79 .id = REPLY_PHY_CALIBRATION_CMD,
80 .meta.flags = CMD_SIZE_HUGE,
81 };
82
83 for (i = 0; i < IWL_CALIB_MAX; i++)
84 if (priv->calib_results[i].buf) {
85 hcmd.len = priv->calib_results[i].buf_len;
86 hcmd.data = priv->calib_results[i].buf;
87 ret = iwl_send_cmd_sync(priv, &hcmd);
88 if (ret)
89 goto err;
90 }
91
92 return 0;
93err:
94 IWL_ERROR("Error %d iteration %d\n", ret, i);
95 return ret;
96}
97EXPORT_SYMBOL(iwl_send_calib_results);
98
99int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
100{
101 if (res->buf_len != len) {
102 kfree(res->buf);
103 res->buf = kzalloc(len, GFP_ATOMIC);
104 }
105 if (unlikely(res->buf == NULL))
106 return -ENOMEM;
107
108 res->buf_len = len;
109 memcpy(res->buf, buf, len);
110 return 0;
111}
112EXPORT_SYMBOL(iwl_calib_set);
113
114void iwl_calib_free_results(struct iwl_priv *priv)
115{
116 int i;
117
118 for (i = 0; i < IWL_CALIB_MAX; i++) {
119 kfree(priv->calib_results[i].buf);
120 priv->calib_results[i].buf = NULL;
121 priv->calib_results[i].buf_len = 0;
122 }
123}
124
125/*****************************************************************************
126 * RUNTIME calibrations framework
127 *****************************************************************************/
128
69/* "false alarms" are signals that our DSP tries to lock onto, 129/* "false alarms" are signals that our DSP tries to lock onto,
70 * but then determines that they are either noise, or transmissions 130 * but then determines that they are either noise, or transmissions
71 * from a distant wireless network (also "noise", really) that get 131 * from a distant wireless network (also "noise", really) that get
@@ -748,13 +808,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
748 } 808 }
749 } 809 }
750 810
811 /* Save for use within RXON, TX, SCAN commands, etc. */
812 priv->chain_noise_data.active_chains = active_chains;
751 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n", 813 IWL_DEBUG_CALIB("active_chains (bitwise) = 0x%x\n",
752 active_chains); 814 active_chains);
753 815
754 /* Save for use within RXON, TX, SCAN commands, etc. */
755 /*priv->valid_antenna = active_chains;*/
756 /*FIXME: should be reflected in RX chains in RXON */
757
758 /* Analyze noise for rx balance */ 816 /* Analyze noise for rx balance */
759 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); 817 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS);
760 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS); 818 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS);
@@ -779,6 +837,15 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
779 837
780 priv->cfg->ops->utils->gain_computation(priv, average_noise, 838 priv->cfg->ops->utils->gain_computation(priv, average_noise,
781 min_average_noise_antenna_i, min_average_noise); 839 min_average_noise_antenna_i, min_average_noise);
840
841 /* Some power changes may have been made during the calibration.
842 * Update and commit the RXON
843 */
844 if (priv->cfg->ops->lib->update_chain_flags)
845 priv->cfg->ops->lib->update_chain_flags(priv);
846
847 data->state = IWL_CHAIN_NOISE_DONE;
848 iwl_power_enable_management(priv);
782} 849}
783EXPORT_SYMBOL(iwl_chain_noise_calibration); 850EXPORT_SYMBOL(iwl_chain_noise_calibration);
784 851
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 28b5b09996ed..8d04e966ad48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -163,6 +163,13 @@ enum {
163/* iwl_cmd_header flags value */ 163/* iwl_cmd_header flags value */
164#define IWL_CMD_FAILED_MSK 0x40 164#define IWL_CMD_FAILED_MSK 0x40
165 165
166#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
167#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
168#define SEQ_TO_INDEX(s) ((s) & 0xff)
169#define INDEX_TO_SEQ(i) ((i) & 0xff)
170#define SEQ_HUGE_FRAME __constant_cpu_to_le16(0x4000)
171#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
172
166/** 173/**
167 * struct iwl_cmd_header 174 * struct iwl_cmd_header
168 * 175 *
@@ -171,7 +178,7 @@ enum {
171 */ 178 */
172struct iwl_cmd_header { 179struct iwl_cmd_header {
173 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 180 u8 cmd; /* Command ID: REPLY_RXON, etc. */
174 u8 flags; /* IWL_CMD_* */ 181 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
175 /* 182 /*
176 * The driver sets up the sequence number to values of its chosing. 183 * The driver sets up the sequence number to values of its chosing.
177 * uCode does not use this value, but passes it back to the driver 184 * uCode does not use this value, but passes it back to the driver
@@ -187,11 +194,12 @@ struct iwl_cmd_header {
187 * 194 *
188 * The Linux driver uses the following format: 195 * The Linux driver uses the following format:
189 * 196 *
190 * 0:7 index/position within Tx queue 197 * 0:7 tfd index - position within TX queue
191 * 8:13 Tx queue selection 198 * 8:12 TX queue id
192 * 14:14 driver sets this to indicate command is in the 'huge' 199 * 13 reserved
193 * storage at the end of the command buffers, i.e. scan cmd 200 * 14 huge - driver sets this to indicate command is in the
194 * 15:15 uCode sets this in uCode-originated response/notification 201 * 'huge' storage at the end of the command buffers
202 * 15 unsolicited RX or uCode-originated notification
195 */ 203 */
196 __le16 sequence; 204 __le16 sequence;
197 205
@@ -2026,8 +2034,8 @@ struct iwl4965_spectrum_notification {
2026 * bit 2 - '0' PM have to walk up every DTIM 2034 * bit 2 - '0' PM have to walk up every DTIM
2027 * '1' PM could sleep over DTIM till listen Interval. 2035 * '1' PM could sleep over DTIM till listen Interval.
2028 * PCI power managed 2036 * PCI power managed
2029 * bit 3 - '0' (PCI_LINK_CTRL & 0x1) 2037 * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
2030 * '1' !(PCI_LINK_CTRL & 0x1) 2038 * '1' !(PCI_CFG_LINK_CTRL & 0x1)
2031 * Force sleep Modes 2039 * Force sleep Modes
2032 * bit 31/30- '00' use both mac/xtal sleeps 2040 * bit 31/30- '00' use both mac/xtal sleeps
2033 * '01' force Mac sleep 2041 * '01' force Mac sleep
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 80f2f84defa8..4c312c55f90c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -306,14 +306,14 @@ void iwl_reset_qos(struct iwl_priv *priv)
306 spin_lock_irqsave(&priv->lock, flags); 306 spin_lock_irqsave(&priv->lock, flags);
307 priv->qos_data.qos_active = 0; 307 priv->qos_data.qos_active = 0;
308 308
309 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { 309 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
310 if (priv->qos_data.qos_enable) 310 if (priv->qos_data.qos_enable)
311 priv->qos_data.qos_active = 1; 311 priv->qos_data.qos_active = 1;
312 if (!(priv->active_rate & 0xfff0)) { 312 if (!(priv->active_rate & 0xfff0)) {
313 cw_min = 31; 313 cw_min = 31;
314 is_legacy = 1; 314 is_legacy = 1;
315 } 315 }
316 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 316 } else if (priv->iw_mode == NL80211_IFTYPE_AP) {
317 if (priv->qos_data.qos_enable) 317 if (priv->qos_data.qos_enable)
318 priv->qos_data.qos_active = 1; 318 priv->qos_data.qos_active = 1;
319 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { 319 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
@@ -399,8 +399,8 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
399 399
400 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD; 400 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD;
401 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20; 401 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
402 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS & 402 ht_info->cap |= (u16)(IEEE80211_HT_CAP_SM_PS &
403 (IWL_MIMO_PS_NONE << 2)); 403 (WLAN_HT_CAP_SM_PS_DISABLED << 2));
404 404
405 max_bit_rate = MAX_BIT_RATE_20_MHZ; 405 max_bit_rate = MAX_BIT_RATE_20_MHZ;
406 if (priv->hw_params.fat_channel & BIT(band)) { 406 if (priv->hw_params.fat_channel & BIT(band)) {
@@ -646,8 +646,14 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
646 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 646 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
647 u32 val; 647 u32 val;
648 648
649 if (!ht_info->is_ht) 649 if (!ht_info->is_ht) {
650 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
651 RXON_FLG_CHANNEL_MODE_PURE_40_MSK |
652 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
653 RXON_FLG_FAT_PROT_MSK |
654 RXON_FLG_HT_PROT_MSK);
650 return; 655 return;
656 }
651 657
652 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */ 658 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
653 if (iwl_is_fat_tx_allowed(priv, NULL)) 659 if (iwl_is_fat_tx_allowed(priv, NULL))
@@ -697,8 +703,12 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
697} 703}
698EXPORT_SYMBOL(iwl_set_rxon_ht); 704EXPORT_SYMBOL(iwl_set_rxon_ht);
699 705
700/* 706#define IWL_NUM_RX_CHAINS_MULTIPLE 3
701 * Determine how many receiver/antenna chains to use. 707#define IWL_NUM_RX_CHAINS_SINGLE 2
708#define IWL_NUM_IDLE_CHAINS_DUAL 2
709#define IWL_NUM_IDLE_CHAINS_SINGLE 1
710
711/* Determine how many receiver/antenna chains to use.
702 * More provides better reception via diversity. Fewer saves power. 712 * More provides better reception via diversity. Fewer saves power.
703 * MIMO (dual stream) requires at least 2, but works better with 3. 713 * MIMO (dual stream) requires at least 2, but works better with 3.
704 * This does not determine *which* chains to use, just how many. 714 * This does not determine *which* chains to use, just how many.
@@ -709,10 +719,11 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
709 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 719 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
710 720
711 /* # of Rx chains to use when expecting MIMO. */ 721 /* # of Rx chains to use when expecting MIMO. */
712 if (is_single || (!is_cam && (priv->ps_mode == IWL_MIMO_PS_STATIC))) 722 if (is_single || (!is_cam && (priv->current_ht_config.sm_ps ==
713 return 2; 723 WLAN_HT_CAP_SM_PS_STATIC)))
724 return IWL_NUM_RX_CHAINS_SINGLE;
714 else 725 else
715 return 3; 726 return IWL_NUM_RX_CHAINS_MULTIPLE;
716} 727}
717 728
718static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) 729static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
@@ -720,17 +731,19 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
720 int idle_cnt; 731 int idle_cnt;
721 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 732 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
722 /* # Rx chains when idling and maybe trying to save power */ 733 /* # Rx chains when idling and maybe trying to save power */
723 switch (priv->ps_mode) { 734 switch (priv->current_ht_config.sm_ps) {
724 case IWL_MIMO_PS_STATIC: 735 case WLAN_HT_CAP_SM_PS_STATIC:
725 case IWL_MIMO_PS_DYNAMIC: 736 case WLAN_HT_CAP_SM_PS_DYNAMIC:
726 idle_cnt = (is_cam) ? 2 : 1; 737 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
738 IWL_NUM_IDLE_CHAINS_SINGLE;
727 break; 739 break;
728 case IWL_MIMO_PS_NONE: 740 case WLAN_HT_CAP_SM_PS_DISABLED:
729 idle_cnt = (is_cam) ? active_cnt : 1; 741 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
730 break; 742 break;
731 case IWL_MIMO_PS_INVALID: 743 case WLAN_HT_CAP_SM_PS_INVALID:
732 default: 744 default:
733 IWL_ERROR("invalide mimo ps mode %d\n", priv->ps_mode); 745 IWL_ERROR("invalide mimo ps mode %d\n",
746 priv->current_ht_config.sm_ps);
734 WARN_ON(1); 747 WARN_ON(1);
735 idle_cnt = -1; 748 idle_cnt = -1;
736 break; 749 break;
@@ -738,6 +751,17 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
738 return idle_cnt; 751 return idle_cnt;
739} 752}
740 753
754/* up to 4 chains */
755static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
756{
757 u8 res;
758 res = (chain_bitmap & BIT(0)) >> 0;
759 res += (chain_bitmap & BIT(1)) >> 1;
760 res += (chain_bitmap & BIT(2)) >> 2;
761 res += (chain_bitmap & BIT(4)) >> 4;
762 return res;
763}
764
741/** 765/**
742 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image 766 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
743 * 767 *
@@ -748,37 +772,47 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
748{ 772{
749 bool is_single = is_single_rx_stream(priv); 773 bool is_single = is_single_rx_stream(priv);
750 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 774 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
751 u8 idle_rx_cnt, active_rx_cnt; 775 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
776 u32 active_chains;
752 u16 rx_chain; 777 u16 rx_chain;
753 778
754 /* Tell uCode which antennas are actually connected. 779 /* Tell uCode which antennas are actually connected.
755 * Before first association, we assume all antennas are connected. 780 * Before first association, we assume all antennas are connected.
756 * Just after first association, iwl_chain_noise_calibration() 781 * Just after first association, iwl_chain_noise_calibration()
757 * checks which antennas actually *are* connected. */ 782 * checks which antennas actually *are* connected. */
758 rx_chain = priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 783 if (priv->chain_noise_data.active_chains)
784 active_chains = priv->chain_noise_data.active_chains;
785 else
786 active_chains = priv->hw_params.valid_rx_ant;
787
788 rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
759 789
760 /* How many receivers should we use? */ 790 /* How many receivers should we use? */
761 active_rx_cnt = iwl_get_active_rx_chain_count(priv); 791 active_rx_cnt = iwl_get_active_rx_chain_count(priv);
762 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt); 792 idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
763 793
764 /* correct rx chain count accoridng hw settings */
765 if (priv->hw_params.rx_chains_num < active_rx_cnt)
766 active_rx_cnt = priv->hw_params.rx_chains_num;
767 794
768 if (priv->hw_params.rx_chains_num < idle_rx_cnt) 795 /* correct rx chain count according hw settings
769 idle_rx_cnt = priv->hw_params.rx_chains_num; 796 * and chain noise calibration
797 */
798 valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
799 if (valid_rx_cnt < active_rx_cnt)
800 active_rx_cnt = valid_rx_cnt;
801
802 if (valid_rx_cnt < idle_rx_cnt)
803 idle_rx_cnt = valid_rx_cnt;
770 804
771 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; 805 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
772 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; 806 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
773 807
774 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain); 808 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
775 809
776 if (!is_single && (active_rx_cnt >= 2) && is_cam) 810 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
777 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; 811 priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
778 else 812 else
779 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; 813 priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
780 814
781 IWL_DEBUG_ASSOC("rx_chain=0x%Xi active=%d idle=%d\n", 815 IWL_DEBUG_ASSOC("rx_chain=0x%X active=%d idle=%d\n",
782 priv->staging_rxon.rx_chain, 816 priv->staging_rxon.rx_chain,
783 active_rx_cnt, idle_rx_cnt); 817 active_rx_cnt, idle_rx_cnt);
784 818
@@ -788,7 +822,7 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
788EXPORT_SYMBOL(iwl_set_rxon_chain); 822EXPORT_SYMBOL(iwl_set_rxon_chain);
789 823
790/** 824/**
791 * iwlcore_set_rxon_channel - Set the phymode and channel values in staging RXON 825 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
792 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz 826 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
793 * @channel: Any channel valid for the requested phymode 827 * @channel: Any channel valid for the requested phymode
794 828
@@ -797,10 +831,11 @@ EXPORT_SYMBOL(iwl_set_rxon_chain);
797 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 831 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
798 * in the staging RXON flag structure based on the phymode 832 * in the staging RXON flag structure based on the phymode
799 */ 833 */
800int iwl_set_rxon_channel(struct iwl_priv *priv, 834int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
801 enum ieee80211_band band,
802 u16 channel)
803{ 835{
836 enum ieee80211_band band = ch->band;
837 u16 channel = ieee80211_frequency_to_channel(ch->center_freq);
838
804 if (!iwl_get_channel_info(priv, band, channel)) { 839 if (!iwl_get_channel_info(priv, band, channel)) {
805 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", 840 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
806 channel, band); 841 channel, band);
@@ -834,6 +869,10 @@ int iwl_setup_mac(struct iwl_priv *priv)
834 /* Tell mac80211 our characteristics */ 869 /* Tell mac80211 our characteristics */
835 hw->flags = IEEE80211_HW_SIGNAL_DBM | 870 hw->flags = IEEE80211_HW_SIGNAL_DBM |
836 IEEE80211_HW_NOISE_DBM; 871 IEEE80211_HW_NOISE_DBM;
872 hw->wiphy->interface_modes =
873 BIT(NL80211_IFTYPE_AP) |
874 BIT(NL80211_IFTYPE_STATION) |
875 BIT(NL80211_IFTYPE_ADHOC);
837 /* Default value; 4 EDCA QOS priorities */ 876 /* Default value; 4 EDCA QOS priorities */
838 hw->queues = 4; 877 hw->queues = 4;
839 /* queues to support 11n aggregation */ 878 /* queues to support 11n aggregation */
@@ -891,7 +930,6 @@ int iwl_init_drv(struct iwl_priv *priv)
891 spin_lock_init(&priv->power_data.lock); 930 spin_lock_init(&priv->power_data.lock);
892 spin_lock_init(&priv->sta_lock); 931 spin_lock_init(&priv->sta_lock);
893 spin_lock_init(&priv->hcmd_lock); 932 spin_lock_init(&priv->hcmd_lock);
894 spin_lock_init(&priv->lq_mngr.lock);
895 933
896 INIT_LIST_HEAD(&priv->free_frames); 934 INIT_LIST_HEAD(&priv->free_frames);
897 935
@@ -905,10 +943,10 @@ int iwl_init_drv(struct iwl_priv *priv)
905 priv->ieee_rates = NULL; 943 priv->ieee_rates = NULL;
906 priv->band = IEEE80211_BAND_2GHZ; 944 priv->band = IEEE80211_BAND_2GHZ;
907 945
908 priv->iw_mode = IEEE80211_IF_TYPE_STA; 946 priv->iw_mode = NL80211_IFTYPE_STATION;
909 947
910 priv->use_ant_b_for_management_frame = 1; /* start with ant B */ 948 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
911 priv->ps_mode = IWL_MIMO_PS_NONE; 949 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
912 950
913 /* Choose which receivers/antennas to use */ 951 /* Choose which receivers/antennas to use */
914 iwl_set_rxon_chain(priv); 952 iwl_set_rxon_chain(priv);
@@ -922,8 +960,6 @@ int iwl_init_drv(struct iwl_priv *priv)
922 priv->qos_data.qos_active = 0; 960 priv->qos_data.qos_active = 0;
923 priv->qos_data.qos_cap.val = 0; 961 priv->qos_data.qos_cap.val = 0;
924 962
925 iwl_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
926
927 priv->rates_mask = IWL_RATES_MASK; 963 priv->rates_mask = IWL_RATES_MASK;
928 /* If power management is turned on, default to AC mode */ 964 /* If power management is turned on, default to AC mode */
929 priv->power_mode = IWL_POWER_AC; 965 priv->power_mode = IWL_POWER_AC;
@@ -950,22 +986,6 @@ err:
950} 986}
951EXPORT_SYMBOL(iwl_init_drv); 987EXPORT_SYMBOL(iwl_init_drv);
952 988
953void iwl_free_calib_results(struct iwl_priv *priv)
954{
955 kfree(priv->calib_results.lo_res);
956 priv->calib_results.lo_res = NULL;
957 priv->calib_results.lo_res_len = 0;
958
959 kfree(priv->calib_results.tx_iq_res);
960 priv->calib_results.tx_iq_res = NULL;
961 priv->calib_results.tx_iq_res_len = 0;
962
963 kfree(priv->calib_results.tx_iq_perd_res);
964 priv->calib_results.tx_iq_perd_res = NULL;
965 priv->calib_results.tx_iq_perd_res_len = 0;
966}
967EXPORT_SYMBOL(iwl_free_calib_results);
968
969int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 989int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
970{ 990{
971 int ret = 0; 991 int ret = 0;
@@ -993,10 +1013,9 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
993} 1013}
994EXPORT_SYMBOL(iwl_set_tx_power); 1014EXPORT_SYMBOL(iwl_set_tx_power);
995 1015
996
997void iwl_uninit_drv(struct iwl_priv *priv) 1016void iwl_uninit_drv(struct iwl_priv *priv)
998{ 1017{
999 iwl_free_calib_results(priv); 1018 iwl_calib_free_results(priv);
1000 iwlcore_free_geos(priv); 1019 iwlcore_free_geos(priv);
1001 iwl_free_channel_map(priv); 1020 iwl_free_channel_map(priv);
1002 kfree(priv->scan); 1021 kfree(priv->scan);
@@ -1150,7 +1169,6 @@ int iwl_verify_ucode(struct iwl_priv *priv)
1150} 1169}
1151EXPORT_SYMBOL(iwl_verify_ucode); 1170EXPORT_SYMBOL(iwl_verify_ucode);
1152 1171
1153
1154static const char *desc_lookup(int i) 1172static const char *desc_lookup(int i)
1155{ 1173{
1156 switch (i) { 1174 switch (i) {
@@ -1231,9 +1249,9 @@ EXPORT_SYMBOL(iwl_dump_nic_error_log);
1231/** 1249/**
1232 * iwl_print_event_log - Dump error event log to syslog 1250 * iwl_print_event_log - Dump error event log to syslog
1233 * 1251 *
1234 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained! 1252 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
1235 */ 1253 */
1236void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, 1254static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1237 u32 num_events, u32 mode) 1255 u32 num_events, u32 mode)
1238{ 1256{
1239 u32 i; 1257 u32 i;
@@ -1274,8 +1292,6 @@ void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1274 } 1292 }
1275 } 1293 }
1276} 1294}
1277EXPORT_SYMBOL(iwl_print_event_log);
1278
1279 1295
1280void iwl_dump_nic_event_log(struct iwl_priv *priv) 1296void iwl_dump_nic_event_log(struct iwl_priv *priv)
1281{ 1297{
@@ -1391,7 +1407,7 @@ void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv)
1391 1407
1392 iwl_scan_cancel(priv); 1408 iwl_scan_cancel(priv);
1393 /* FIXME: This is a workaround for AP */ 1409 /* FIXME: This is a workaround for AP */
1394 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 1410 if (priv->iw_mode != NL80211_IFTYPE_AP) {
1395 spin_lock_irqsave(&priv->lock, flags); 1411 spin_lock_irqsave(&priv->lock, flags);
1396 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 1412 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
1397 CSR_UCODE_SW_BIT_RFKILL); 1413 CSR_UCODE_SW_BIT_RFKILL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 64f139e97444..55a4b584ce07 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -184,14 +184,10 @@ struct iwl_cfg {
184struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 184struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
185 struct ieee80211_ops *hw_ops); 185 struct ieee80211_ops *hw_ops);
186void iwl_hw_detect(struct iwl_priv *priv); 186void iwl_hw_detect(struct iwl_priv *priv);
187
188void iwl_clear_stations_table(struct iwl_priv *priv); 187void iwl_clear_stations_table(struct iwl_priv *priv);
189void iwl_free_calib_results(struct iwl_priv *priv);
190void iwl_reset_qos(struct iwl_priv *priv); 188void iwl_reset_qos(struct iwl_priv *priv);
191void iwl_set_rxon_chain(struct iwl_priv *priv); 189void iwl_set_rxon_chain(struct iwl_priv *priv);
192int iwl_set_rxon_channel(struct iwl_priv *priv, 190int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
193 enum ieee80211_band band,
194 u16 channel);
195void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info); 191void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
196u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, 192u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
197 struct ieee80211_ht_info *sta_ht_inf); 193 struct ieee80211_ht_info *sta_ht_inf);
@@ -218,7 +214,6 @@ void iwl_rx_replenish(struct iwl_priv *priv);
218int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 214int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
219int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn); 215int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn);
220int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid); 216int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
221/* FIXME: remove when TX is moved to iwl core */
222int iwl_rx_queue_restock(struct iwl_priv *priv); 217int iwl_rx_queue_restock(struct iwl_priv *priv);
223int iwl_rx_queue_space(const struct iwl_rx_queue *q); 218int iwl_rx_queue_space(const struct iwl_rx_queue *q);
224void iwl_rx_allocate(struct iwl_priv *priv); 219void iwl_rx_allocate(struct iwl_priv *priv);
@@ -237,11 +232,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
237******************************************************/ 232******************************************************/
238int iwl_txq_ctx_reset(struct iwl_priv *priv); 233int iwl_txq_ctx_reset(struct iwl_priv *priv);
239int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 234int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
240/* FIXME: remove when free Tx is fully merged into iwlcore */
241int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
242void iwl_hw_txq_ctx_free(struct iwl_priv *priv); 235void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
243int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
244 dma_addr_t addr, u16 len);
245int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 236int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
246int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 237int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
247int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); 238int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
@@ -256,6 +247,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
256 * RF -Kill - here and not in iwl-rfkill.h to be available when 247 * RF -Kill - here and not in iwl-rfkill.h to be available when
257 * RF-kill subsystem is not compiled. 248 * RF-kill subsystem is not compiled.
258 ****************************************************/ 249 ****************************************************/
250void iwl_rf_kill(struct iwl_priv *priv);
259void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv); 251void iwl_radio_kill_sw_disable_radio(struct iwl_priv *priv);
260int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv); 252int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv);
261 253
@@ -286,11 +278,17 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
286void iwl_init_scan_params(struct iwl_priv *priv); 278void iwl_init_scan_params(struct iwl_priv *priv);
287int iwl_scan_cancel(struct iwl_priv *priv); 279int iwl_scan_cancel(struct iwl_priv *priv);
288int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 280int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
289const char *iwl_escape_essid(const char *essid, u8 essid_len);
290int iwl_scan_initiate(struct iwl_priv *priv); 281int iwl_scan_initiate(struct iwl_priv *priv);
291void iwl_setup_rx_scan_handlers(struct iwl_priv *priv); 282void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
292void iwl_setup_scan_deferred_work(struct iwl_priv *priv); 283void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
293 284
285/*******************************************************************************
286 * Calibrations - implemented in iwl-calib.c
287 ******************************************************************************/
288int iwl_send_calib_results(struct iwl_priv *priv);
289int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
290void iwl_calib_free_results(struct iwl_priv *priv);
291
294/***************************************************** 292/*****************************************************
295 * S e n d i n g H o s t C o m m a n d s * 293 * S e n d i n g H o s t C o m m a n d s *
296 *****************************************************/ 294 *****************************************************/
@@ -312,8 +310,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
312/***************************************************** 310/*****************************************************
313* Error Handling Debugging 311* Error Handling Debugging
314******************************************************/ 312******************************************************/
315void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
316 u32 num_events, u32 mode);
317void iwl_dump_nic_error_log(struct iwl_priv *priv); 313void iwl_dump_nic_error_log(struct iwl_priv *priv);
318void iwl_dump_nic_event_log(struct iwl_priv *priv); 314void iwl_dump_nic_event_log(struct iwl_priv *priv);
319 315
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 52629fbd835a..662edf4f8d22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -64,7 +64,7 @@
64#define CSR_BASE (0x000) 64#define CSR_BASE (0x000)
65 65
66#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ 66#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
67#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ 67#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
68#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ 68#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
69#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ 69#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
70#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ 70#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d2daa174df22..e548d67f87fd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -110,11 +110,12 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
110 * 110 *
111 */ 111 */
112 112
113#define IWL_DL_INFO (1 << 0) 113#define IWL_DL_INFO (1 << 0)
114#define IWL_DL_MAC80211 (1 << 1) 114#define IWL_DL_MAC80211 (1 << 1)
115#define IWL_DL_HOST_COMMAND (1 << 2) 115#define IWL_DL_HCMD (1 << 2)
116#define IWL_DL_STATE (1 << 3) 116#define IWL_DL_STATE (1 << 3)
117#define IWL_DL_MACDUMP (1 << 4) 117#define IWL_DL_MACDUMP (1 << 4)
118#define IWL_DL_HCMD_DUMP (1 << 5)
118#define IWL_DL_RADIO (1 << 7) 119#define IWL_DL_RADIO (1 << 7)
119#define IWL_DL_POWER (1 << 8) 120#define IWL_DL_POWER (1 << 8)
120#define IWL_DL_TEMP (1 << 9) 121#define IWL_DL_TEMP (1 << 9)
@@ -162,7 +163,8 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
162#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a) 163#define IWL_DEBUG_ISR(f, a...) IWL_DEBUG(IWL_DL_ISR, f, ## a)
163#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a) 164#define IWL_DEBUG_LED(f, a...) IWL_DEBUG(IWL_DL_LED, f, ## a)
164#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a) 165#define IWL_DEBUG_WEP(f, a...) IWL_DEBUG(IWL_DL_WEP, f, ## a)
165#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HOST_COMMAND, f, ## a) 166#define IWL_DEBUG_HC(f, a...) IWL_DEBUG(IWL_DL_HCMD, f, ## a)
167#define IWL_DEBUG_HC_DUMP(f, a...) IWL_DEBUG(IWL_DL_HCMD_DUMP, f, ## a)
166#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a) 168#define IWL_DEBUG_CALIB(f, a...) IWL_DEBUG(IWL_DL_CALIB, f, ## a)
167#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a) 169#define IWL_DEBUG_FW(f, a...) IWL_DEBUG(IWL_DL_FW, f, ## a)
168#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a) 170#define IWL_DEBUG_RF_KILL(f, a...) IWL_DEBUG(IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index cdfb343c7ec6..c018121085e9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -225,12 +225,6 @@ struct iwl_frame {
225 struct list_head list; 225 struct list_head list;
226}; 226};
227 227
228#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf)
229#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8)
230#define SEQ_TO_INDEX(x) ((u8)(x & 0xff))
231#define INDEX_TO_SEQ(x) ((u8)(x & 0xff))
232#define SEQ_HUGE_FRAME (0x4000)
233#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
234#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 228#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
235#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 229#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
236#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 230#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
@@ -412,6 +406,7 @@ struct iwl_ht_info {
412 /* self configuration data */ 406 /* self configuration data */
413 u8 is_ht; 407 u8 is_ht;
414 u8 supported_chan_width; 408 u8 supported_chan_width;
409 u8 sm_ps;
415 u8 is_green_field; 410 u8 is_green_field;
416 u8 sgf; /* HT_SHORT_GI_* short guard interval */ 411 u8 sgf; /* HT_SHORT_GI_* short guard interval */
417 u8 max_amsdu_size; 412 u8 max_amsdu_size;
@@ -570,50 +565,31 @@ struct iwl_hw_params {
570#define IWL_RX_STATS(x) (&x->u.rx_frame.stats) 565#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
571#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload) 566#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
572 567
573
574/****************************************************************************** 568/******************************************************************************
575 * 569 *
576 * Functions implemented in iwl-base.c which are forward declared here 570 * Functions implemented in core module which are forward declared here
577 * for use by iwl-*.c 571 * for use by iwl-[4-5].c
578 * 572 *
579 *****************************************************************************/ 573 * NOTE: The implementation of these functions are not hardware specific
580struct iwl_addsta_cmd; 574 * which is why they are in the core module files.
581extern int iwl_send_add_sta(struct iwl_priv *priv,
582 struct iwl_addsta_cmd *sta, u8 flags);
583u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
584 u8 flags, struct ieee80211_ht_info *ht_info);
585extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
586 struct ieee80211_hdr *hdr,
587 const u8 *dest, int left);
588extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
589int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
590extern int iwl4965_set_power(struct iwl_priv *priv, void *cmd);
591
592extern const u8 iwl_bcast_addr[ETH_ALEN];
593
594/******************************************************************************
595 *
596 * Functions implemented in iwl-[34]*.c which are forward declared here
597 * for use by iwl-base.c
598 *
599 * NOTE: The implementation of these functions are hardware specific
600 * which is why they are in the hardware specific files (vs. iwl-base.c)
601 * 575 *
602 * Naming convention -- 576 * Naming convention --
603 * iwl4965_ <-- Its part of iwlwifi (should be changed to iwl4965_) 577 * iwl_ <-- Is part of iwlwifi
604 * iwl4965_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
605 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) 578 * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
606 * iwl4965_bg_ <-- Called from work queue context 579 * iwl4965_bg_ <-- Called from work queue context
607 * iwl4965_mac_ <-- mac80211 callback 580 * iwl4965_mac_ <-- mac80211 callback
608 * 581 *
609 ****************************************************************************/ 582 ****************************************************************************/
583struct iwl_addsta_cmd;
584extern int iwl_send_add_sta(struct iwl_priv *priv,
585 struct iwl_addsta_cmd *sta, u8 flags);
586extern u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr,
587 int is_ap, u8 flags, struct ieee80211_ht_info *ht_info);
588extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
589extern int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
590extern const u8 iwl_bcast_addr[ETH_ALEN];
610extern int iwl_rxq_stop(struct iwl_priv *priv); 591extern int iwl_rxq_stop(struct iwl_priv *priv);
611extern void iwl_txq_ctx_stop(struct iwl_priv *priv); 592extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
612extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
613 struct iwl_frame *frame, u8 rate);
614extern void iwl4965_disable_events(struct iwl_priv *priv);
615
616extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel);
617extern int iwl_queue_space(const struct iwl_queue *q); 593extern int iwl_queue_space(const struct iwl_queue *q);
618static inline int iwl_queue_used(const struct iwl_queue *q, int i) 594static inline int iwl_queue_used(const struct iwl_queue *q, int i)
619{ 595{
@@ -636,12 +612,6 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
636 612
637struct iwl_priv; 613struct iwl_priv;
638 614
639/*
640 * Forward declare iwl-4965.c functions for iwl-base.c
641 */
642extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
643int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
644 u8 tid, int txq_id);
645 615
646/* Structures, enum, and defines specific to the 4965 */ 616/* Structures, enum, and defines specific to the 4965 */
647 617
@@ -656,11 +626,6 @@ struct iwl_kw {
656#define IWL_CHANNEL_WIDTH_20MHZ 0 626#define IWL_CHANNEL_WIDTH_20MHZ 0
657#define IWL_CHANNEL_WIDTH_40MHZ 1 627#define IWL_CHANNEL_WIDTH_40MHZ 1
658 628
659#define IWL_MIMO_PS_STATIC 0
660#define IWL_MIMO_PS_NONE 3
661#define IWL_MIMO_PS_DYNAMIC 1
662#define IWL_MIMO_PS_INVALID 2
663
664#define IWL_OPERATION_MODE_AUTO 0 629#define IWL_OPERATION_MODE_AUTO 0
665#define IWL_OPERATION_MODE_HT_ONLY 1 630#define IWL_OPERATION_MODE_HT_ONLY 1
666#define IWL_OPERATION_MODE_MIXED 2 631#define IWL_OPERATION_MODE_MIXED 2
@@ -671,18 +636,6 @@ struct iwl_kw {
671 636
672#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 637#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
673 638
674struct iwl4965_lq_mngr {
675 spinlock_t lock;
676 s32 max_window_size;
677 s32 *expected_tpt;
678 u8 *next_higher_rate;
679 u8 *next_lower_rate;
680 unsigned long stamp;
681 unsigned long stamp_last;
682 u32 flush_time;
683 u32 tx_packets;
684};
685
686/* Sensitivity and chain noise calibration */ 639/* Sensitivity and chain noise calibration */
687#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1) 640#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1)
688#define INITIALIZATION_VALUE 0xFFFF 641#define INITIALIZATION_VALUE 0xFFFF
@@ -727,8 +680,9 @@ enum iwl4965_false_alarm_state {
727 680
728enum iwl4965_chain_noise_state { 681enum iwl4965_chain_noise_state {
729 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ 682 IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
730 IWL_CHAIN_NOISE_ACCUMULATE = 1, 683 IWL_CHAIN_NOISE_ACCUMULATE,
731 IWL_CHAIN_NOISE_CALIBRATED = 2, 684 IWL_CHAIN_NOISE_CALIBRATED,
685 IWL_CHAIN_NOISE_DONE,
732}; 686};
733 687
734enum iwl4965_calib_enabled_state { 688enum iwl4965_calib_enabled_state {
@@ -745,13 +699,10 @@ struct statistics_general_data {
745 u32 beacon_energy_c; 699 u32 beacon_energy_c;
746}; 700};
747 701
748struct iwl_calib_results { 702/* Opaque calibration results */
749 void *tx_iq_res; 703struct iwl_calib_result {
750 void *tx_iq_perd_res; 704 void *buf;
751 void *lo_res; 705 size_t buf_len;
752 u32 tx_iq_res_len;
753 u32 tx_iq_perd_res_len;
754 u32 lo_res_len;
755}; 706};
756 707
757enum ucode_type { 708enum ucode_type {
@@ -789,17 +740,18 @@ struct iwl_sensitivity_data {
789 740
790/* Chain noise (differential Rx gain) calib data */ 741/* Chain noise (differential Rx gain) calib data */
791struct iwl_chain_noise_data { 742struct iwl_chain_noise_data {
792 u8 state; 743 u32 active_chains;
793 u16 beacon_count;
794 u32 chain_noise_a; 744 u32 chain_noise_a;
795 u32 chain_noise_b; 745 u32 chain_noise_b;
796 u32 chain_noise_c; 746 u32 chain_noise_c;
797 u32 chain_signal_a; 747 u32 chain_signal_a;
798 u32 chain_signal_b; 748 u32 chain_signal_b;
799 u32 chain_signal_c; 749 u32 chain_signal_c;
750 u16 beacon_count;
800 u8 disconn_array[NUM_RX_CHAINS]; 751 u8 disconn_array[NUM_RX_CHAINS];
801 u8 delta_gain_code[NUM_RX_CHAINS]; 752 u8 delta_gain_code[NUM_RX_CHAINS];
802 u8 radio_write; 753 u8 radio_write;
754 u8 state;
803}; 755};
804 756
805#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ 757#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
@@ -813,6 +765,7 @@ enum {
813 765
814 766
815#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */ 767#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
768#define IWL_CALIB_MAX 3
816 769
817struct iwl_priv { 770struct iwl_priv {
818 771
@@ -828,7 +781,6 @@ struct iwl_priv {
828 781
829 enum ieee80211_band band; 782 enum ieee80211_band band;
830 int alloc_rxb_skb; 783 int alloc_rxb_skb;
831 bool add_radiotap;
832 784
833 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 785 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
834 struct iwl_rx_mem_buffer *rxb); 786 struct iwl_rx_mem_buffer *rxb);
@@ -857,7 +809,7 @@ struct iwl_priv {
857 s32 last_temperature; 809 s32 last_temperature;
858 810
859 /* init calibration results */ 811 /* init calibration results */
860 struct iwl_calib_results calib_results; 812 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
861 813
862 /* Scan related variables */ 814 /* Scan related variables */
863 unsigned long last_scan_jiffies; 815 unsigned long last_scan_jiffies;
@@ -939,9 +891,6 @@ struct iwl_priv {
939 u8 last_phy_res[100]; 891 u8 last_phy_res[100];
940 892
941 /* Rate scaling data */ 893 /* Rate scaling data */
942 struct iwl4965_lq_mngr lq_mngr;
943
944 /* Rate scaling data */
945 s8 data_retry_limit; 894 s8 data_retry_limit;
946 u8 retry_rate; 895 u8 retry_rate;
947 896
@@ -1005,7 +954,7 @@ struct iwl_priv {
1005 u8 *eeprom; 954 u8 *eeprom;
1006 struct iwl_eeprom_calib_info *calib_info; 955 struct iwl_eeprom_calib_info *calib_info;
1007 956
1008 enum ieee80211_if_types iw_mode; 957 enum nl80211_iftype iw_mode;
1009 958
1010 struct sk_buff *ibss_beacon; 959 struct sk_buff *ibss_beacon;
1011 960
@@ -1025,7 +974,6 @@ struct iwl_priv {
1025 * hardware */ 974 * hardware */
1026 u16 assoc_id; 975 u16 assoc_id;
1027 u16 assoc_capability; 976 u16 assoc_capability;
1028 u8 ps_mode;
1029 977
1030 struct iwl_qos_info qos_data; 978 struct iwl_qos_info qos_data;
1031 979
@@ -1047,6 +995,7 @@ struct iwl_priv {
1047 995
1048 struct tasklet_struct irq_tasklet; 996 struct tasklet_struct irq_tasklet;
1049 997
998 struct delayed_work set_power_save;
1050 struct delayed_work init_alive_start; 999 struct delayed_work init_alive_start;
1051 struct delayed_work alive_start; 1000 struct delayed_work alive_start;
1052 struct delayed_work scan_check; 1001 struct delayed_work scan_check;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index cd11c0ca2991..a72efdf6d1dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -247,8 +247,8 @@
247#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ 247#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
248#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ 248#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
249 249
250#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT (20) 250#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
251#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_BITSHIFT (4) 251#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
252#define RX_RB_TIMEOUT (0x10) 252#define RX_RB_TIMEOUT (0x10)
253 253
254#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) 254#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
@@ -260,8 +260,9 @@
260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) 260#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) 261#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
262 262
263#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) 263#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
264#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) 264#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
265#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
265 266
266 267
267/** 268/**
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 2eb03eea1908..8300f3d00a06 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -120,8 +120,18 @@ static int iwl_generic_cmd_callback(struct iwl_priv *priv,
120 return 1; 120 return 1;
121 } 121 }
122 122
123 IWL_DEBUG_HC("back from %s (0x%08X)\n", 123#ifdef CONFIG_IWLWIFI_DEBUG
124 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 124 switch (cmd->hdr.cmd) {
125 case REPLY_TX_LINK_QUALITY_CMD:
126 case SENSITIVITY_CMD:
127 IWL_DEBUG_HC_DUMP("back from %s (0x%08X)\n",
128 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
129 break;
130 default:
131 IWL_DEBUG_HC("back from %s (0x%08X)\n",
132 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
133 }
134#endif
125 135
126 /* Let iwl_tx_complete free the response skb */ 136 /* Let iwl_tx_complete free the response skb */
127 return 1; 137 return 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 5bc3df432d2d..9740fcc1805e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -61,7 +61,7 @@
61 * 61 *
62 */ 62 */
63 63
64#define _iwl_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs)) 64#define _iwl_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs))
65#ifdef CONFIG_IWLWIFI_DEBUG 65#ifdef CONFIG_IWLWIFI_DEBUG
66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv, 66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
67 u32 ofs, u32 val) 67 u32 ofs, u32 val)
@@ -75,7 +75,7 @@ static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
75#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val) 75#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
76#endif 76#endif
77 77
78#define _iwl_read32(priv, ofs) readl((priv)->hw_base + (ofs)) 78#define _iwl_read32(priv, ofs) ioread32((priv)->hw_base + (ofs))
79#ifdef CONFIG_IWLWIFI_DEBUG 79#ifdef CONFIG_IWLWIFI_DEBUG
80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) 80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
81{ 81{
@@ -155,28 +155,10 @@ static inline void __iwl_clear_bit(const char *f, u32 l,
155static inline int _iwl_grab_nic_access(struct iwl_priv *priv) 155static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
156{ 156{
157 int ret; 157 int ret;
158 u32 gp_ctl;
159
160#ifdef CONFIG_IWLWIFI_DEBUG 158#ifdef CONFIG_IWLWIFI_DEBUG
161 if (atomic_read(&priv->restrict_refcnt)) 159 if (atomic_read(&priv->restrict_refcnt))
162 return 0; 160 return 0;
163#endif 161#endif
164 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
165 test_bit(STATUS_RF_KILL_SW, &priv->status)) {
166 IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
167 "wakes up NIC\n");
168
169 /* 10 msec allows time for NIC to complete its data save */
170 gp_ctl = _iwl_read32(priv, CSR_GP_CNTRL);
171 if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
172 IWL_DEBUG_RF_KILL("Wait for complete power-down, "
173 "gpctl = 0x%08x\n", gp_ctl);
174 mdelay(10);
175 } else
176 IWL_DEBUG_RF_KILL("power-down complete, "
177 "gpctl = 0x%08x\n", gp_ctl);
178 }
179
180 /* this bit wakes up the NIC */ 162 /* this bit wakes up the NIC */
181 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 163 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
182 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL, 164 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index a099c9e30e55..60a03d2d2d0e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -152,9 +152,10 @@ static u16 iwl_get_auto_power_mode(struct iwl_priv *priv)
152/* initialize to default */ 152/* initialize to default */
153static int iwl_power_init_handle(struct iwl_priv *priv) 153static int iwl_power_init_handle(struct iwl_priv *priv)
154{ 154{
155 int ret = 0, i;
156 struct iwl_power_mgr *pow_data; 155 struct iwl_power_mgr *pow_data;
157 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX; 156 int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_MAX;
157 struct iwl_powertable_cmd *cmd;
158 int i;
158 u16 pci_pm; 159 u16 pci_pm;
159 160
160 IWL_DEBUG_POWER("Initialize power \n"); 161 IWL_DEBUG_POWER("Initialize power \n");
@@ -167,25 +168,19 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
167 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); 168 memcpy(&pow_data->pwr_range_1[0], &range_1[0], size);
168 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size); 169 memcpy(&pow_data->pwr_range_2[0], &range_2[0], size);
169 170
170 ret = pci_read_config_word(priv->pci_dev, 171 pci_read_config_word(priv->pci_dev, PCI_CFG_LINK_CTRL, &pci_pm);
171 PCI_LINK_CTRL, &pci_pm);
172 if (ret != 0)
173 return 0;
174 else {
175 struct iwl_powertable_cmd *cmd;
176 172
177 IWL_DEBUG_POWER("adjust power command flags\n"); 173 IWL_DEBUG_POWER("adjust power command flags\n");
178 174
179 for (i = 0; i < IWL_POWER_MAX; i++) { 175 for (i = 0; i < IWL_POWER_MAX; i++) {
180 cmd = &pow_data->pwr_range_0[i].cmd; 176 cmd = &pow_data->pwr_range_0[i].cmd;
181 177
182 if (pci_pm & 0x1) 178 if (pci_pm & PCI_CFG_LINK_CTRL_VAL_L0S_EN)
183 cmd->flags &= ~IWL_POWER_PCI_PM_MSK; 179 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
184 else 180 else
185 cmd->flags |= IWL_POWER_PCI_PM_MSK; 181 cmd->flags |= IWL_POWER_PCI_PM_MSK;
186 }
187 } 182 }
188 return ret; 183 return 0;
189} 184}
190 185
191/* adjust power command according to dtim period and power level*/ 186/* adjust power command according to dtim period and power level*/
@@ -255,17 +250,26 @@ static int iwl_update_power_command(struct iwl_priv *priv,
255 250
256 251
257/* 252/*
258 * calucaute the final power mode index 253 * compute the final power mode index
259 */ 254 */
260int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh) 255int iwl_power_update_mode(struct iwl_priv *priv, bool force)
261{ 256{
262 struct iwl_power_mgr *setting = &(priv->power_data); 257 struct iwl_power_mgr *setting = &(priv->power_data);
263 int ret = 0; 258 int ret = 0;
264 u16 uninitialized_var(final_mode); 259 u16 uninitialized_var(final_mode);
265 260
266 /* If on battery, set to 3, 261 /* Don't update the RX chain when chain noise calibration is running */
267 * if plugged into AC power, set to CAM ("continuously aware mode"), 262 if (priv->chain_noise_data.state != IWL_CHAIN_NOISE_DONE &&
268 * else user level */ 263 priv->chain_noise_data.state != IWL_CHAIN_NOISE_ALIVE) {
264 IWL_DEBUG_POWER("Cannot update the power, chain noise "
265 "calibration running: %d\n",
266 priv->chain_noise_data.state);
267 return -EAGAIN;
268 }
269
270 /* If on battery, set to 3,
271 * if plugged into AC power, set to CAM ("continuously aware mode"),
272 * else user level */
269 273
270 switch (setting->system_power_setting) { 274 switch (setting->system_power_setting) {
271 case IWL_POWER_SYS_AUTO: 275 case IWL_POWER_SYS_AUTO:
@@ -286,11 +290,11 @@ int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh)
286 final_mode = setting->critical_power_setting; 290 final_mode = setting->critical_power_setting;
287 291
288 /* driver only support CAM for non STA network */ 292 /* driver only support CAM for non STA network */
289 if (priv->iw_mode != IEEE80211_IF_TYPE_STA) 293 if (priv->iw_mode != NL80211_IFTYPE_STATION)
290 final_mode = IWL_POWER_MODE_CAM; 294 final_mode = IWL_POWER_MODE_CAM;
291 295
292 if (!iwl_is_rfkill(priv) && !setting->power_disabled && 296 if (!iwl_is_rfkill(priv) && !setting->power_disabled &&
293 ((setting->power_mode != final_mode) || refresh)) { 297 ((setting->power_mode != final_mode) || force)) {
294 struct iwl_powertable_cmd cmd; 298 struct iwl_powertable_cmd cmd;
295 299
296 if (final_mode != IWL_POWER_MODE_CAM) 300 if (final_mode != IWL_POWER_MODE_CAM)
@@ -324,7 +328,7 @@ EXPORT_SYMBOL(iwl_power_update_mode);
324 * this will be usefull for rate scale to disable PM during heavy 328 * this will be usefull for rate scale to disable PM during heavy
325 * Tx/Rx activities 329 * Tx/Rx activities
326 */ 330 */
327int iwl_power_disable_management(struct iwl_priv *priv) 331int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
328{ 332{
329 u16 prev_mode; 333 u16 prev_mode;
330 int ret = 0; 334 int ret = 0;
@@ -337,6 +341,11 @@ int iwl_power_disable_management(struct iwl_priv *priv)
337 ret = iwl_power_update_mode(priv, 0); 341 ret = iwl_power_update_mode(priv, 0);
338 priv->power_data.power_disabled = 1; 342 priv->power_data.power_disabled = 1;
339 priv->power_data.user_power_setting = prev_mode; 343 priv->power_data.user_power_setting = prev_mode;
344 cancel_delayed_work(&priv->set_power_save);
345 if (ms)
346 queue_delayed_work(priv->workqueue, &priv->set_power_save,
347 msecs_to_jiffies(ms));
348
340 349
341 return ret; 350 return ret;
342} 351}
@@ -359,35 +368,26 @@ EXPORT_SYMBOL(iwl_power_enable_management);
359/* set user_power_setting */ 368/* set user_power_setting */
360int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode) 369int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode)
361{ 370{
362 int ret = 0;
363
364 if (mode > IWL_POWER_LIMIT) 371 if (mode > IWL_POWER_LIMIT)
365 return -EINVAL; 372 return -EINVAL;
366 373
367 priv->power_data.user_power_setting = mode; 374 priv->power_data.user_power_setting = mode;
368 375
369 ret = iwl_power_update_mode(priv, 0); 376 return iwl_power_update_mode(priv, 0);
370
371 return ret;
372} 377}
373EXPORT_SYMBOL(iwl_power_set_user_mode); 378EXPORT_SYMBOL(iwl_power_set_user_mode);
374 379
375
376/* set system_power_setting. This should be set by over all 380/* set system_power_setting. This should be set by over all
377 * PM application. 381 * PM application.
378 */ 382 */
379int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode) 383int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
380{ 384{
381 int ret = 0;
382
383 if (mode > IWL_POWER_LIMIT) 385 if (mode > IWL_POWER_LIMIT)
384 return -EINVAL; 386 return -EINVAL;
385 387
386 priv->power_data.system_power_setting = mode; 388 priv->power_data.system_power_setting = mode;
387 389
388 ret = iwl_power_update_mode(priv, 0); 390 return iwl_power_update_mode(priv, 0);
389
390 return ret;
391} 391}
392EXPORT_SYMBOL(iwl_power_set_system_mode); 392EXPORT_SYMBOL(iwl_power_set_system_mode);
393 393
@@ -431,3 +431,35 @@ int iwl_power_temperature_change(struct iwl_priv *priv)
431 return ret; 431 return ret;
432} 432}
433EXPORT_SYMBOL(iwl_power_temperature_change); 433EXPORT_SYMBOL(iwl_power_temperature_change);
434
435static void iwl_bg_set_power_save(struct work_struct *work)
436{
437 struct iwl_priv *priv = container_of(work,
438 struct iwl_priv, set_power_save.work);
439 IWL_DEBUG(IWL_DL_STATE, "update power\n");
440
441 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
442 return;
443
444 mutex_lock(&priv->mutex);
445
446 /* on starting association we disable power managment
447 * until association, if association failed then this
448 * timer will expire and enable PM again.
449 */
450 if (!iwl_is_associated(priv))
451 iwl_power_enable_management(priv);
452
453 mutex_unlock(&priv->mutex);
454}
455void iwl_setup_power_deferred_work(struct iwl_priv *priv)
456{
457 INIT_DELAYED_WORK(&priv->set_power_save, iwl_bg_set_power_save);
458}
459EXPORT_SYMBOL(iwl_setup_power_deferred_work);
460
461void iwl_power_cancel_timeout(struct iwl_priv *priv)
462{
463 cancel_delayed_work(&priv->set_power_save);
464}
465EXPORT_SYMBOL(iwl_power_cancel_timeout);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index abcbbf96a84e..df484a90ae64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -72,14 +72,16 @@ struct iwl_power_mgr {
72 /* final power level that used to calculate final power command */ 72 /* final power level that used to calculate final power command */
73 u8 power_mode; 73 u8 power_mode;
74 u8 user_power_setting; /* set by user through mac80211 or sysfs */ 74 u8 user_power_setting; /* set by user through mac80211 or sysfs */
75 u8 system_power_setting; /* set by kernel syatem tools */ 75 u8 system_power_setting; /* set by kernel system tools */
76 u8 critical_power_setting; /* set if driver over heated */ 76 u8 critical_power_setting; /* set if driver over heated */
77 u8 is_battery_active; /* DC/AC power */ 77 u8 is_battery_active; /* DC/AC power */
78 u8 power_disabled; /* flag to disable using power saving level */ 78 u8 power_disabled; /* flag to disable using power saving level */
79}; 79};
80 80
81int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh); 81void iwl_setup_power_deferred_work(struct iwl_priv *priv);
82int iwl_power_disable_management(struct iwl_priv *priv); 82void iwl_power_cancel_timeout(struct iwl_priv *priv);
83int iwl_power_update_mode(struct iwl_priv *priv, bool force);
84int iwl_power_disable_management(struct iwl_priv *priv, u32 ms);
83int iwl_power_enable_management(struct iwl_priv *priv); 85int iwl_power_enable_management(struct iwl_priv *priv);
84int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode); 86int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
85int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode); 87int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e81bfc42a7cb..7cde9d76ff5d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -376,7 +376,9 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
376{ 376{
377 int ret; 377 int ret;
378 unsigned long flags; 378 unsigned long flags;
379 unsigned int rb_size; 379 u32 rb_size;
380 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
381 const u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT why this stalls RX */
380 382
381 spin_lock_irqsave(&priv->lock, flags); 383 spin_lock_irqsave(&priv->lock, flags);
382 ret = iwl_grab_nic_access(priv); 384 ret = iwl_grab_nic_access(priv);
@@ -398,26 +400,32 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
398 400
399 /* Tell device where to find RBD circular buffer in DRAM */ 401 /* Tell device where to find RBD circular buffer in DRAM */
400 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 402 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
401 rxq->dma_addr >> 8); 403 (u32)(rxq->dma_addr >> 8));
402 404
403 /* Tell device where in DRAM to update its Rx status */ 405 /* Tell device where in DRAM to update its Rx status */
404 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, 406 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
405 (priv->shared_phys + priv->rb_closed_offset) >> 4); 407 (priv->shared_phys + priv->rb_closed_offset) >> 4);
406 408
407 /* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */ 409 /* Enable Rx DMA
410 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set becuase of HW bug in
411 * the credit mechanism in 5000 HW RX FIFO
412 * Direct rx interrupts to hosts
413 * Rx buffer size 4 or 8k
414 * RB timeout 0x10
415 * 256 RBDs
416 */
408 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 417 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
409 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 418 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
419 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
410 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 420 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
411 rb_size | 421 rb_size|
412 /* 0x10 << 4 | */ 422 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
413 (RX_QUEUE_SIZE_LOG << 423 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
414 FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
415
416 /*
417 * iwl_write32(priv,CSR_INT_COAL_REG,0);
418 */
419 424
420 iwl_release_nic_access(priv); 425 iwl_release_nic_access(priv);
426
427 iwl_write32(priv, CSR_INT_COALESCING, 0x40);
428
421 spin_unlock_irqrestore(&priv->lock, flags); 429 spin_unlock_irqrestore(&priv->lock, flags);
422 430
423 return 0; 431 return 0;
@@ -789,107 +797,6 @@ static inline void iwl_dbg_report_frame(struct iwl_priv *priv,
789} 797}
790#endif 798#endif
791 799
792static void iwl_add_radiotap(struct iwl_priv *priv,
793 struct sk_buff *skb,
794 struct iwl_rx_phy_res *rx_start,
795 struct ieee80211_rx_status *stats,
796 u32 ampdu_status)
797{
798 s8 signal = stats->signal;
799 s8 noise = 0;
800 int rate = stats->rate_idx;
801 u64 tsf = stats->mactime;
802 __le16 antenna;
803 __le16 phy_flags_hw = rx_start->phy_flags;
804 struct iwl4965_rt_rx_hdr {
805 struct ieee80211_radiotap_header rt_hdr;
806 __le64 rt_tsf; /* TSF */
807 u8 rt_flags; /* radiotap packet flags */
808 u8 rt_rate; /* rate in 500kb/s */
809 __le16 rt_channelMHz; /* channel in MHz */
810 __le16 rt_chbitmask; /* channel bitfield */
811 s8 rt_dbmsignal; /* signal in dBm, kluged to signed */
812 s8 rt_dbmnoise;
813 u8 rt_antenna; /* antenna number */
814 } __attribute__ ((packed)) *iwl4965_rt;
815
816 /* TODO: We won't have enough headroom for HT frames. Fix it later. */
817 if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
818 if (net_ratelimit())
819 printk(KERN_ERR "not enough headroom [%d] for "
820 "radiotap head [%zd]\n",
821 skb_headroom(skb), sizeof(*iwl4965_rt));
822 return;
823 }
824
825 /* put radiotap header in front of 802.11 header and data */
826 iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));
827
828 /* initialise radiotap header */
829 iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
830 iwl4965_rt->rt_hdr.it_pad = 0;
831
832 /* total header + data */
833 put_unaligned_le16(sizeof(*iwl4965_rt), &iwl4965_rt->rt_hdr.it_len);
834
835 /* Indicate all the fields we add to the radiotap header */
836 put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
837 (1 << IEEE80211_RADIOTAP_FLAGS) |
838 (1 << IEEE80211_RADIOTAP_RATE) |
839 (1 << IEEE80211_RADIOTAP_CHANNEL) |
840 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
841 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
842 (1 << IEEE80211_RADIOTAP_ANTENNA),
843 &(iwl4965_rt->rt_hdr.it_present));
844
845 /* Zero the flags, we'll add to them as we go */
846 iwl4965_rt->rt_flags = 0;
847
848 put_unaligned_le64(tsf, &iwl4965_rt->rt_tsf);
849
850 iwl4965_rt->rt_dbmsignal = signal;
851 iwl4965_rt->rt_dbmnoise = noise;
852
853 /* Convert the channel frequency and set the flags */
854 put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
855 if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
856 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
857 &iwl4965_rt->rt_chbitmask);
858 else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
859 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
860 &iwl4965_rt->rt_chbitmask);
861 else /* 802.11g */
862 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
863 &iwl4965_rt->rt_chbitmask);
864
865 if (rate == -1)
866 iwl4965_rt->rt_rate = 0;
867 else
868 iwl4965_rt->rt_rate = iwl_rates[rate].ieee;
869
870 /*
871 * "antenna number"
872 *
873 * It seems that the antenna field in the phy flags value
874 * is actually a bitfield. This is undefined by radiotap,
875 * it wants an actual antenna number but I always get "7"
876 * for most legacy frames I receive indicating that the
877 * same frame was received on all three RX chains.
878 *
879 * I think this field should be removed in favour of a
880 * new 802.11n radiotap field "RX chains" that is defined
881 * as a bitmask.
882 */
883 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
884 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
885
886 /* set the preamble flag if appropriate */
887 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
888 iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
889
890 stats->flag |= RX_FLAG_RADIOTAP;
891}
892
893static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len) 800static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
894{ 801{
895 /* 0 - mgmt, 1 - cnt, 2 - data */ 802 /* 0 - mgmt, 1 - cnt, 2 - data */
@@ -1074,9 +981,6 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
1074 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 981 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
1075 return; 982 return;
1076 983
1077 if (priv->add_radiotap)
1078 iwl_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
1079
1080 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len); 984 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
1081 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); 985 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
1082 priv->alloc_rxb_skb--; 986 priv->alloc_rxb_skb--;
@@ -1130,10 +1034,10 @@ static int iwl_is_network_packet(struct iwl_priv *priv,
1130 /* Filter incoming packets to determine if they are targeted toward 1034 /* Filter incoming packets to determine if they are targeted toward
1131 * this network, discarding packets coming from ourselves */ 1035 * this network, discarding packets coming from ourselves */
1132 switch (priv->iw_mode) { 1036 switch (priv->iw_mode) {
1133 case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ 1037 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
1134 /* packets to our IBSS update information */ 1038 /* packets to our IBSS update information */
1135 return !compare_ether_addr(header->addr3, priv->bssid); 1039 return !compare_ether_addr(header->addr3, priv->bssid);
1136 case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */ 1040 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
1137 /* packets to our IBSS update information */ 1041 /* packets to our IBSS update information */
1138 return !compare_ether_addr(header->addr2, priv->bssid); 1042 return !compare_ether_addr(header->addr2, priv->bssid);
1139 default: 1043 default:
@@ -1171,7 +1075,6 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1171 if (rx_status.band == IEEE80211_BAND_5GHZ) 1075 if (rx_status.band == IEEE80211_BAND_5GHZ)
1172 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; 1076 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
1173 1077
1174 rx_status.antenna = 0;
1175 rx_status.flag = 0; 1078 rx_status.flag = 0;
1176 1079
1177 /* TSF isn't reliable. In order to allow smooth user experience, 1080 /* TSF isn't reliable. In order to allow smooth user experience,
@@ -1253,8 +1156,28 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1253 rx_status.signal, rx_status.noise, rx_status.signal, 1156 rx_status.signal, rx_status.noise, rx_status.signal,
1254 (unsigned long long)rx_status.mactime); 1157 (unsigned long long)rx_status.mactime);
1255 1158
1159 /*
1160 * "antenna number"
1161 *
1162 * It seems that the antenna field in the phy flags value
1163 * is actually a bitfield. This is undefined by radiotap,
1164 * it wants an actual antenna number but I always get "7"
1165 * for most legacy frames I receive indicating that the
1166 * same frame was received on all three RX chains.
1167 *
1168 * I think this field should be removed in favour of a
1169 * new 802.11n radiotap field "RX chains" that is defined
1170 * as a bitmask.
1171 */
1172 rx_status.antenna = le16_to_cpu(rx_start->phy_flags &
1173 RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
1174
1175 /* set the preamble flag if appropriate */
1176 if (rx_start->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1177 rx_status.flag |= RX_FLAG_SHORTPRE;
1178
1256 /* Take shortcut when only in monitor mode */ 1179 /* Take shortcut when only in monitor mode */
1257 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 1180 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
1258 iwl_pass_packet_to_mac80211(priv, include_phy, 1181 iwl_pass_packet_to_mac80211(priv, include_phy,
1259 rxb, &rx_status); 1182 rxb, &rx_status);
1260 return; 1183 return;
@@ -1271,7 +1194,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1271 switch (fc & IEEE80211_FCTL_FTYPE) { 1194 switch (fc & IEEE80211_FCTL_FTYPE) {
1272 case IEEE80211_FTYPE_MGMT: 1195 case IEEE80211_FTYPE_MGMT:
1273 case IEEE80211_FTYPE_DATA: 1196 case IEEE80211_FTYPE_DATA:
1274 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 1197 if (priv->iw_mode == NL80211_IFTYPE_AP)
1275 iwl_update_ps_mode(priv, fc & IEEE80211_FCTL_PM, 1198 iwl_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
1276 header->addr2); 1199 header->addr2);
1277 /* fall through */ 1200 /* fall through */
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 6c8ac3a87d54..bf855c35b0c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -88,7 +88,7 @@ static int iwl_is_empty_essid(const char *essid, int essid_len)
88 88
89 89
90 90
91const char *iwl_escape_essid(const char *essid, u8 essid_len) 91static const char *iwl_escape_essid(const char *essid, u8 essid_len)
92{ 92{
93 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 93 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
94 const char *s = essid; 94 const char *s = essid;
@@ -111,7 +111,6 @@ const char *iwl_escape_essid(const char *essid, u8 essid_len)
111 *d = '\0'; 111 *d = '\0';
112 return escaped; 112 return escaped;
113} 113}
114EXPORT_SYMBOL(iwl_escape_essid);
115 114
116/** 115/**
117 * iwl_scan_cancel - Cancel any currently executing HW scan 116 * iwl_scan_cancel - Cancel any currently executing HW scan
@@ -464,11 +463,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
464 463
465int iwl_scan_initiate(struct iwl_priv *priv) 464int iwl_scan_initiate(struct iwl_priv *priv)
466{ 465{
467 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
468 IWL_ERROR("APs don't scan.\n");
469 return 0;
470 }
471
472 if (!iwl_is_ready_rf(priv)) { 466 if (!iwl_is_ready_rf(priv)) {
473 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); 467 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
474 return -EIO; 468 return -EIO;
@@ -480,8 +474,7 @@ int iwl_scan_initiate(struct iwl_priv *priv)
480 } 474 }
481 475
482 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 476 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
483 IWL_DEBUG_SCAN("Scan request while abort pending. " 477 IWL_DEBUG_SCAN("Scan request while abort pending\n");
484 "Queuing.\n");
485 return -EAGAIN; 478 return -EAGAIN;
486 } 479 }
487 480
@@ -869,7 +862,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
869 862
870 scan->tx_cmd.len = cpu_to_le16(cmd_len); 863 scan->tx_cmd.len = cpu_to_le16(cmd_len);
871 864
872 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 865 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
873 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 866 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
874 867
875 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | 868 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 6283a3a707f5..61797f3f8d5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -47,8 +47,8 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
47 unsigned long flags; 47 unsigned long flags;
48 DECLARE_MAC_BUF(mac); 48 DECLARE_MAC_BUF(mac);
49 49
50 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) || 50 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
51 (priv->iw_mode == IEEE80211_IF_TYPE_AP)) 51 (priv->iw_mode == NL80211_IFTYPE_AP))
52 start = IWL_STA_ID; 52 start = IWL_STA_ID;
53 53
54 if (is_broadcast_ether_addr(addr)) 54 if (is_broadcast_ether_addr(addr))
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(iwl_find_station);
74 74
75int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) 75int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
76{ 76{
77 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 77 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
78 return IWL_AP_ID; 78 return IWL_AP_ID;
79 } else { 79 } else {
80 u8 *da = ieee80211_get_DA(hdr); 80 u8 *da = ieee80211_get_DA(hdr);
@@ -191,20 +191,20 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
191 if (!sta_ht_inf || !sta_ht_inf->ht_supported) 191 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
192 goto done; 192 goto done;
193 193
194 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2; 194 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
195 195
196 sta_flags = priv->stations[index].sta.station_flags; 196 sta_flags = priv->stations[index].sta.station_flags;
197 197
198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); 198 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
199 199
200 switch (mimo_ps_mode) { 200 switch (mimo_ps_mode) {
201 case WLAN_HT_CAP_MIMO_PS_STATIC: 201 case WLAN_HT_CAP_SM_PS_STATIC:
202 sta_flags |= STA_FLG_MIMO_DIS_MSK; 202 sta_flags |= STA_FLG_MIMO_DIS_MSK;
203 break; 203 break;
204 case WLAN_HT_CAP_MIMO_PS_DYNAMIC: 204 case WLAN_HT_CAP_SM_PS_DYNAMIC:
205 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; 205 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
206 break; 206 break;
207 case WLAN_HT_CAP_MIMO_PS_DISABLED: 207 case WLAN_HT_CAP_SM_PS_DISABLED:
208 break; 208 break;
209 default: 209 default:
210 IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode); 210 IWL_WARNING("Invalid MIMO PS mode %d\n", mimo_ps_mode);
@@ -286,7 +286,7 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
286 286
287 /* BCAST station and IBSS stations do not work in HT mode */ 287 /* BCAST station and IBSS stations do not work in HT mode */
288 if (sta_id != priv->hw_params.bcast_sta_id && 288 if (sta_id != priv->hw_params.bcast_sta_id &&
289 priv->iw_mode != IEEE80211_IF_TYPE_IBSS) 289 priv->iw_mode != NL80211_IFTYPE_ADHOC)
290 iwl_set_ht_add_station(priv, sta_id, ht_info); 290 iwl_set_ht_add_station(priv, sta_id, ht_info);
291 291
292 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 292 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
@@ -817,7 +817,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
817 }; 817 };
818 818
819 if ((lq->sta_id == 0xFF) && 819 if ((lq->sta_id == 0xFF) &&
820 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 820 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
821 return -EINVAL; 821 return -EINVAL;
822 822
823 if (lq->sta_id == 0xFF) 823 if (lq->sta_id == 0xFF)
@@ -904,7 +904,7 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
904 904
905 if ((is_ap) && 905 if ((is_ap) &&
906 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 906 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) &&
907 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) 907 (priv->iw_mode == NL80211_IFTYPE_STATION))
908 sta_id = iwl_add_station_flags(priv, addr, is_ap, 908 sta_id = iwl_add_station_flags(priv, addr, is_ap,
909 0, cur_ht_config); 909 0, cur_ht_config);
910 else 910 else
@@ -938,11 +938,11 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
938 938
939 /* If we are a client station in a BSS network, use the special 939 /* If we are a client station in a BSS network, use the special
940 * AP station entry (that's the only station we communicate with) */ 940 * AP station entry (that's the only station we communicate with) */
941 case IEEE80211_IF_TYPE_STA: 941 case NL80211_IFTYPE_STATION:
942 return IWL_AP_ID; 942 return IWL_AP_ID;
943 943
944 /* If we are an AP, then find the station, or use BCAST */ 944 /* If we are an AP, then find the station, or use BCAST */
945 case IEEE80211_IF_TYPE_AP: 945 case NL80211_IFTYPE_AP:
946 sta_id = iwl_find_station(priv, hdr->addr1); 946 sta_id = iwl_find_station(priv, hdr->addr1);
947 if (sta_id != IWL_INVALID_STATION) 947 if (sta_id != IWL_INVALID_STATION)
948 return sta_id; 948 return sta_id;
@@ -950,7 +950,7 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
950 950
951 /* If this frame is going out to an IBSS network, find the station, 951 /* If this frame is going out to an IBSS network, find the station,
952 * or create a new station table entry */ 952 * or create a new station table entry */
953 case IEEE80211_IF_TYPE_IBSS: 953 case NL80211_IFTYPE_ADHOC:
954 sta_id = iwl_find_station(priv, hdr->addr1); 954 sta_id = iwl_find_station(priv, hdr->addr1);
955 if (sta_id != IWL_INVALID_STATION) 955 if (sta_id != IWL_INVALID_STATION)
956 return sta_id; 956 return sta_id;
@@ -968,6 +968,11 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
968 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 968 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
969 return priv->hw_params.bcast_sta_id; 969 return priv->hw_params.bcast_sta_id;
970 970
971 /* If we are in monitor mode, use BCAST. This is required for
972 * packet injection. */
973 case NL80211_IFTYPE_MONITOR:
974 return priv->hw_params.bcast_sta_id;
975
971 default: 976 default:
972 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode); 977 IWL_WARNING("Unknown mode of operation: %d\n", priv->iw_mode);
973 return priv->hw_params.bcast_sta_id; 978 return priv->hw_params.bcast_sta_id;
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 78b1a7a4ca40..907a53ebc6e4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -63,7 +63,7 @@ static const u16 default_tid_to_tx_fifo[] = {
63 * Does NOT advance any TFD circular buffer read/write indexes 63 * Does NOT advance any TFD circular buffer read/write indexes
64 * Does NOT free the TFD itself (which is within circular buffer) 64 * Does NOT free the TFD itself (which is within circular buffer)
65 */ 65 */
66int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) 66static int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
67{ 67{
68 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0]; 68 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
69 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr]; 69 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
@@ -115,10 +115,8 @@ int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
115 } 115 }
116 return 0; 116 return 0;
117} 117}
118EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
119 118
120 119static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
121int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
122 dma_addr_t addr, u16 len) 120 dma_addr_t addr, u16 len)
123{ 121{
124 int index, is_odd; 122 int index, is_odd;
@@ -126,7 +124,7 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
126 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); 124 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
127 125
128 /* Each TFD can point to a maximum 20 Tx buffers */ 126 /* Each TFD can point to a maximum 20 Tx buffers */
129 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) { 127 if (num_tbs >= MAX_NUM_OF_TBS) {
130 IWL_ERROR("Error can not send more than %d chunks\n", 128 IWL_ERROR("Error can not send more than %d chunks\n",
131 MAX_NUM_OF_TBS); 129 MAX_NUM_OF_TBS);
132 return -EINVAL; 130 return -EINVAL;
@@ -151,7 +149,6 @@ int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
151 149
152 return 0; 150 return 0;
153} 151}
154EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
155 152
156/** 153/**
157 * iwl_txq_update_write_ptr - Send new write index to hardware 154 * iwl_txq_update_write_ptr - Send new write index to hardware
@@ -478,7 +475,6 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
478} 475}
479EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 476EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
480 477
481
482/** 478/**
483 * iwl_txq_ctx_reset - Reset TX queue context 479 * iwl_txq_ctx_reset - Reset TX queue context
484 * Destroys all DMA structures and initialise them again 480 * Destroys all DMA structures and initialise them again
@@ -545,6 +541,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
545 error_kw: 541 error_kw:
546 return ret; 542 return ret;
547} 543}
544
548/** 545/**
549 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory 546 * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
550 */ 547 */
@@ -796,11 +793,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
796 goto drop_unlock; 793 goto drop_unlock;
797 } 794 }
798 795
799 if (!priv->vif) {
800 IWL_DEBUG_DROP("Dropping - !priv->vif\n");
801 goto drop_unlock;
802 }
803
804 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == 796 if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
805 IWL_INVALID_RATE) { 797 IWL_INVALID_RATE) {
806 IWL_ERROR("ERROR: No TX rate available.\n"); 798 IWL_ERROR("ERROR: No TX rate available.\n");
@@ -822,16 +814,18 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
822 814
823 /* drop all data frame if we are not associated */ 815 /* drop all data frame if we are not associated */
824 if (ieee80211_is_data(fc) && 816 if (ieee80211_is_data(fc) &&
825 (!iwl_is_associated(priv) || 817 (priv->iw_mode != NL80211_IFTYPE_MONITOR ||
826 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) || 818 !(info->flags & IEEE80211_TX_CTL_INJECTED)) && /* packet injection */
827 !priv->assoc_station_added)) { 819 (!iwl_is_associated(priv) ||
820 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
821 !priv->assoc_station_added)) {
828 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); 822 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
829 goto drop_unlock; 823 goto drop_unlock;
830 } 824 }
831 825
832 spin_unlock_irqrestore(&priv->lock, flags); 826 spin_unlock_irqrestore(&priv->lock, flags);
833 827
834 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc)); 828 hdr_len = ieee80211_hdrlen(fc);
835 829
836 /* Find (or create) index into station table for destination station */ 830 /* Find (or create) index into station table for destination station */
837 sta_id = iwl_get_sta_id(priv, hdr); 831 sta_id = iwl_get_sta_id(priv, hdr);
@@ -849,7 +843,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
849 txq_id = swq_id; 843 txq_id = swq_id;
850 if (ieee80211_is_data_qos(fc)) { 844 if (ieee80211_is_data_qos(fc)) {
851 qc = ieee80211_get_qos_ctl(hdr); 845 qc = ieee80211_get_qos_ctl(hdr);
852 tid = qc[0] & 0xf; 846 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
853 seq_number = priv->stations[sta_id].tid[tid].seq_number; 847 seq_number = priv->stations[sta_id].tid[tid].seq_number;
854 seq_number &= IEEE80211_SCTL_SEQ; 848 seq_number &= IEEE80211_SCTL_SEQ;
855 hdr->seq_ctrl = hdr->seq_ctrl & 849 hdr->seq_ctrl = hdr->seq_ctrl &
@@ -1064,7 +1058,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1064 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | 1058 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
1065 INDEX_TO_SEQ(q->write_ptr)); 1059 INDEX_TO_SEQ(q->write_ptr));
1066 if (out_cmd->meta.flags & CMD_SIZE_HUGE) 1060 if (out_cmd->meta.flags & CMD_SIZE_HUGE)
1067 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); 1061 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
1068 len = (idx == TFD_CMD_SLOTS) ? 1062 len = (idx == TFD_CMD_SLOTS) ?
1069 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd); 1063 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
1070 phys_addr = pci_map_single(priv->pci_dev, out_cmd, len, 1064 phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
@@ -1072,12 +1066,26 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1072 phys_addr += offsetof(struct iwl_cmd, hdr); 1066 phys_addr += offsetof(struct iwl_cmd, hdr);
1073 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); 1067 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
1074 1068
1075 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " 1069#ifdef CONFIG_IWLWIFI_DEBUG
1076 "%d bytes at %d[%d]:%d\n", 1070 switch (out_cmd->hdr.cmd) {
1077 get_cmd_string(out_cmd->hdr.cmd), 1071 case REPLY_TX_LINK_QUALITY_CMD:
1078 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1072 case SENSITIVITY_CMD:
1079 fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM); 1073 IWL_DEBUG_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
1080 1074 "%d bytes at %d[%d]:%d\n",
1075 get_cmd_string(out_cmd->hdr.cmd),
1076 out_cmd->hdr.cmd,
1077 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
1078 q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1079 break;
1080 default:
1081 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
1082 "%d bytes at %d[%d]:%d\n",
1083 get_cmd_string(out_cmd->hdr.cmd),
1084 out_cmd->hdr.cmd,
1085 le16_to_cpu(out_cmd->hdr.sequence), fix_size,
1086 q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
1087 }
1088#endif
1081 txq->need_update = 1; 1089 txq->need_update = 1;
1082 1090
1083 /* Set up entry in queue's byte count circular buffer */ 1091 /* Set up entry in queue's byte count circular buffer */
@@ -1185,17 +1193,16 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1185 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1193 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1186 int txq_id = SEQ_TO_QUEUE(sequence); 1194 int txq_id = SEQ_TO_QUEUE(sequence);
1187 int index = SEQ_TO_INDEX(sequence); 1195 int index = SEQ_TO_INDEX(sequence);
1188 int huge = sequence & SEQ_HUGE_FRAME;
1189 int cmd_index; 1196 int cmd_index;
1197 bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
1190 struct iwl_cmd *cmd; 1198 struct iwl_cmd *cmd;
1191 1199
1192 /* If a Tx command is being handled and it isn't in the actual 1200 /* If a Tx command is being handled and it isn't in the actual
1193 * command queue then there a command routing bug has been introduced 1201 * command queue then there a command routing bug has been introduced
1194 * in the queue management code. */ 1202 * in the queue management code. */
1195 if (txq_id != IWL_CMD_QUEUE_NUM) 1203 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
1196 IWL_ERROR("Error wrong command queue %d command id 0x%X\n", 1204 "wrong command queue %d, command id 0x%X\n", txq_id, pkt->hdr.cmd))
1197 txq_id, pkt->hdr.cmd); 1205 return;
1198 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
1199 1206
1200 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 1207 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
1201 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1208 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b775d5bab668..d15a2c997954 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1160,7 +1160,7 @@ static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
1160 /* If we have set the ASSOC_MSK and we are in BSS mode then 1160 /* If we have set the ASSOC_MSK and we are in BSS mode then
1161 * add the IWL_AP_ID to the station rate table */ 1161 * add the IWL_AP_ID to the station rate table */
1162 if (iwl3945_is_associated(priv) && 1162 if (iwl3945_is_associated(priv) &&
1163 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) 1163 (priv->iw_mode == NL80211_IFTYPE_STATION))
1164 if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr, 1, 0) 1164 if (iwl3945_add_station(priv, priv->active_rxon.bssid_addr, 1, 0)
1165 == IWL_INVALID_STATION) { 1165 == IWL_INVALID_STATION) {
1166 IWL_ERROR("Error adding AP address for transmit.\n"); 1166 IWL_ERROR("Error adding AP address for transmit.\n");
@@ -1447,8 +1447,8 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
1447{ 1447{
1448 1448
1449 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon || 1449 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
1450 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && 1450 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
1451 (priv->iw_mode != IEEE80211_IF_TYPE_AP))) 1451 (priv->iw_mode != NL80211_IFTYPE_AP)))
1452 return 0; 1452 return 0;
1453 1453
1454 if (priv->ibss_beacon->len > left) 1454 if (priv->ibss_beacon->len > left)
@@ -1746,14 +1746,14 @@ static void iwl3945_reset_qos(struct iwl3945_priv *priv)
1746 spin_lock_irqsave(&priv->lock, flags); 1746 spin_lock_irqsave(&priv->lock, flags);
1747 priv->qos_data.qos_active = 0; 1747 priv->qos_data.qos_active = 0;
1748 1748
1749 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { 1749 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
1750 if (priv->qos_data.qos_enable) 1750 if (priv->qos_data.qos_enable)
1751 priv->qos_data.qos_active = 1; 1751 priv->qos_data.qos_active = 1;
1752 if (!(priv->active_rate & 0xfff0)) { 1752 if (!(priv->active_rate & 0xfff0)) {
1753 cw_min = 31; 1753 cw_min = 31;
1754 is_legacy = 1; 1754 is_legacy = 1;
1755 } 1755 }
1756 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 1756 } else if (priv->iw_mode == NL80211_IFTYPE_AP) {
1757 if (priv->qos_data.qos_enable) 1757 if (priv->qos_data.qos_enable)
1758 priv->qos_data.qos_active = 1; 1758 priv->qos_data.qos_active = 1;
1759 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { 1759 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
@@ -2120,7 +2120,7 @@ static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
2120 beacon_int = priv->beacon_int; 2120 beacon_int = priv->beacon_int;
2121 spin_unlock_irqrestore(&priv->lock, flags); 2121 spin_unlock_irqrestore(&priv->lock, flags);
2122 2122
2123 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { 2123 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2124 if (beacon_int == 0) { 2124 if (beacon_int == 0) {
2125 priv->rxon_timing.beacon_interval = cpu_to_le16(100); 2125 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
2126 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); 2126 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
@@ -2156,7 +2156,7 @@ static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
2156 2156
2157static int iwl3945_scan_initiate(struct iwl3945_priv *priv) 2157static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
2158{ 2158{
2159 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 2159 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2160 IWL_ERROR("APs don't scan.\n"); 2160 IWL_ERROR("APs don't scan.\n");
2161 return 0; 2161 return 0;
2162 } 2162 }
@@ -2218,7 +2218,7 @@ static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv,
2218 else 2218 else
2219 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2219 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2220 2220
2221 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 2221 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2222 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2222 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2223 2223
2224 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; 2224 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -2237,23 +2237,23 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2237 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 2237 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2238 2238
2239 switch (priv->iw_mode) { 2239 switch (priv->iw_mode) {
2240 case IEEE80211_IF_TYPE_AP: 2240 case NL80211_IFTYPE_AP:
2241 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; 2241 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2242 break; 2242 break;
2243 2243
2244 case IEEE80211_IF_TYPE_STA: 2244 case NL80211_IFTYPE_STATION:
2245 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; 2245 priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
2246 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; 2246 priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
2247 break; 2247 break;
2248 2248
2249 case IEEE80211_IF_TYPE_IBSS: 2249 case NL80211_IFTYPE_ADHOC:
2250 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; 2250 priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
2251 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; 2251 priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
2252 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | 2252 priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
2253 RXON_FILTER_ACCEPT_GRP_MSK; 2253 RXON_FILTER_ACCEPT_GRP_MSK;
2254 break; 2254 break;
2255 2255
2256 case IEEE80211_IF_TYPE_MNTR: 2256 case NL80211_IFTYPE_MONITOR:
2257 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; 2257 priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER;
2258 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | 2258 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2259 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 2259 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
@@ -2282,7 +2282,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2282 * in some case A channels are all non IBSS 2282 * in some case A channels are all non IBSS
2283 * in this case force B/G channel 2283 * in this case force B/G channel
2284 */ 2284 */
2285 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2285 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
2286 !(is_channel_ibss(ch_info))) 2286 !(is_channel_ibss(ch_info)))
2287 ch_info = &priv->channel_info[0]; 2287 ch_info = &priv->channel_info[0];
2288 2288
@@ -2302,7 +2302,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2302 2302
2303static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode) 2303static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2304{ 2304{
2305 if (mode == IEEE80211_IF_TYPE_IBSS) { 2305 if (mode == NL80211_IFTYPE_ADHOC) {
2306 const struct iwl3945_channel_info *ch_info; 2306 const struct iwl3945_channel_info *ch_info;
2307 2307
2308 ch_info = iwl3945_get_channel_info(priv, 2308 ch_info = iwl3945_get_channel_info(priv,
@@ -2469,11 +2469,11 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2469 2469
2470 /* If we are a client station in a BSS network, use the special 2470 /* If we are a client station in a BSS network, use the special
2471 * AP station entry (that's the only station we communicate with) */ 2471 * AP station entry (that's the only station we communicate with) */
2472 case IEEE80211_IF_TYPE_STA: 2472 case NL80211_IFTYPE_STATION:
2473 return IWL_AP_ID; 2473 return IWL_AP_ID;
2474 2474
2475 /* If we are an AP, then find the station, or use BCAST */ 2475 /* If we are an AP, then find the station, or use BCAST */
2476 case IEEE80211_IF_TYPE_AP: 2476 case NL80211_IFTYPE_AP:
2477 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 2477 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
2478 if (sta_id != IWL_INVALID_STATION) 2478 if (sta_id != IWL_INVALID_STATION)
2479 return sta_id; 2479 return sta_id;
@@ -2481,7 +2481,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2481 2481
2482 /* If this frame is going out to an IBSS network, find the station, 2482 /* If this frame is going out to an IBSS network, find the station,
2483 * or create a new station table entry */ 2483 * or create a new station table entry */
2484 case IEEE80211_IF_TYPE_IBSS: { 2484 case NL80211_IFTYPE_ADHOC: {
2485 DECLARE_MAC_BUF(mac); 2485 DECLARE_MAC_BUF(mac);
2486 2486
2487 /* Create new station table entry */ 2487 /* Create new station table entry */
@@ -2502,7 +2502,7 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2502 } 2502 }
2503 /* If we are in monitor mode, use BCAST. This is required for 2503 /* If we are in monitor mode, use BCAST. This is required for
2504 * packet injection. */ 2504 * packet injection. */
2505 case IEEE80211_IF_TYPE_MNTR: 2505 case NL80211_IFTYPE_MONITOR:
2506 return priv->hw_setting.bcast_sta_id; 2506 return priv->hw_setting.bcast_sta_id;
2507 2507
2508 default: 2508 default:
@@ -2565,16 +2565,16 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2565 2565
2566 /* drop all data frame if we are not associated */ 2566 /* drop all data frame if we are not associated */
2567 if (ieee80211_is_data(fc) && 2567 if (ieee80211_is_data(fc) &&
2568 (priv->iw_mode != IEEE80211_IF_TYPE_MNTR) && /* packet injection */ 2568 (priv->iw_mode != NL80211_IFTYPE_MONITOR) && /* packet injection */
2569 (!iwl3945_is_associated(priv) || 2569 (!iwl3945_is_associated(priv) ||
2570 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id))) { 2570 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
2571 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n"); 2571 IWL_DEBUG_DROP("Dropping - !iwl3945_is_associated\n");
2572 goto drop_unlock; 2572 goto drop_unlock;
2573 } 2573 }
2574 2574
2575 spin_unlock_irqrestore(&priv->lock, flags); 2575 spin_unlock_irqrestore(&priv->lock, flags);
2576 2576
2577 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(fc)); 2577 hdr_len = ieee80211_hdrlen(fc);
2578 2578
2579 /* Find (or create) index into station table for destination station */ 2579 /* Find (or create) index into station table for destination station */
2580 sta_id = iwl3945_get_sta_id(priv, hdr); 2580 sta_id = iwl3945_get_sta_id(priv, hdr);
@@ -2590,7 +2590,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2590 2590
2591 if (ieee80211_is_data_qos(fc)) { 2591 if (ieee80211_is_data_qos(fc)) {
2592 qc = ieee80211_get_qos_ctl(hdr); 2592 qc = ieee80211_get_qos_ctl(hdr);
2593 tid = qc[0] & 0xf; 2593 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
2594 seq_number = priv->stations[sta_id].tid[tid].seq_number & 2594 seq_number = priv->stations[sta_id].tid[tid].seq_number &
2595 IEEE80211_SCTL_SEQ; 2595 IEEE80211_SCTL_SEQ;
2596 hdr->seq_ctrl = cpu_to_le16(seq_number) | 2596 hdr->seq_ctrl = cpu_to_le16(seq_number) |
@@ -2709,7 +2709,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2709 sizeof(out_cmd->cmd.tx)); 2709 sizeof(out_cmd->cmd.tx));
2710 2710
2711 iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, 2711 iwl3945_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
2712 ieee80211_get_hdrlen(le16_to_cpu(fc))); 2712 ieee80211_hdrlen(fc));
2713 2713
2714 /* Tell device the write index *just past* this latest filled TFD */ 2714 /* Tell device the write index *just past* this latest filled TFD */
2715 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 2715 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -2806,7 +2806,7 @@ static void iwl3945_radio_kill_sw(struct iwl3945_priv *priv, int disable_radio)
2806 if (disable_radio) { 2806 if (disable_radio) {
2807 iwl3945_scan_cancel(priv); 2807 iwl3945_scan_cancel(priv);
2808 /* FIXME: This is a workaround for AP */ 2808 /* FIXME: This is a workaround for AP */
2809 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 2809 if (priv->iw_mode != NL80211_IFTYPE_AP) {
2810 spin_lock_irqsave(&priv->lock, flags); 2810 spin_lock_irqsave(&priv->lock, flags);
2811 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET, 2811 iwl3945_write32(priv, CSR_UCODE_DRV_GP1_SET,
2812 CSR_UCODE_SW_BIT_RFKILL); 2812 CSR_UCODE_SW_BIT_RFKILL);
@@ -3161,7 +3161,7 @@ static void iwl3945_rx_beacon_notif(struct iwl3945_priv *priv,
3161 le32_to_cpu(beacon->low_tsf), rate); 3161 le32_to_cpu(beacon->low_tsf), rate);
3162#endif 3162#endif
3163 3163
3164 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 3164 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
3165 (!test_bit(STATUS_EXIT_PENDING, &priv->status))) 3165 (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
3166 queue_work(priv->workqueue, &priv->beacon_update); 3166 queue_work(priv->workqueue, &priv->beacon_update);
3167} 3167}
@@ -4782,8 +4782,11 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4782/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after 4782/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
4783 * sending probe req. This should be set long enough to hear probe responses 4783 * sending probe req. This should be set long enough to hear probe responses
4784 * from more than one AP. */ 4784 * from more than one AP. */
4785#define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ 4785#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
4786#define IWL_ACTIVE_DWELL_TIME_52 (10) 4786#define IWL_ACTIVE_DWELL_TIME_52 (20)
4787
4788#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
4789#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
4787 4790
4788/* For faster active scanning, scan will move to the next channel if fewer than 4791/* For faster active scanning, scan will move to the next channel if fewer than
4789 * PLCP_QUIET_THRESH packets are heard on this channel within 4792 * PLCP_QUIET_THRESH packets are heard on this channel within
@@ -4792,7 +4795,7 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4792 * no other traffic). 4795 * no other traffic).
4793 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ 4796 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
4794#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ 4797#define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */
4795#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ 4798#define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(10) /* msec */
4796 4799
4797/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. 4800/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
4798 * Must be set longer than active dwell time. 4801 * Must be set longer than active dwell time.
@@ -4802,19 +4805,23 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4802#define IWL_PASSIVE_DWELL_BASE (100) 4805#define IWL_PASSIVE_DWELL_BASE (100)
4803#define IWL_CHANNEL_TUNE_TIME 5 4806#define IWL_CHANNEL_TUNE_TIME 5
4804 4807
4808#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
4809
4805static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv, 4810static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv,
4806 enum ieee80211_band band) 4811 enum ieee80211_band band,
4812 u8 n_probes)
4807{ 4813{
4808 if (band == IEEE80211_BAND_5GHZ) 4814 if (band == IEEE80211_BAND_5GHZ)
4809 return IWL_ACTIVE_DWELL_TIME_52; 4815 return IWL_ACTIVE_DWELL_TIME_52 +
4816 IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
4810 else 4817 else
4811 return IWL_ACTIVE_DWELL_TIME_24; 4818 return IWL_ACTIVE_DWELL_TIME_24 +
4819 IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
4812} 4820}
4813 4821
4814static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv, 4822static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
4815 enum ieee80211_band band) 4823 enum ieee80211_band band)
4816{ 4824{
4817 u16 active = iwl3945_get_active_dwell_time(priv, band);
4818 u16 passive = (band == IEEE80211_BAND_2GHZ) ? 4825 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
4819 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : 4826 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
4820 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; 4827 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
@@ -4829,15 +4836,12 @@ static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
4829 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; 4836 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
4830 } 4837 }
4831 4838
4832 if (passive <= active)
4833 passive = active + 1;
4834
4835 return passive; 4839 return passive;
4836} 4840}
4837 4841
4838static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, 4842static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4839 enum ieee80211_band band, 4843 enum ieee80211_band band,
4840 u8 is_active, u8 direct_mask, 4844 u8 is_active, u8 n_probes,
4841 struct iwl3945_scan_channel *scan_ch) 4845 struct iwl3945_scan_channel *scan_ch)
4842{ 4846{
4843 const struct ieee80211_channel *channels = NULL; 4847 const struct ieee80211_channel *channels = NULL;
@@ -4853,9 +4857,12 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4853 4857
4854 channels = sband->channels; 4858 channels = sband->channels;
4855 4859
4856 active_dwell = iwl3945_get_active_dwell_time(priv, band); 4860 active_dwell = iwl3945_get_active_dwell_time(priv, band, n_probes);
4857 passive_dwell = iwl3945_get_passive_dwell_time(priv, band); 4861 passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
4858 4862
4863 if (passive_dwell <= active_dwell)
4864 passive_dwell = active_dwell + 1;
4865
4859 for (i = 0, added = 0; i < sband->n_channels; i++) { 4866 for (i = 0, added = 0; i < sband->n_channels; i++) {
4860 if (channels[i].flags & IEEE80211_CHAN_DISABLED) 4867 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4861 continue; 4868 continue;
@@ -4875,8 +4882,8 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4875 else 4882 else
4876 scan_ch->type = 1; /* active */ 4883 scan_ch->type = 1; /* active */
4877 4884
4878 if (scan_ch->type & 1) 4885 if ((scan_ch->type & 1) && n_probes)
4879 scan_ch->type |= (direct_mask << 1); 4886 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4880 4887
4881 scan_ch->active_dwell = cpu_to_le16(active_dwell); 4888 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4882 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 4889 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
@@ -6052,7 +6059,7 @@ static void iwl3945_bg_set_monitor(struct work_struct *work)
6052 if (!iwl3945_is_ready(priv)) 6059 if (!iwl3945_is_ready(priv))
6053 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n"); 6060 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
6054 else 6061 else
6055 if (iwl3945_set_mode(priv, IEEE80211_IF_TYPE_MNTR) != 0) 6062 if (iwl3945_set_mode(priv, NL80211_IFTYPE_MONITOR) != 0)
6056 IWL_ERROR("iwl3945_set_mode() failed\n"); 6063 IWL_ERROR("iwl3945_set_mode() failed\n");
6057 6064
6058 mutex_unlock(&priv->mutex); 6065 mutex_unlock(&priv->mutex);
@@ -6093,7 +6100,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6093 int rc = 0; 6100 int rc = 0;
6094 struct iwl3945_scan_cmd *scan; 6101 struct iwl3945_scan_cmd *scan;
6095 struct ieee80211_conf *conf = NULL; 6102 struct ieee80211_conf *conf = NULL;
6096 u8 direct_mask; 6103 u8 n_probes = 2;
6097 enum ieee80211_band band; 6104 enum ieee80211_band band;
6098 6105
6099 conf = ieee80211_get_hw_conf(priv->hw); 6106 conf = ieee80211_get_hw_conf(priv->hw);
@@ -6201,7 +6208,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6201 scan->direct_scan[0].len = priv->direct_ssid_len; 6208 scan->direct_scan[0].len = priv->direct_ssid_len;
6202 memcpy(scan->direct_scan[0].ssid, 6209 memcpy(scan->direct_scan[0].ssid,
6203 priv->direct_ssid, priv->direct_ssid_len); 6210 priv->direct_ssid, priv->direct_ssid_len);
6204 direct_mask = 1; 6211 n_probes++;
6205 } else if (!iwl3945_is_associated(priv) && priv->essid_len) { 6212 } else if (!iwl3945_is_associated(priv) && priv->essid_len) {
6206 IWL_DEBUG_SCAN 6213 IWL_DEBUG_SCAN
6207 ("Kicking off one direct scan for '%s' when not associated\n", 6214 ("Kicking off one direct scan for '%s' when not associated\n",
@@ -6209,11 +6216,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6209 scan->direct_scan[0].id = WLAN_EID_SSID; 6216 scan->direct_scan[0].id = WLAN_EID_SSID;
6210 scan->direct_scan[0].len = priv->essid_len; 6217 scan->direct_scan[0].len = priv->essid_len;
6211 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); 6218 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6212 direct_mask = 1; 6219 n_probes++;
6213 } else { 6220 } else
6214 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n"); 6221 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
6215 direct_mask = 0;
6216 }
6217 6222
6218 /* We don't build a direct scan probe request; the uCode will do 6223 /* We don't build a direct scan probe request; the uCode will do
6219 * that based on the direct_mask added to each channel entry */ 6224 * that based on the direct_mask added to each channel entry */
@@ -6243,21 +6248,13 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6243 /* select Rx antennas */ 6248 /* select Rx antennas */
6244 scan->flags |= iwl3945_get_antenna_flags(priv); 6249 scan->flags |= iwl3945_get_antenna_flags(priv);
6245 6250
6246 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 6251 if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
6247 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 6252 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6248 6253
6249 if (direct_mask) 6254 scan->channel_count =
6250 scan->channel_count = 6255 iwl3945_get_channels_for_scan(priv, band, 1, /* active */
6251 iwl3945_get_channels_for_scan( 6256 n_probes,
6252 priv, band, 1, /* active */ 6257 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6253 direct_mask,
6254 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6255 else
6256 scan->channel_count =
6257 iwl3945_get_channels_for_scan(
6258 priv, band, 0, /* passive */
6259 direct_mask,
6260 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6261 6258
6262 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 6259 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6263 scan->channel_count * sizeof(struct iwl3945_scan_channel); 6260 scan->channel_count * sizeof(struct iwl3945_scan_channel);
@@ -6320,16 +6317,13 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
6320 6317
6321#define IWL_DELAY_NEXT_SCAN (HZ*2) 6318#define IWL_DELAY_NEXT_SCAN (HZ*2)
6322 6319
6323static void iwl3945_bg_post_associate(struct work_struct *data) 6320static void iwl3945_post_associate(struct iwl3945_priv *priv)
6324{ 6321{
6325 struct iwl3945_priv *priv = container_of(data, struct iwl3945_priv,
6326 post_associate.work);
6327
6328 int rc = 0; 6322 int rc = 0;
6329 struct ieee80211_conf *conf = NULL; 6323 struct ieee80211_conf *conf = NULL;
6330 DECLARE_MAC_BUF(mac); 6324 DECLARE_MAC_BUF(mac);
6331 6325
6332 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6326 if (priv->iw_mode == NL80211_IFTYPE_AP) {
6333 IWL_ERROR("%s Should not be called in AP mode\n", __func__); 6327 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
6334 return; 6328 return;
6335 } 6329 }
@@ -6342,12 +6336,9 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6342 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 6336 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6343 return; 6337 return;
6344 6338
6345 mutex_lock(&priv->mutex); 6339 if (!priv->vif || !priv->is_open)
6346
6347 if (!priv->vif || !priv->is_open) {
6348 mutex_unlock(&priv->mutex);
6349 return; 6340 return;
6350 } 6341
6351 iwl3945_scan_cancel_timeout(priv, 200); 6342 iwl3945_scan_cancel_timeout(priv, 200);
6352 6343
6353 conf = ieee80211_get_hw_conf(priv->hw); 6344 conf = ieee80211_get_hw_conf(priv->hw);
@@ -6381,7 +6372,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6381 else 6372 else
6382 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 6373 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6383 6374
6384 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 6375 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
6385 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 6376 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
6386 6377
6387 } 6378 }
@@ -6389,11 +6380,11 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6389 iwl3945_commit_rxon(priv); 6380 iwl3945_commit_rxon(priv);
6390 6381
6391 switch (priv->iw_mode) { 6382 switch (priv->iw_mode) {
6392 case IEEE80211_IF_TYPE_STA: 6383 case NL80211_IFTYPE_STATION:
6393 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); 6384 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
6394 break; 6385 break;
6395 6386
6396 case IEEE80211_IF_TYPE_IBSS: 6387 case NL80211_IFTYPE_ADHOC:
6397 6388
6398 /* clear out the station table */ 6389 /* clear out the station table */
6399 iwl3945_clear_stations_table(priv); 6390 iwl3945_clear_stations_table(priv);
@@ -6419,7 +6410,6 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6419 6410
6420 /* we have just associated, don't start scan too early */ 6411 /* we have just associated, don't start scan too early */
6421 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 6412 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6422 mutex_unlock(&priv->mutex);
6423} 6413}
6424 6414
6425static void iwl3945_bg_abort_scan(struct work_struct *work) 6415static void iwl3945_bg_abort_scan(struct work_struct *work)
@@ -6567,7 +6557,6 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
6567 */ 6557 */
6568 mutex_lock(&priv->mutex); 6558 mutex_lock(&priv->mutex);
6569 iwl3945_scan_cancel_timeout(priv, 100); 6559 iwl3945_scan_cancel_timeout(priv, 100);
6570 cancel_delayed_work(&priv->post_associate);
6571 mutex_unlock(&priv->mutex); 6560 mutex_unlock(&priv->mutex);
6572 } 6561 }
6573 6562
@@ -6650,8 +6639,6 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
6650 mutex_lock(&priv->mutex); 6639 mutex_lock(&priv->mutex);
6651 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 6640 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
6652 6641
6653 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
6654
6655 if (!iwl3945_is_ready(priv)) { 6642 if (!iwl3945_is_ready(priv)) {
6656 IWL_DEBUG_MAC80211("leave - not ready\n"); 6643 IWL_DEBUG_MAC80211("leave - not ready\n");
6657 ret = -EIO; 6644 ret = -EIO;
@@ -6767,7 +6754,7 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6767 priv->staging_rxon.flags &= 6754 priv->staging_rxon.flags &=
6768 ~RXON_FLG_SHORT_SLOT_MSK; 6755 ~RXON_FLG_SHORT_SLOT_MSK;
6769 6756
6770 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 6757 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
6771 priv->staging_rxon.flags &= 6758 priv->staging_rxon.flags &=
6772 ~RXON_FLG_SHORT_SLOT_MSK; 6759 ~RXON_FLG_SHORT_SLOT_MSK;
6773 } 6760 }
@@ -6804,7 +6791,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6804 } 6791 }
6805 6792
6806 /* handle this temporarily here */ 6793 /* handle this temporarily here */
6807 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS && 6794 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
6808 conf->changed & IEEE80211_IFCC_BEACON) { 6795 conf->changed & IEEE80211_IFCC_BEACON) {
6809 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 6796 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
6810 if (!beacon) 6797 if (!beacon)
@@ -6816,7 +6803,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6816 6803
6817 /* XXX: this MUST use conf->mac_addr */ 6804 /* XXX: this MUST use conf->mac_addr */
6818 6805
6819 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 6806 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
6820 (!conf->ssid_len)) { 6807 (!conf->ssid_len)) {
6821 IWL_DEBUG_MAC80211 6808 IWL_DEBUG_MAC80211
6822 ("Leaving in AP mode because HostAPD is not ready.\n"); 6809 ("Leaving in AP mode because HostAPD is not ready.\n");
@@ -6839,7 +6826,7 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6839 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { 6826 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
6840 */ 6827 */
6841 6828
6842 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6829 if (priv->iw_mode == NL80211_IFTYPE_AP) {
6843 if (!conf->bssid) { 6830 if (!conf->bssid) {
6844 conf->bssid = priv->mac_addr; 6831 conf->bssid = priv->mac_addr;
6845 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 6832 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
@@ -6874,11 +6861,11 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6874 * to verify) - jpk */ 6861 * to verify) - jpk */
6875 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 6862 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
6876 6863
6877 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 6864 if (priv->iw_mode == NL80211_IFTYPE_AP)
6878 iwl3945_config_ap(priv); 6865 iwl3945_config_ap(priv);
6879 else { 6866 else {
6880 rc = iwl3945_commit_rxon(priv); 6867 rc = iwl3945_commit_rxon(priv);
6881 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) 6868 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
6882 iwl3945_add_station(priv, 6869 iwl3945_add_station(priv,
6883 priv->active_rxon.bssid_addr, 1, 0); 6870 priv->active_rxon.bssid_addr, 1, 0);
6884 } 6871 }
@@ -6914,7 +6901,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
6914 6901
6915 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) { 6902 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) {
6916 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n", 6903 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n",
6917 IEEE80211_IF_TYPE_MNTR, 6904 NL80211_IFTYPE_MONITOR,
6918 changed_flags, *total_flags); 6905 changed_flags, *total_flags);
6919 /* queue work 'cuz mac80211 is holding a lock which 6906 /* queue work 'cuz mac80211 is holding a lock which
6920 * prevents us from issuing (synchronous) f/w cmds */ 6907 * prevents us from issuing (synchronous) f/w cmds */
@@ -6935,7 +6922,6 @@ static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
6935 6922
6936 if (iwl3945_is_ready_rf(priv)) { 6923 if (iwl3945_is_ready_rf(priv)) {
6937 iwl3945_scan_cancel_timeout(priv, 100); 6924 iwl3945_scan_cancel_timeout(priv, 100);
6938 cancel_delayed_work(&priv->post_associate);
6939 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 6925 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
6940 iwl3945_commit_rxon(priv); 6926 iwl3945_commit_rxon(priv);
6941 } 6927 }
@@ -6950,6 +6936,63 @@ static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
6950 IWL_DEBUG_MAC80211("leave\n"); 6936 IWL_DEBUG_MAC80211("leave\n");
6951} 6937}
6952 6938
6939#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
6940
6941static void iwl3945_bss_info_changed(struct ieee80211_hw *hw,
6942 struct ieee80211_vif *vif,
6943 struct ieee80211_bss_conf *bss_conf,
6944 u32 changes)
6945{
6946 struct iwl3945_priv *priv = hw->priv;
6947
6948 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
6949
6950 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
6951 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
6952 bss_conf->use_short_preamble);
6953 if (bss_conf->use_short_preamble)
6954 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
6955 else
6956 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
6957 }
6958
6959 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
6960 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
6961 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
6962 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
6963 else
6964 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
6965 }
6966
6967 if (changes & BSS_CHANGED_ASSOC) {
6968 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
6969 /* This should never happen as this function should
6970 * never be called from interrupt context. */
6971 if (WARN_ON_ONCE(in_interrupt()))
6972 return;
6973 if (bss_conf->assoc) {
6974 priv->assoc_id = bss_conf->aid;
6975 priv->beacon_int = bss_conf->beacon_int;
6976 priv->timestamp0 = bss_conf->timestamp & 0xFFFFFFFF;
6977 priv->timestamp1 = (bss_conf->timestamp >> 32) &
6978 0xFFFFFFFF;
6979 priv->assoc_capability = bss_conf->assoc_capability;
6980 priv->next_scan_jiffies = jiffies +
6981 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6982 mutex_lock(&priv->mutex);
6983 iwl3945_post_associate(priv);
6984 mutex_unlock(&priv->mutex);
6985 } else {
6986 priv->assoc_id = 0;
6987 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
6988 }
6989 } else if (changes && iwl3945_is_associated(priv) && priv->assoc_id) {
6990 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
6991 iwl3945_send_rxon_assoc(priv);
6992 }
6993
6994}
6995
6953static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) 6996static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
6954{ 6997{
6955 int rc = 0; 6998 int rc = 0;
@@ -6967,7 +7010,7 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
6967 goto out_unlock; 7010 goto out_unlock;
6968 } 7011 }
6969 7012
6970 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ 7013 if (priv->iw_mode == NL80211_IFTYPE_AP) { /* APs don't scan */
6971 rc = -EIO; 7014 rc = -EIO;
6972 IWL_ERROR("ERROR: APs don't scan\n"); 7015 IWL_ERROR("ERROR: APs don't scan\n");
6973 goto out_unlock; 7016 goto out_unlock;
@@ -7109,7 +7152,7 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7109 spin_unlock_irqrestore(&priv->lock, flags); 7152 spin_unlock_irqrestore(&priv->lock, flags);
7110 7153
7111 mutex_lock(&priv->mutex); 7154 mutex_lock(&priv->mutex);
7112 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 7155 if (priv->iw_mode == NL80211_IFTYPE_AP)
7113 iwl3945_activate_qos(priv, 1); 7156 iwl3945_activate_qos(priv, 1);
7114 else if (priv->assoc_id && iwl3945_is_associated(priv)) 7157 else if (priv->assoc_id && iwl3945_is_associated(priv))
7115 iwl3945_activate_qos(priv, 0); 7158 iwl3945_activate_qos(priv, 0);
@@ -7182,8 +7225,6 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7182 7225
7183 iwl3945_reset_qos(priv); 7226 iwl3945_reset_qos(priv);
7184 7227
7185 cancel_delayed_work(&priv->post_associate);
7186
7187 spin_lock_irqsave(&priv->lock, flags); 7228 spin_lock_irqsave(&priv->lock, flags);
7188 priv->assoc_id = 0; 7229 priv->assoc_id = 0;
7189 priv->assoc_capability = 0; 7230 priv->assoc_capability = 0;
@@ -7198,7 +7239,7 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7198 priv->beacon_int = priv->hw->conf.beacon_int; 7239 priv->beacon_int = priv->hw->conf.beacon_int;
7199 priv->timestamp1 = 0; 7240 priv->timestamp1 = 0;
7200 priv->timestamp0 = 0; 7241 priv->timestamp0 = 0;
7201 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) 7242 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
7202 priv->beacon_int = 0; 7243 priv->beacon_int = 0;
7203 7244
7204 spin_unlock_irqrestore(&priv->lock, flags); 7245 spin_unlock_irqrestore(&priv->lock, flags);
@@ -7212,14 +7253,14 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7212 /* we are restarting association process 7253 /* we are restarting association process
7213 * clear RXON_FILTER_ASSOC_MSK bit 7254 * clear RXON_FILTER_ASSOC_MSK bit
7214 */ 7255 */
7215 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 7256 if (priv->iw_mode != NL80211_IFTYPE_AP) {
7216 iwl3945_scan_cancel_timeout(priv, 100); 7257 iwl3945_scan_cancel_timeout(priv, 100);
7217 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 7258 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
7218 iwl3945_commit_rxon(priv); 7259 iwl3945_commit_rxon(priv);
7219 } 7260 }
7220 7261
7221 /* Per mac80211.h: This is only used in IBSS mode... */ 7262 /* Per mac80211.h: This is only used in IBSS mode... */
7222 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 7263 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
7223 7264
7224 IWL_DEBUG_MAC80211("leave - not in IBSS\n"); 7265 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
7225 mutex_unlock(&priv->mutex); 7266 mutex_unlock(&priv->mutex);
@@ -7248,7 +7289,7 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7248 return -EIO; 7289 return -EIO;
7249 } 7290 }
7250 7291
7251 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { 7292 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
7252 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 7293 IWL_DEBUG_MAC80211("leave - not IBSS\n");
7253 mutex_unlock(&priv->mutex); 7294 mutex_unlock(&priv->mutex);
7254 return -EIO; 7295 return -EIO;
@@ -7268,7 +7309,7 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7268 7309
7269 iwl3945_reset_qos(priv); 7310 iwl3945_reset_qos(priv);
7270 7311
7271 queue_work(priv->workqueue, &priv->post_associate.work); 7312 iwl3945_post_associate(priv);
7272 7313
7273 mutex_unlock(&priv->mutex); 7314 mutex_unlock(&priv->mutex);
7274 7315
@@ -7329,15 +7370,6 @@ static ssize_t show_temperature(struct device *d,
7329 7370
7330static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); 7371static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);
7331 7372
7332static ssize_t show_rs_window(struct device *d,
7333 struct device_attribute *attr,
7334 char *buf)
7335{
7336 struct iwl3945_priv *priv = d->driver_data;
7337 return iwl3945_fill_rs_info(priv->hw, buf, IWL_AP_ID);
7338}
7339static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
7340
7341static ssize_t show_tx_power(struct device *d, 7373static ssize_t show_tx_power(struct device *d,
7342 struct device_attribute *attr, char *buf) 7374 struct device_attribute *attr, char *buf)
7343{ 7375{
@@ -7767,7 +7799,6 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7767 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill); 7799 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7768 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 7800 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
7769 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor); 7801 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor);
7770 INIT_DELAYED_WORK(&priv->post_associate, iwl3945_bg_post_associate);
7771 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 7802 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7772 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 7803 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
7773 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check); 7804 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check);
@@ -7785,7 +7816,6 @@ static void iwl3945_cancel_deferred_work(struct iwl3945_priv *priv)
7785 cancel_delayed_work_sync(&priv->init_alive_start); 7816 cancel_delayed_work_sync(&priv->init_alive_start);
7786 cancel_delayed_work(&priv->scan_check); 7817 cancel_delayed_work(&priv->scan_check);
7787 cancel_delayed_work(&priv->alive_start); 7818 cancel_delayed_work(&priv->alive_start);
7788 cancel_delayed_work(&priv->post_associate);
7789 cancel_work_sync(&priv->beacon_update); 7819 cancel_work_sync(&priv->beacon_update);
7790} 7820}
7791 7821
@@ -7801,7 +7831,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
7801#endif 7831#endif
7802 &dev_attr_power_level.attr, 7832 &dev_attr_power_level.attr,
7803 &dev_attr_retry_rate.attr, 7833 &dev_attr_retry_rate.attr,
7804 &dev_attr_rs_window.attr,
7805 &dev_attr_statistics.attr, 7834 &dev_attr_statistics.attr,
7806 &dev_attr_status.attr, 7835 &dev_attr_status.attr,
7807 &dev_attr_temperature.attr, 7836 &dev_attr_temperature.attr,
@@ -7830,6 +7859,7 @@ static struct ieee80211_ops iwl3945_hw_ops = {
7830 .conf_tx = iwl3945_mac_conf_tx, 7859 .conf_tx = iwl3945_mac_conf_tx,
7831 .get_tsf = iwl3945_mac_get_tsf, 7860 .get_tsf = iwl3945_mac_get_tsf,
7832 .reset_tsf = iwl3945_mac_reset_tsf, 7861 .reset_tsf = iwl3945_mac_reset_tsf,
7862 .bss_info_changed = iwl3945_bss_info_changed,
7833 .hw_scan = iwl3945_mac_hw_scan 7863 .hw_scan = iwl3945_mac_hw_scan
7834}; 7864};
7835 7865
@@ -7868,6 +7898,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7868 SET_IEEE80211_DEV(hw, &pdev->dev); 7898 SET_IEEE80211_DEV(hw, &pdev->dev);
7869 7899
7870 hw->rate_control_algorithm = "iwl-3945-rs"; 7900 hw->rate_control_algorithm = "iwl-3945-rs";
7901 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
7871 7902
7872 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); 7903 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
7873 priv = hw->priv; 7904 priv = hw->priv;
@@ -7890,6 +7921,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7890 hw->flags = IEEE80211_HW_SIGNAL_DBM | 7921 hw->flags = IEEE80211_HW_SIGNAL_DBM |
7891 IEEE80211_HW_NOISE_DBM; 7922 IEEE80211_HW_NOISE_DBM;
7892 7923
7924 hw->wiphy->interface_modes =
7925 BIT(NL80211_IFTYPE_AP) |
7926 BIT(NL80211_IFTYPE_STATION) |
7927 BIT(NL80211_IFTYPE_ADHOC);
7928
7893 /* 4 EDCA QOS priorities */ 7929 /* 4 EDCA QOS priorities */
7894 hw->queues = 4; 7930 hw->queues = 4;
7895 7931
@@ -7951,7 +7987,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7951 IWL_DEBUG_INFO("Radio disabled.\n"); 7987 IWL_DEBUG_INFO("Radio disabled.\n");
7952 } 7988 }
7953 7989
7954 priv->iw_mode = IEEE80211_IF_TYPE_STA; 7990 priv->iw_mode = NL80211_IFTYPE_STATION;
7955 7991
7956 printk(KERN_INFO DRV_NAME 7992 printk(KERN_INFO DRV_NAME
7957 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name); 7993 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);
@@ -8331,6 +8367,8 @@ static void __exit iwl3945_exit(void)
8331 iwl3945_rate_control_unregister(); 8367 iwl3945_rate_control_unregister();
8332} 8368}
8333 8369
8370MODULE_FIRMWARE("iwlwifi-3945" IWL3945_UCODE_API ".ucode");
8371
8334module_param_named(antenna, iwl3945_param_antenna, int, 0444); 8372module_param_named(antenna, iwl3945_param_antenna, int, 0444);
8335MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 8373MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
8336module_param_named(disable, iwl3945_param_disable, int, 0444); 8374module_param_named(disable, iwl3945_param_disable, int, 0444);
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index a267d6e65f03..92be60415d04 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -8,6 +8,7 @@
8#include "scan.h" 8#include "scan.h"
9#include "cmd.h" 9#include "cmd.h"
10 10
11static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp);
11 12
12static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) = 13static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) =
13 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 14 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -20,12 +21,88 @@ static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
20#define CAPINFO_MASK (~(0xda00)) 21#define CAPINFO_MASK (~(0xda00))
21 22
22 23
24/**
25 * @brief This function finds common rates between rates and card rates.
26 *
27 * It will fill common rates in rates as output if found.
28 *
29 * NOTE: Setting the MSB of the basic rates need to be taken
30 * care, either before or after calling this function
31 *
32 * @param priv A pointer to struct lbs_private structure
33 * @param rates the buffer which keeps input and output
34 * @param rates_size the size of rate1 buffer; new size of buffer on return
35 *
36 * @return 0 on success, or -1 on error
37 */
38static int get_common_rates(struct lbs_private *priv,
39 u8 *rates,
40 u16 *rates_size)
41{
42 u8 *card_rates = lbs_bg_rates;
43 size_t num_card_rates = sizeof(lbs_bg_rates);
44 int ret = 0, i, j;
45 u8 tmp[30];
46 size_t tmp_size = 0;
47
48 /* For each rate in card_rates that exists in rate1, copy to tmp */
49 for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
50 for (j = 0; rates[j] && (j < *rates_size); j++) {
51 if (rates[j] == card_rates[i])
52 tmp[tmp_size++] = card_rates[i];
53 }
54 }
55
56 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
57 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
58 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
59 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
60
61 if (!priv->enablehwauto) {
62 for (i = 0; i < tmp_size; i++) {
63 if (tmp[i] == priv->cur_rate)
64 goto done;
65 }
66 lbs_pr_alert("Previously set fixed data rate %#x isn't "
67 "compatible with the network.\n", priv->cur_rate);
68 ret = -1;
69 goto done;
70 }
71 ret = 0;
72
73done:
74 memset(rates, 0, *rates_size);
75 *rates_size = min_t(int, tmp_size, *rates_size);
76 memcpy(rates, tmp, *rates_size);
77 return ret;
78}
79
80
81/**
82 * @brief Sets the MSB on basic rates as the firmware requires
83 *
84 * Scan through an array and set the MSB for basic data rates.
85 *
86 * @param rates buffer of data rates
87 * @param len size of buffer
88 */
89static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
90{
91 int i;
92
93 for (i = 0; i < len; i++) {
94 if (rates[i] == 0x02 || rates[i] == 0x04 ||
95 rates[i] == 0x0b || rates[i] == 0x16)
96 rates[i] |= 0x80;
97 }
98}
99
23 100
24/** 101/**
25 * @brief Associate to a specific BSS discovered in a scan 102 * @brief Associate to a specific BSS discovered in a scan
26 * 103 *
27 * @param priv A pointer to struct lbs_private structure 104 * @param priv A pointer to struct lbs_private structure
28 * @param pbssdesc Pointer to the BSS descriptor to associate with. 105 * @param assoc_req The association request describing the BSS to associate with
29 * 106 *
30 * @return 0-success, otherwise fail 107 * @return 0-success, otherwise fail
31 */ 108 */
@@ -33,29 +110,29 @@ static int lbs_associate(struct lbs_private *priv,
33 struct assoc_request *assoc_req) 110 struct assoc_request *assoc_req)
34{ 111{
35 int ret; 112 int ret;
113 u8 preamble = RADIO_PREAMBLE_LONG;
36 114
37 lbs_deb_enter(LBS_DEB_ASSOC); 115 lbs_deb_enter(LBS_DEB_ASSOC);
38 116
39 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE, 117 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE,
40 0, CMD_OPTION_WAITFORRSP, 118 0, CMD_OPTION_WAITFORRSP,
41 0, assoc_req->bss.bssid); 119 0, assoc_req->bss.bssid);
42
43 if (ret) 120 if (ret)
44 goto done; 121 goto out;
45 122
46 /* set preamble to firmware */ 123 /* Use short preamble only when both the BSS and firmware support it */
47 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) && 124 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
48 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) 125 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
49 priv->preamble = CMD_TYPE_SHORT_PREAMBLE; 126 preamble = RADIO_PREAMBLE_SHORT;
50 else
51 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
52 127
53 lbs_set_radio_control(priv); 128 ret = lbs_set_radio(priv, preamble, 1);
129 if (ret)
130 goto out;
54 131
55 ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE, 132 ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE,
56 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); 133 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
57 134
58done: 135out:
59 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 136 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
60 return ret; 137 return ret;
61} 138}
@@ -64,17 +141,22 @@ done:
64 * @brief Join an adhoc network found in a previous scan 141 * @brief Join an adhoc network found in a previous scan
65 * 142 *
66 * @param priv A pointer to struct lbs_private structure 143 * @param priv A pointer to struct lbs_private structure
67 * @param pbssdesc Pointer to a BSS descriptor found in a previous scan 144 * @param assoc_req The association request describing the BSS to join
68 * to attempt to join
69 * 145 *
70 * @return 0--success, -1--fail 146 * @return 0 on success, error on failure
71 */ 147 */
72static int lbs_join_adhoc_network(struct lbs_private *priv, 148static int lbs_adhoc_join(struct lbs_private *priv,
73 struct assoc_request *assoc_req) 149 struct assoc_request *assoc_req)
74{ 150{
151 struct cmd_ds_802_11_ad_hoc_join cmd;
75 struct bss_descriptor *bss = &assoc_req->bss; 152 struct bss_descriptor *bss = &assoc_req->bss;
153 u8 preamble = RADIO_PREAMBLE_LONG;
154 DECLARE_MAC_BUF(mac);
155 u16 ratesize = 0;
76 int ret = 0; 156 int ret = 0;
77 157
158 lbs_deb_enter(LBS_DEB_ASSOC);
159
78 lbs_deb_join("current SSID '%s', ssid length %u\n", 160 lbs_deb_join("current SSID '%s', ssid length %u\n",
79 escape_essid(priv->curbssparams.ssid, 161 escape_essid(priv->curbssparams.ssid,
80 priv->curbssparams.ssid_len), 162 priv->curbssparams.ssid_len),
@@ -106,29 +188,106 @@ static int lbs_join_adhoc_network(struct lbs_private *priv,
106 goto out; 188 goto out;
107 } 189 }
108 190
109 /* Use shortpreamble only when both creator and card supports 191 /* Use short preamble only when both the BSS and firmware support it */
110 short preamble */ 192 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
111 if (!(bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) || 193 (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
112 !(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
113 lbs_deb_join("AdhocJoin: Long preamble\n");
114 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
115 } else {
116 lbs_deb_join("AdhocJoin: Short preamble\n"); 194 lbs_deb_join("AdhocJoin: Short preamble\n");
117 priv->preamble = CMD_TYPE_SHORT_PREAMBLE; 195 preamble = RADIO_PREAMBLE_SHORT;
118 } 196 }
119 197
120 lbs_set_radio_control(priv); 198 ret = lbs_set_radio(priv, preamble, 1);
199 if (ret)
200 goto out;
121 201
122 lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel); 202 lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
123 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band); 203 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
124 204
125 priv->adhoccreate = 0; 205 priv->adhoccreate = 0;
206 priv->curbssparams.channel = bss->channel;
126 207
127 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_JOIN, 208 /* Build the join command */
128 0, CMD_OPTION_WAITFORRSP, 209 memset(&cmd, 0, sizeof(cmd));
129 OID_802_11_SSID, assoc_req); 210 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
211
212 cmd.bss.type = CMD_BSS_TYPE_IBSS;
213 cmd.bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
214
215 memcpy(&cmd.bss.bssid, &bss->bssid, ETH_ALEN);
216 memcpy(&cmd.bss.ssid, &bss->ssid, bss->ssid_len);
217
218 memcpy(&cmd.bss.phyparamset, &bss->phyparamset,
219 sizeof(union ieeetypes_phyparamset));
220
221 memcpy(&cmd.bss.ssparamset, &bss->ssparamset,
222 sizeof(union IEEEtypes_ssparamset));
223
224 cmd.bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
225 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
226 bss->capability, CAPINFO_MASK);
227
228 /* information on BSSID descriptor passed to FW */
229 lbs_deb_join("ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
230 print_mac(mac, cmd.bss.bssid), cmd.bss.ssid);
231
232 /* Only v8 and below support setting these */
233 if (priv->fwrelease < 0x09000000) {
234 /* failtimeout */
235 cmd.failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
236 /* probedelay */
237 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
238 }
239
240 /* Copy Data rates from the rates recorded in scan response */
241 memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
242 ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES);
243 memcpy(cmd.bss.rates, bss->rates, ratesize);
244 if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
245 lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");
246 ret = -1;
247 goto out;
248 }
249
250 /* Copy the ad-hoc creation rates into Current BSS state structure */
251 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
252 memcpy(&priv->curbssparams.rates, cmd.bss.rates, ratesize);
253
254 /* Set MSB on basic rates as the firmware requires, but _after_
255 * copying to current bss rates.
256 */
257 lbs_set_basic_rate_flags(cmd.bss.rates, ratesize);
258
259 cmd.bss.ssparamset.ibssparamset.atimwindow = cpu_to_le16(bss->atimwindow);
260
261 if (assoc_req->secinfo.wep_enabled) {
262 u16 tmp = le16_to_cpu(cmd.bss.capability);
263 tmp |= WLAN_CAPABILITY_PRIVACY;
264 cmd.bss.capability = cpu_to_le16(tmp);
265 }
266
267 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
268 __le32 local_ps_mode = cpu_to_le32(LBS802_11POWERMODECAM);
269
270 /* wake up first */
271 ret = lbs_prepare_and_send_command(priv, CMD_802_11_PS_MODE,
272 CMD_ACT_SET, 0, 0,
273 &local_ps_mode);
274 if (ret) {
275 ret = -1;
276 goto out;
277 }
278 }
279
280 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
281 ret = -1;
282 goto out;
283 }
284
285 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
286 if (ret == 0)
287 ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
130 288
131out: 289out:
290 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
132 return ret; 291 return ret;
133} 292}
134 293
@@ -136,39 +295,131 @@ out:
136 * @brief Start an Adhoc Network 295 * @brief Start an Adhoc Network
137 * 296 *
138 * @param priv A pointer to struct lbs_private structure 297 * @param priv A pointer to struct lbs_private structure
139 * @param adhocssid The ssid of the Adhoc Network 298 * @param assoc_req The association request describing the BSS to start
140 * @return 0--success, -1--fail 299 *
300 * @return 0 on success, error on failure
141 */ 301 */
142static int lbs_start_adhoc_network(struct lbs_private *priv, 302static int lbs_adhoc_start(struct lbs_private *priv,
143 struct assoc_request *assoc_req) 303 struct assoc_request *assoc_req)
144{ 304{
305 struct cmd_ds_802_11_ad_hoc_start cmd;
306 u8 preamble = RADIO_PREAMBLE_LONG;
307 size_t ratesize = 0;
308 u16 tmpcap = 0;
145 int ret = 0; 309 int ret = 0;
146 310
147 priv->adhoccreate = 1; 311 lbs_deb_enter(LBS_DEB_ASSOC);
148 312
149 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) { 313 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
150 lbs_deb_join("AdhocStart: Short preamble\n"); 314 lbs_deb_join("ADHOC_START: Will use short preamble\n");
151 priv->preamble = CMD_TYPE_SHORT_PREAMBLE; 315 preamble = RADIO_PREAMBLE_SHORT;
152 } else {
153 lbs_deb_join("AdhocStart: Long preamble\n");
154 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
155 } 316 }
156 317
157 lbs_set_radio_control(priv); 318 ret = lbs_set_radio(priv, preamble, 1);
319 if (ret)
320 goto out;
158 321
159 lbs_deb_join("AdhocStart: channel = %d\n", assoc_req->channel); 322 /* Build the start command */
160 lbs_deb_join("AdhocStart: band = %d\n", assoc_req->band); 323 memset(&cmd, 0, sizeof(cmd));
324 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
161 325
162 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_START, 326 memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len);
163 0, CMD_OPTION_WAITFORRSP, 0, assoc_req); 327
328 lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n",
329 escape_essid(assoc_req->ssid, assoc_req->ssid_len),
330 assoc_req->ssid_len);
331
332 cmd.bsstype = CMD_BSS_TYPE_IBSS;
333
334 if (priv->beacon_period == 0)
335 priv->beacon_period = MRVDRV_BEACON_INTERVAL;
336 cmd.beaconperiod = cpu_to_le16(priv->beacon_period);
337
338 WARN_ON(!assoc_req->channel);
339
340 /* set Physical parameter set */
341 cmd.phyparamset.dsparamset.elementid = MFIE_TYPE_DS_SET;
342 cmd.phyparamset.dsparamset.len = 1;
343 cmd.phyparamset.dsparamset.currentchan = assoc_req->channel;
344
345 /* set IBSS parameter set */
346 cmd.ssparamset.ibssparamset.elementid = MFIE_TYPE_IBSS_SET;
347 cmd.ssparamset.ibssparamset.len = 2;
348 cmd.ssparamset.ibssparamset.atimwindow = 0;
349
350 /* set capability info */
351 tmpcap = WLAN_CAPABILITY_IBSS;
352 if (assoc_req->secinfo.wep_enabled) {
353 lbs_deb_join("ADHOC_START: WEP enabled, setting privacy on\n");
354 tmpcap |= WLAN_CAPABILITY_PRIVACY;
355 } else
356 lbs_deb_join("ADHOC_START: WEP disabled, setting privacy off\n");
357
358 cmd.capability = cpu_to_le16(tmpcap);
359
360 /* Only v8 and below support setting probe delay */
361 if (priv->fwrelease < 0x09000000)
362 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
363
364 ratesize = min(sizeof(cmd.rates), sizeof(lbs_bg_rates));
365 memcpy(cmd.rates, lbs_bg_rates, ratesize);
366
367 /* Copy the ad-hoc creating rates into Current BSS state structure */
368 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
369 memcpy(&priv->curbssparams.rates, &cmd.rates, ratesize);
164 370
371 /* Set MSB on basic rates as the firmware requires, but _after_
372 * copying to current bss rates.
373 */
374 lbs_set_basic_rate_flags(cmd.rates, ratesize);
375
376 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
377 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
378
379 if (lbs_create_dnld_countryinfo_11d(priv)) {
380 lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
381 ret = -1;
382 goto out;
383 }
384
385 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
386 assoc_req->channel, assoc_req->band);
387
388 priv->adhoccreate = 1;
389 priv->mode = IW_MODE_ADHOC;
390
391 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_START, &cmd);
392 if (ret == 0)
393 ret = lbs_adhoc_post(priv, (struct cmd_header *) &cmd);
394
395out:
396 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
165 return ret; 397 return ret;
166} 398}
167 399
168int lbs_stop_adhoc_network(struct lbs_private *priv) 400/**
401 * @brief Stop and Ad-Hoc network and exit Ad-Hoc mode
402 *
403 * @param priv A pointer to struct lbs_private structure
404 * @return 0 on success, or an error
405 */
406int lbs_adhoc_stop(struct lbs_private *priv)
169{ 407{
170 return lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_STOP, 408 struct cmd_ds_802_11_ad_hoc_stop cmd;
171 0, CMD_OPTION_WAITFORRSP, 0, NULL); 409 int ret;
410
411 lbs_deb_enter(LBS_DEB_JOIN);
412
413 memset(&cmd, 0, sizeof (cmd));
414 cmd.hdr.size = cpu_to_le16 (sizeof (cmd));
415
416 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
417
418 /* Clean up everything even if there was an error */
419 lbs_mac_event_disconnected(priv);
420
421 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
422 return ret;
172} 423}
173 424
174static inline int match_bss_no_security(struct lbs_802_11_security *secinfo, 425static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
@@ -480,14 +731,14 @@ static int assoc_helper_essid(struct lbs_private *priv,
480 if (bss != NULL) { 731 if (bss != NULL) {
481 lbs_deb_assoc("SSID found, will join\n"); 732 lbs_deb_assoc("SSID found, will join\n");
482 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor)); 733 memcpy(&assoc_req->bss, bss, sizeof(struct bss_descriptor));
483 lbs_join_adhoc_network(priv, assoc_req); 734 lbs_adhoc_join(priv, assoc_req);
484 } else { 735 } else {
485 /* else send START command */ 736 /* else send START command */
486 lbs_deb_assoc("SSID not found, creating adhoc network\n"); 737 lbs_deb_assoc("SSID not found, creating adhoc network\n");
487 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid, 738 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
488 IW_ESSID_MAX_SIZE); 739 IW_ESSID_MAX_SIZE);
489 assoc_req->bss.ssid_len = assoc_req->ssid_len; 740 assoc_req->bss.ssid_len = assoc_req->ssid_len;
490 lbs_start_adhoc_network(priv, assoc_req); 741 lbs_adhoc_start(priv, assoc_req);
491 } 742 }
492 } 743 }
493 744
@@ -520,7 +771,7 @@ static int assoc_helper_bssid(struct lbs_private *priv,
520 ret = lbs_associate(priv, assoc_req); 771 ret = lbs_associate(priv, assoc_req);
521 lbs_deb_assoc("ASSOC: lbs_associate(bssid) returned %d\n", ret); 772 lbs_deb_assoc("ASSOC: lbs_associate(bssid) returned %d\n", ret);
522 } else if (assoc_req->mode == IW_MODE_ADHOC) { 773 } else if (assoc_req->mode == IW_MODE_ADHOC) {
523 lbs_join_adhoc_network(priv, assoc_req); 774 lbs_adhoc_join(priv, assoc_req);
524 } 775 }
525 776
526out: 777out:
@@ -572,11 +823,7 @@ static int assoc_helper_mode(struct lbs_private *priv,
572 } 823 }
573 824
574 priv->mode = assoc_req->mode; 825 priv->mode = assoc_req->mode;
575 ret = lbs_prepare_and_send_command(priv, 826 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode);
576 CMD_802_11_SNMP_MIB,
577 0, CMD_OPTION_WAITFORRSP,
578 OID_802_11_INFRASTRUCTURE_MODE,
579 /* Shoot me now */ (void *) (size_t) assoc_req->mode);
580 827
581done: 828done:
582 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 829 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1029,7 +1276,9 @@ void lbs_association_worker(struct work_struct *work)
1029 */ 1276 */
1030 if (priv->mode == IW_MODE_INFRA) { 1277 if (priv->mode == IW_MODE_INFRA) {
1031 if (should_deauth_infrastructure(priv, assoc_req)) { 1278 if (should_deauth_infrastructure(priv, assoc_req)) {
1032 ret = lbs_send_deauthentication(priv); 1279 ret = lbs_cmd_80211_deauthenticate(priv,
1280 priv->curbssparams.bssid,
1281 WLAN_REASON_DEAUTH_LEAVING);
1033 if (ret) { 1282 if (ret) {
1034 lbs_deb_assoc("Deauthentication due to new " 1283 lbs_deb_assoc("Deauthentication due to new "
1035 "configuration request failed: %d\n", 1284 "configuration request failed: %d\n",
@@ -1038,7 +1287,7 @@ void lbs_association_worker(struct work_struct *work)
1038 } 1287 }
1039 } else if (priv->mode == IW_MODE_ADHOC) { 1288 } else if (priv->mode == IW_MODE_ADHOC) {
1040 if (should_stop_adhoc(priv, assoc_req)) { 1289 if (should_stop_adhoc(priv, assoc_req)) {
1041 ret = lbs_stop_adhoc_network(priv); 1290 ret = lbs_adhoc_stop(priv);
1042 if (ret) { 1291 if (ret) {
1043 lbs_deb_assoc("Teardown of AdHoc network due to " 1292 lbs_deb_assoc("Teardown of AdHoc network due to "
1044 "new configuration request failed: %d\n", 1293 "new configuration request failed: %d\n",
@@ -1214,94 +1463,6 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
1214 1463
1215 1464
1216/** 1465/**
1217 * @brief This function finds common rates between rate1 and card rates.
1218 *
1219 * It will fill common rates in rate1 as output if found.
1220 *
1221 * NOTE: Setting the MSB of the basic rates need to be taken
1222 * care, either before or after calling this function
1223 *
1224 * @param priv A pointer to struct lbs_private structure
1225 * @param rate1 the buffer which keeps input and output
1226 * @param rate1_size the size of rate1 buffer; new size of buffer on return
1227 *
1228 * @return 0 or -1
1229 */
1230static int get_common_rates(struct lbs_private *priv,
1231 u8 *rates,
1232 u16 *rates_size)
1233{
1234 u8 *card_rates = lbs_bg_rates;
1235 size_t num_card_rates = sizeof(lbs_bg_rates);
1236 int ret = 0, i, j;
1237 u8 tmp[30];
1238 size_t tmp_size = 0;
1239
1240 /* For each rate in card_rates that exists in rate1, copy to tmp */
1241 for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
1242 for (j = 0; rates[j] && (j < *rates_size); j++) {
1243 if (rates[j] == card_rates[i])
1244 tmp[tmp_size++] = card_rates[i];
1245 }
1246 }
1247
1248 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
1249 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
1250 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
1251 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
1252
1253 if (!priv->enablehwauto) {
1254 for (i = 0; i < tmp_size; i++) {
1255 if (tmp[i] == priv->cur_rate)
1256 goto done;
1257 }
1258 lbs_pr_alert("Previously set fixed data rate %#x isn't "
1259 "compatible with the network.\n", priv->cur_rate);
1260 ret = -1;
1261 goto done;
1262 }
1263 ret = 0;
1264
1265done:
1266 memset(rates, 0, *rates_size);
1267 *rates_size = min_t(int, tmp_size, *rates_size);
1268 memcpy(rates, tmp, *rates_size);
1269 return ret;
1270}
1271
1272
1273/**
1274 * @brief Sets the MSB on basic rates as the firmware requires
1275 *
1276 * Scan through an array and set the MSB for basic data rates.
1277 *
1278 * @param rates buffer of data rates
1279 * @param len size of buffer
1280 */
1281static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
1282{
1283 int i;
1284
1285 for (i = 0; i < len; i++) {
1286 if (rates[i] == 0x02 || rates[i] == 0x04 ||
1287 rates[i] == 0x0b || rates[i] == 0x16)
1288 rates[i] |= 0x80;
1289 }
1290}
1291
1292/**
1293 * @brief Send Deauthentication Request
1294 *
1295 * @param priv A pointer to struct lbs_private structure
1296 * @return 0--success, -1--fail
1297 */
1298int lbs_send_deauthentication(struct lbs_private *priv)
1299{
1300 return lbs_prepare_and_send_command(priv, CMD_802_11_DEAUTHENTICATE,
1301 0, CMD_OPTION_WAITFORRSP, 0, NULL);
1302}
1303
1304/**
1305 * @brief This function prepares command of authenticate. 1466 * @brief This function prepares command of authenticate.
1306 * 1467 *
1307 * @param priv A pointer to struct lbs_private structure 1468 * @param priv A pointer to struct lbs_private structure
@@ -1353,26 +1514,37 @@ out:
1353 return ret; 1514 return ret;
1354} 1515}
1355 1516
1356int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 1517/**
1357 struct cmd_ds_command *cmd) 1518 * @brief Deauthenticate from a specific BSS
1519 *
1520 * @param priv A pointer to struct lbs_private structure
1521 * @param bssid The specific BSS to deauthenticate from
1522 * @param reason The 802.11 sec. 7.3.1.7 Reason Code for deauthenticating
1523 *
1524 * @return 0 on success, error on failure
1525 */
1526int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, u8 bssid[ETH_ALEN],
1527 u16 reason)
1358{ 1528{
1359 struct cmd_ds_802_11_deauthenticate *dauth = &cmd->params.deauth; 1529 struct cmd_ds_802_11_deauthenticate cmd;
1530 int ret;
1360 1531
1361 lbs_deb_enter(LBS_DEB_JOIN); 1532 lbs_deb_enter(LBS_DEB_JOIN);
1362 1533
1363 cmd->command = cpu_to_le16(CMD_802_11_DEAUTHENTICATE); 1534 memset(&cmd, 0, sizeof(cmd));
1364 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_deauthenticate) + 1535 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1365 S_DS_GEN); 1536 memcpy(cmd.macaddr, &bssid[0], ETH_ALEN);
1537 cmd.reasoncode = cpu_to_le16(reason);
1366 1538
1367 /* set AP MAC address */ 1539 ret = lbs_cmd_with_response(priv, CMD_802_11_DEAUTHENTICATE, &cmd);
1368 memmove(dauth->macaddr, priv->curbssparams.bssid, ETH_ALEN);
1369 1540
1370 /* Reason code 3 = Station is leaving */ 1541 /* Clean up everything even if there was an error; can't assume that
1371#define REASON_CODE_STA_LEAVING 3 1542 * we're still authenticated to the AP after trying to deauth.
1372 dauth->reasoncode = cpu_to_le16(REASON_CODE_STA_LEAVING); 1543 */
1544 lbs_mac_event_disconnected(priv);
1373 1545
1374 lbs_deb_leave(LBS_DEB_JOIN); 1546 lbs_deb_leave(LBS_DEB_JOIN);
1375 return 0; 1547 return ret;
1376} 1548}
1377 1549
1378int lbs_cmd_80211_associate(struct lbs_private *priv, 1550int lbs_cmd_80211_associate(struct lbs_private *priv,
@@ -1489,231 +1661,6 @@ done:
1489 return ret; 1661 return ret;
1490} 1662}
1491 1663
1492int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
1493 struct cmd_ds_command *cmd, void *pdata_buf)
1494{
1495 struct cmd_ds_802_11_ad_hoc_start *adhs = &cmd->params.ads;
1496 int ret = 0;
1497 int cmdappendsize = 0;
1498 struct assoc_request *assoc_req = pdata_buf;
1499 u16 tmpcap = 0;
1500 size_t ratesize = 0;
1501
1502 lbs_deb_enter(LBS_DEB_JOIN);
1503
1504 if (!priv) {
1505 ret = -1;
1506 goto done;
1507 }
1508
1509 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_START);
1510
1511 /*
1512 * Fill in the parameters for 2 data structures:
1513 * 1. cmd_ds_802_11_ad_hoc_start command
1514 * 2. priv->scantable[i]
1515 *
1516 * Driver will fill up SSID, bsstype,IBSS param, Physical Param,
1517 * probe delay, and cap info.
1518 *
1519 * Firmware will fill up beacon period, DTIM, Basic rates
1520 * and operational rates.
1521 */
1522
1523 memset(adhs->ssid, 0, IW_ESSID_MAX_SIZE);
1524 memcpy(adhs->ssid, assoc_req->ssid, assoc_req->ssid_len);
1525
1526 lbs_deb_join("ADHOC_S_CMD: SSID '%s', ssid length %u\n",
1527 escape_essid(assoc_req->ssid, assoc_req->ssid_len),
1528 assoc_req->ssid_len);
1529
1530 /* set the BSS type */
1531 adhs->bsstype = CMD_BSS_TYPE_IBSS;
1532 priv->mode = IW_MODE_ADHOC;
1533 if (priv->beacon_period == 0)
1534 priv->beacon_period = MRVDRV_BEACON_INTERVAL;
1535 adhs->beaconperiod = cpu_to_le16(priv->beacon_period);
1536
1537 /* set Physical param set */
1538#define DS_PARA_IE_ID 3
1539#define DS_PARA_IE_LEN 1
1540
1541 adhs->phyparamset.dsparamset.elementid = DS_PARA_IE_ID;
1542 adhs->phyparamset.dsparamset.len = DS_PARA_IE_LEN;
1543
1544 WARN_ON(!assoc_req->channel);
1545
1546 lbs_deb_join("ADHOC_S_CMD: Creating ADHOC on channel %d\n",
1547 assoc_req->channel);
1548
1549 adhs->phyparamset.dsparamset.currentchan = assoc_req->channel;
1550
1551 /* set IBSS param set */
1552#define IBSS_PARA_IE_ID 6
1553#define IBSS_PARA_IE_LEN 2
1554
1555 adhs->ssparamset.ibssparamset.elementid = IBSS_PARA_IE_ID;
1556 adhs->ssparamset.ibssparamset.len = IBSS_PARA_IE_LEN;
1557 adhs->ssparamset.ibssparamset.atimwindow = 0;
1558
1559 /* set capability info */
1560 tmpcap = WLAN_CAPABILITY_IBSS;
1561 if (assoc_req->secinfo.wep_enabled) {
1562 lbs_deb_join("ADHOC_S_CMD: WEP enabled, "
1563 "setting privacy on\n");
1564 tmpcap |= WLAN_CAPABILITY_PRIVACY;
1565 } else {
1566 lbs_deb_join("ADHOC_S_CMD: WEP disabled, "
1567 "setting privacy off\n");
1568 }
1569 adhs->capability = cpu_to_le16(tmpcap);
1570
1571 /* probedelay */
1572 adhs->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1573
1574 memset(adhs->rates, 0, sizeof(adhs->rates));
1575 ratesize = min(sizeof(adhs->rates), sizeof(lbs_bg_rates));
1576 memcpy(adhs->rates, lbs_bg_rates, ratesize);
1577
1578 /* Copy the ad-hoc creating rates into Current BSS state structure */
1579 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1580 memcpy(&priv->curbssparams.rates, &adhs->rates, ratesize);
1581
1582 /* Set MSB on basic rates as the firmware requires, but _after_
1583 * copying to current bss rates.
1584 */
1585 lbs_set_basic_rate_flags(adhs->rates, ratesize);
1586
1587 lbs_deb_join("ADHOC_S_CMD: rates=%02x %02x %02x %02x \n",
1588 adhs->rates[0], adhs->rates[1], adhs->rates[2], adhs->rates[3]);
1589
1590 lbs_deb_join("ADHOC_S_CMD: AD HOC Start command is ready\n");
1591
1592 if (lbs_create_dnld_countryinfo_11d(priv)) {
1593 lbs_deb_join("ADHOC_S_CMD: dnld_countryinfo_11d failed\n");
1594 ret = -1;
1595 goto done;
1596 }
1597
1598 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_start) +
1599 S_DS_GEN + cmdappendsize);
1600
1601 ret = 0;
1602done:
1603 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1604 return ret;
1605}
1606
1607int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd)
1608{
1609 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_STOP);
1610 cmd->size = cpu_to_le16(S_DS_GEN);
1611
1612 return 0;
1613}
1614
1615int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
1616 struct cmd_ds_command *cmd, void *pdata_buf)
1617{
1618 struct cmd_ds_802_11_ad_hoc_join *join_cmd = &cmd->params.adj;
1619 struct assoc_request *assoc_req = pdata_buf;
1620 struct bss_descriptor *bss = &assoc_req->bss;
1621 int cmdappendsize = 0;
1622 int ret = 0;
1623 u16 ratesize = 0;
1624 DECLARE_MAC_BUF(mac);
1625
1626 lbs_deb_enter(LBS_DEB_JOIN);
1627
1628 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_JOIN);
1629
1630 join_cmd->bss.type = CMD_BSS_TYPE_IBSS;
1631 join_cmd->bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
1632
1633 memcpy(&join_cmd->bss.bssid, &bss->bssid, ETH_ALEN);
1634 memcpy(&join_cmd->bss.ssid, &bss->ssid, bss->ssid_len);
1635
1636 memcpy(&join_cmd->bss.phyparamset, &bss->phyparamset,
1637 sizeof(union ieeetypes_phyparamset));
1638
1639 memcpy(&join_cmd->bss.ssparamset, &bss->ssparamset,
1640 sizeof(union IEEEtypes_ssparamset));
1641
1642 join_cmd->bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
1643 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
1644 bss->capability, CAPINFO_MASK);
1645
1646 /* information on BSSID descriptor passed to FW */
1647 lbs_deb_join(
1648 "ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
1649 print_mac(mac, join_cmd->bss.bssid),
1650 join_cmd->bss.ssid);
1651
1652 /* failtimeout */
1653 join_cmd->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
1654
1655 /* probedelay */
1656 join_cmd->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1657
1658 priv->curbssparams.channel = bss->channel;
1659
1660 /* Copy Data rates from the rates recorded in scan response */
1661 memset(join_cmd->bss.rates, 0, sizeof(join_cmd->bss.rates));
1662 ratesize = min_t(u16, sizeof(join_cmd->bss.rates), MAX_RATES);
1663 memcpy(join_cmd->bss.rates, bss->rates, ratesize);
1664 if (get_common_rates(priv, join_cmd->bss.rates, &ratesize)) {
1665 lbs_deb_join("ADHOC_J_CMD: get_common_rates returns error.\n");
1666 ret = -1;
1667 goto done;
1668 }
1669
1670 /* Copy the ad-hoc creating rates into Current BSS state structure */
1671 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1672 memcpy(&priv->curbssparams.rates, join_cmd->bss.rates, ratesize);
1673
1674 /* Set MSB on basic rates as the firmware requires, but _after_
1675 * copying to current bss rates.
1676 */
1677 lbs_set_basic_rate_flags(join_cmd->bss.rates, ratesize);
1678
1679 join_cmd->bss.ssparamset.ibssparamset.atimwindow =
1680 cpu_to_le16(bss->atimwindow);
1681
1682 if (assoc_req->secinfo.wep_enabled) {
1683 u16 tmp = le16_to_cpu(join_cmd->bss.capability);
1684 tmp |= WLAN_CAPABILITY_PRIVACY;
1685 join_cmd->bss.capability = cpu_to_le16(tmp);
1686 }
1687
1688 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
1689 /* wake up first */
1690 __le32 Localpsmode;
1691
1692 Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM);
1693 ret = lbs_prepare_and_send_command(priv,
1694 CMD_802_11_PS_MODE,
1695 CMD_ACT_SET,
1696 0, 0, &Localpsmode);
1697
1698 if (ret) {
1699 ret = -1;
1700 goto done;
1701 }
1702 }
1703
1704 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
1705 ret = -1;
1706 goto done;
1707 }
1708
1709 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_join) +
1710 S_DS_GEN + cmdappendsize);
1711
1712done:
1713 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1714 return ret;
1715}
1716
1717int lbs_ret_80211_associate(struct lbs_private *priv, 1664int lbs_ret_80211_associate(struct lbs_private *priv,
1718 struct cmd_ds_command *resp) 1665 struct cmd_ds_command *resp)
1719{ 1666{
@@ -1815,34 +1762,19 @@ done:
1815 return ret; 1762 return ret;
1816} 1763}
1817 1764
1818int lbs_ret_80211_disassociate(struct lbs_private *priv) 1765static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp)
1819{
1820 lbs_deb_enter(LBS_DEB_JOIN);
1821
1822 lbs_mac_event_disconnected(priv);
1823
1824 lbs_deb_leave(LBS_DEB_JOIN);
1825 return 0;
1826}
1827
1828int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
1829 struct cmd_ds_command *resp)
1830{ 1766{
1831 int ret = 0; 1767 int ret = 0;
1832 u16 command = le16_to_cpu(resp->command); 1768 u16 command = le16_to_cpu(resp->command);
1833 u16 result = le16_to_cpu(resp->result); 1769 u16 result = le16_to_cpu(resp->result);
1834 struct cmd_ds_802_11_ad_hoc_result *padhocresult; 1770 struct cmd_ds_802_11_ad_hoc_result *adhoc_resp;
1835 union iwreq_data wrqu; 1771 union iwreq_data wrqu;
1836 struct bss_descriptor *bss; 1772 struct bss_descriptor *bss;
1837 DECLARE_MAC_BUF(mac); 1773 DECLARE_MAC_BUF(mac);
1838 1774
1839 lbs_deb_enter(LBS_DEB_JOIN); 1775 lbs_deb_enter(LBS_DEB_JOIN);
1840 1776
1841 padhocresult = &resp->params.result; 1777 adhoc_resp = (struct cmd_ds_802_11_ad_hoc_result *) resp;
1842
1843 lbs_deb_join("ADHOC_RESP: size = %d\n", le16_to_cpu(resp->size));
1844 lbs_deb_join("ADHOC_RESP: command = %x\n", command);
1845 lbs_deb_join("ADHOC_RESP: result = %x\n", result);
1846 1778
1847 if (!priv->in_progress_assoc_req) { 1779 if (!priv->in_progress_assoc_req) {
1848 lbs_deb_join("ADHOC_RESP: no in-progress association " 1780 lbs_deb_join("ADHOC_RESP: no in-progress association "
@@ -1856,26 +1788,19 @@ int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
1856 * Join result code 0 --> SUCCESS 1788 * Join result code 0 --> SUCCESS
1857 */ 1789 */
1858 if (result) { 1790 if (result) {
1859 lbs_deb_join("ADHOC_RESP: failed\n"); 1791 lbs_deb_join("ADHOC_RESP: failed (result 0x%X)\n", result);
1860 if (priv->connect_status == LBS_CONNECTED) 1792 if (priv->connect_status == LBS_CONNECTED)
1861 lbs_mac_event_disconnected(priv); 1793 lbs_mac_event_disconnected(priv);
1862 ret = -1; 1794 ret = -1;
1863 goto done; 1795 goto done;
1864 } 1796 }
1865 1797
1866 /*
1867 * Now the join cmd should be successful
1868 * If BSSID has changed use SSID to compare instead of BSSID
1869 */
1870 lbs_deb_join("ADHOC_RESP: associated to '%s'\n",
1871 escape_essid(bss->ssid, bss->ssid_len));
1872
1873 /* Send a Media Connected event, according to the Spec */ 1798 /* Send a Media Connected event, according to the Spec */
1874 priv->connect_status = LBS_CONNECTED; 1799 priv->connect_status = LBS_CONNECTED;
1875 1800
1876 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) { 1801 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
1877 /* Update the created network descriptor with the new BSSID */ 1802 /* Update the created network descriptor with the new BSSID */
1878 memcpy(bss->bssid, padhocresult->bssid, ETH_ALEN); 1803 memcpy(bss->bssid, adhoc_resp->bssid, ETH_ALEN);
1879 } 1804 }
1880 1805
1881 /* Set the BSSID from the joined/started descriptor */ 1806 /* Set the BSSID from the joined/started descriptor */
@@ -1894,22 +1819,13 @@ int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
1894 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1819 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1895 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 1820 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1896 1821
1897 lbs_deb_join("ADHOC_RESP: - Joined/Started Ad Hoc\n"); 1822 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %s, channel %d\n",
1898 lbs_deb_join("ADHOC_RESP: channel = %d\n", priv->curbssparams.channel); 1823 escape_essid(bss->ssid, bss->ssid_len),
1899 lbs_deb_join("ADHOC_RESP: BSSID = %s\n", 1824 print_mac(mac, priv->curbssparams.bssid),
1900 print_mac(mac, padhocresult->bssid)); 1825 priv->curbssparams.channel);
1901 1826
1902done: 1827done:
1903 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); 1828 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1904 return ret; 1829 return ret;
1905} 1830}
1906 1831
1907int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv)
1908{
1909 lbs_deb_enter(LBS_DEB_JOIN);
1910
1911 lbs_mac_event_disconnected(priv);
1912
1913 lbs_deb_leave(LBS_DEB_JOIN);
1914 return 0;
1915}
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index c516fbe518fd..8b7336dd02a3 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -12,28 +12,18 @@ struct cmd_ds_command;
12int lbs_cmd_80211_authenticate(struct lbs_private *priv, 12int lbs_cmd_80211_authenticate(struct lbs_private *priv,
13 struct cmd_ds_command *cmd, 13 struct cmd_ds_command *cmd,
14 void *pdata_buf); 14 void *pdata_buf);
15int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv, 15
16 struct cmd_ds_command *cmd, 16int lbs_adhoc_stop(struct lbs_private *priv);
17 void *pdata_buf); 17
18int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd);
19int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
20 struct cmd_ds_command *cmd,
21 void *pdata_buf);
22int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 18int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
23 struct cmd_ds_command *cmd); 19 u8 bssid[ETH_ALEN], u16 reason);
24int lbs_cmd_80211_associate(struct lbs_private *priv, 20int lbs_cmd_80211_associate(struct lbs_private *priv,
25 struct cmd_ds_command *cmd, 21 struct cmd_ds_command *cmd,
26 void *pdata_buf); 22 void *pdata_buf);
27 23
28int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv, 24int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
29 struct cmd_ds_command *resp); 25 struct cmd_ds_command *resp);
30int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv);
31int lbs_ret_80211_disassociate(struct lbs_private *priv);
32int lbs_ret_80211_associate(struct lbs_private *priv, 26int lbs_ret_80211_associate(struct lbs_private *priv,
33 struct cmd_ds_command *resp); 27 struct cmd_ds_command *resp);
34 28
35int lbs_stop_adhoc_network(struct lbs_private *priv);
36
37int lbs_send_deauthentication(struct lbs_private *priv);
38
39#endif /* _LBS_ASSOC_H */ 29#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 75427e61898d..a912fb68c099 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -480,181 +480,166 @@ int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
480 return ret; 480 return ret;
481} 481}
482 482
483static int lbs_cmd_802_11_reset(struct cmd_ds_command *cmd, int cmd_action) 483/**
484{ 484 * @brief Set an SNMP MIB value
485 struct cmd_ds_802_11_reset *reset = &cmd->params.reset; 485 *
486 486 * @param priv A pointer to struct lbs_private structure
487 lbs_deb_enter(LBS_DEB_CMD); 487 * @param oid The OID to set in the firmware
488 488 * @param val Value to set the OID to
489 cmd->command = cpu_to_le16(CMD_802_11_RESET); 489 *
490 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN); 490 * @return 0 on success, error on failure
491 reset->action = cpu_to_le16(cmd_action); 491 */
492 492int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
493 lbs_deb_leave(LBS_DEB_CMD);
494 return 0;
495}
496
497static int lbs_cmd_802_11_snmp_mib(struct lbs_private *priv,
498 struct cmd_ds_command *cmd,
499 int cmd_action,
500 int cmd_oid, void *pdata_buf)
501{ 493{
502 struct cmd_ds_802_11_snmp_mib *pSNMPMIB = &cmd->params.smib; 494 struct cmd_ds_802_11_snmp_mib cmd;
503 u8 ucTemp; 495 int ret;
504 496
505 lbs_deb_enter(LBS_DEB_CMD); 497 lbs_deb_enter(LBS_DEB_CMD);
506 498
507 lbs_deb_cmd("SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid); 499 memset(&cmd, 0, sizeof (cmd));
508 500 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
509 cmd->command = cpu_to_le16(CMD_802_11_SNMP_MIB); 501 cmd.action = cpu_to_le16(CMD_ACT_SET);
510 cmd->size = cpu_to_le16(sizeof(*pSNMPMIB) + S_DS_GEN); 502 cmd.oid = cpu_to_le16((u16) oid);
511
512 switch (cmd_oid) {
513 case OID_802_11_INFRASTRUCTURE_MODE:
514 {
515 u8 mode = (u8) (size_t) pdata_buf;
516 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
517 pSNMPMIB->oid = cpu_to_le16((u16) DESIRED_BSSTYPE_I);
518 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u8));
519 if (mode == IW_MODE_ADHOC) {
520 ucTemp = SNMP_MIB_VALUE_ADHOC;
521 } else {
522 /* Infra and Auto modes */
523 ucTemp = SNMP_MIB_VALUE_INFRA;
524 }
525
526 memmove(pSNMPMIB->value, &ucTemp, sizeof(u8));
527 503
504 switch (oid) {
505 case SNMP_MIB_OID_BSS_TYPE:
506 cmd.bufsize = cpu_to_le16(sizeof(u8));
507 cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1;
528 break; 508 break;
509 case SNMP_MIB_OID_11D_ENABLE:
510 case SNMP_MIB_OID_FRAG_THRESHOLD:
511 case SNMP_MIB_OID_RTS_THRESHOLD:
512 case SNMP_MIB_OID_SHORT_RETRY_LIMIT:
513 case SNMP_MIB_OID_LONG_RETRY_LIMIT:
514 cmd.bufsize = cpu_to_le16(sizeof(u16));
515 *((__le16 *)(&cmd.value)) = cpu_to_le16(val);
516 break;
517 default:
518 lbs_deb_cmd("SNMP_CMD: (set) unhandled OID 0x%x\n", oid);
519 ret = -EINVAL;
520 goto out;
529 } 521 }
530 522
531 case OID_802_11D_ENABLE: 523 lbs_deb_cmd("SNMP_CMD: (set) oid 0x%x, oid size 0x%x, value 0x%x\n",
532 { 524 le16_to_cpu(cmd.oid), le16_to_cpu(cmd.bufsize), val);
533 u32 ulTemp;
534
535 pSNMPMIB->oid = cpu_to_le16((u16) DOT11D_I);
536
537 if (cmd_action == CMD_ACT_SET) {
538 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
539 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
540 ulTemp = *(u32 *)pdata_buf;
541 *((__le16 *)(pSNMPMIB->value)) =
542 cpu_to_le16((u16) ulTemp);
543 }
544 break;
545 }
546
547 case OID_802_11_FRAGMENTATION_THRESHOLD:
548 {
549 u32 ulTemp;
550
551 pSNMPMIB->oid = cpu_to_le16((u16) FRAGTHRESH_I);
552
553 if (cmd_action == CMD_ACT_GET) {
554 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET);
555 } else if (cmd_action == CMD_ACT_SET) {
556 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
557 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
558 ulTemp = *((u32 *) pdata_buf);
559 *((__le16 *)(pSNMPMIB->value)) =
560 cpu_to_le16((u16) ulTemp);
561 525
562 } 526 ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
563 527
564 break; 528out:
565 } 529 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
530 return ret;
531}
566 532
567 case OID_802_11_RTS_THRESHOLD: 533/**
568 { 534 * @brief Get an SNMP MIB value
535 *
536 * @param priv A pointer to struct lbs_private structure
537 * @param oid The OID to retrieve from the firmware
538 * @param out_val Location for the returned value
539 *
540 * @return 0 on success, error on failure
541 */
542int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
543{
544 struct cmd_ds_802_11_snmp_mib cmd;
545 int ret;
569 546
570 u32 ulTemp; 547 lbs_deb_enter(LBS_DEB_CMD);
571 pSNMPMIB->oid = cpu_to_le16(RTSTHRESH_I);
572 548
573 if (cmd_action == CMD_ACT_GET) { 549 memset(&cmd, 0, sizeof (cmd));
574 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET); 550 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
575 } else if (cmd_action == CMD_ACT_SET) { 551 cmd.action = cpu_to_le16(CMD_ACT_GET);
576 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET); 552 cmd.oid = cpu_to_le16(oid);
577 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
578 ulTemp = *((u32 *)pdata_buf);
579 *(__le16 *)(pSNMPMIB->value) =
580 cpu_to_le16((u16) ulTemp);
581 553
582 } 554 ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd);
583 break; 555 if (ret)
584 } 556 goto out;
585 case OID_802_11_TX_RETRYCOUNT:
586 pSNMPMIB->oid = cpu_to_le16((u16) SHORT_RETRYLIM_I);
587
588 if (cmd_action == CMD_ACT_GET) {
589 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_GET);
590 } else if (cmd_action == CMD_ACT_SET) {
591 pSNMPMIB->querytype = cpu_to_le16(CMD_ACT_SET);
592 pSNMPMIB->bufsize = cpu_to_le16(sizeof(u16));
593 *((__le16 *)(pSNMPMIB->value)) =
594 cpu_to_le16((u16) priv->txretrycount);
595 }
596 557
558 switch (le16_to_cpu(cmd.bufsize)) {
559 case sizeof(u8):
560 if (oid == SNMP_MIB_OID_BSS_TYPE) {
561 if (cmd.value[0] == 2)
562 *out_val = IW_MODE_ADHOC;
563 else
564 *out_val = IW_MODE_INFRA;
565 } else
566 *out_val = cmd.value[0];
567 break;
568 case sizeof(u16):
569 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
597 break; 570 break;
598 default: 571 default:
572 lbs_deb_cmd("SNMP_CMD: (get) unhandled OID 0x%x size %d\n",
573 oid, le16_to_cpu(cmd.bufsize));
599 break; 574 break;
600 } 575 }
601 576
602 lbs_deb_cmd( 577out:
603 "SNMP_CMD: command=0x%x, size=0x%x, seqnum=0x%x, result=0x%x\n", 578 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
604 le16_to_cpu(cmd->command), le16_to_cpu(cmd->size), 579 return ret;
605 le16_to_cpu(cmd->seqnum), le16_to_cpu(cmd->result));
606
607 lbs_deb_cmd(
608 "SNMP_CMD: action 0x%x, oid 0x%x, oidsize 0x%x, value 0x%x\n",
609 le16_to_cpu(pSNMPMIB->querytype), le16_to_cpu(pSNMPMIB->oid),
610 le16_to_cpu(pSNMPMIB->bufsize),
611 le16_to_cpu(*(__le16 *) pSNMPMIB->value));
612
613 lbs_deb_leave(LBS_DEB_CMD);
614 return 0;
615} 580}
616 581
617static int lbs_cmd_802_11_rf_tx_power(struct cmd_ds_command *cmd, 582/**
618 u16 cmd_action, void *pdata_buf) 583 * @brief Get the min, max, and current TX power
584 *
585 * @param priv A pointer to struct lbs_private structure
586 * @param curlevel Current power level in dBm
587 * @param minlevel Minimum supported power level in dBm (optional)
588 * @param maxlevel Maximum supported power level in dBm (optional)
589 *
590 * @return 0 on success, error on failure
591 */
592int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
593 s16 *maxlevel)
619{ 594{
620 595 struct cmd_ds_802_11_rf_tx_power cmd;
621 struct cmd_ds_802_11_rf_tx_power *prtp = &cmd->params.txp; 596 int ret;
622 597
623 lbs_deb_enter(LBS_DEB_CMD); 598 lbs_deb_enter(LBS_DEB_CMD);
624 599
625 cmd->size = 600 memset(&cmd, 0, sizeof(cmd));
626 cpu_to_le16((sizeof(struct cmd_ds_802_11_rf_tx_power)) + S_DS_GEN); 601 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
627 cmd->command = cpu_to_le16(CMD_802_11_RF_TX_POWER); 602 cmd.action = cpu_to_le16(CMD_ACT_GET);
628 prtp->action = cpu_to_le16(cmd_action); 603
604 ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
605 if (ret == 0) {
606 *curlevel = le16_to_cpu(cmd.curlevel);
607 if (minlevel)
608 *minlevel = le16_to_cpu(cmd.minlevel);
609 if (maxlevel)
610 *maxlevel = le16_to_cpu(cmd.maxlevel);
611 }
629 612
630 lbs_deb_cmd("RF_TX_POWER_CMD: size:%d cmd:0x%x Act:%d\n", 613 lbs_deb_leave(LBS_DEB_CMD);
631 le16_to_cpu(cmd->size), le16_to_cpu(cmd->command), 614 return ret;
632 le16_to_cpu(prtp->action)); 615}
633 616
634 switch (cmd_action) { 617/**
635 case CMD_ACT_TX_POWER_OPT_GET: 618 * @brief Set the TX power
636 prtp->action = cpu_to_le16(CMD_ACT_GET); 619 *
637 prtp->currentlevel = 0; 620 * @param priv A pointer to struct lbs_private structure
638 break; 621 * @param dbm The desired power level in dBm
622 *
623 * @return 0 on success, error on failure
624 */
625int lbs_set_tx_power(struct lbs_private *priv, s16 dbm)
626{
627 struct cmd_ds_802_11_rf_tx_power cmd;
628 int ret;
639 629
640 case CMD_ACT_TX_POWER_OPT_SET_HIGH: 630 lbs_deb_enter(LBS_DEB_CMD);
641 prtp->action = cpu_to_le16(CMD_ACT_SET);
642 prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_HIGH);
643 break;
644 631
645 case CMD_ACT_TX_POWER_OPT_SET_MID: 632 memset(&cmd, 0, sizeof(cmd));
646 prtp->action = cpu_to_le16(CMD_ACT_SET); 633 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
647 prtp->currentlevel = cpu_to_le16(CMD_ACT_TX_POWER_INDEX_MID); 634 cmd.action = cpu_to_le16(CMD_ACT_SET);
648 break; 635 cmd.curlevel = cpu_to_le16(dbm);
649 636
650 case CMD_ACT_TX_POWER_OPT_SET_LOW: 637 lbs_deb_cmd("SET_RF_TX_POWER: %d dBm\n", dbm);
651 prtp->action = cpu_to_le16(CMD_ACT_SET); 638
652 prtp->currentlevel = cpu_to_le16(*((u16 *) pdata_buf)); 639 ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd);
653 break;
654 }
655 640
656 lbs_deb_leave(LBS_DEB_CMD); 641 lbs_deb_leave(LBS_DEB_CMD);
657 return 0; 642 return ret;
658} 643}
659 644
660static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd, 645static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
@@ -1033,9 +1018,9 @@ int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
1033 return ret; 1018 return ret;
1034} 1019}
1035 1020
1036int lbs_mesh_config_send(struct lbs_private *priv, 1021static int __lbs_mesh_config_send(struct lbs_private *priv,
1037 struct cmd_ds_mesh_config *cmd, 1022 struct cmd_ds_mesh_config *cmd,
1038 uint16_t action, uint16_t type) 1023 uint16_t action, uint16_t type)
1039{ 1024{
1040 int ret; 1025 int ret;
1041 1026
@@ -1054,6 +1039,19 @@ int lbs_mesh_config_send(struct lbs_private *priv,
1054 return ret; 1039 return ret;
1055} 1040}
1056 1041
1042int lbs_mesh_config_send(struct lbs_private *priv,
1043 struct cmd_ds_mesh_config *cmd,
1044 uint16_t action, uint16_t type)
1045{
1046 int ret;
1047
1048 if (!(priv->fwcapinfo & FW_CAPINFO_PERSISTENT_CONFIG))
1049 return -EOPNOTSUPP;
1050
1051 ret = __lbs_mesh_config_send(priv, cmd, action, type);
1052 return ret;
1053}
1054
1057/* This function is the CMD_MESH_CONFIG legacy function. It only handles the 1055/* This function is the CMD_MESH_CONFIG legacy function. It only handles the
1058 * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG 1056 * START and STOP actions. The extended actions supported by CMD_MESH_CONFIG
1059 * are all handled by preparing a struct cmd_ds_mesh_config and passing it to 1057 * are all handled by preparing a struct cmd_ds_mesh_config and passing it to
@@ -1095,7 +1093,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1095 action, priv->mesh_tlv, chan, 1093 action, priv->mesh_tlv, chan,
1096 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len)); 1094 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len));
1097 1095
1098 return lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); 1096 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1099} 1097}
1100 1098
1101static int lbs_cmd_bcn_ctrl(struct lbs_private * priv, 1099static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
@@ -1256,41 +1254,47 @@ void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
1256 priv->cur_cmd = NULL; 1254 priv->cur_cmd = NULL;
1257} 1255}
1258 1256
1259int lbs_set_radio_control(struct lbs_private *priv) 1257int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
1260{ 1258{
1261 int ret = 0;
1262 struct cmd_ds_802_11_radio_control cmd; 1259 struct cmd_ds_802_11_radio_control cmd;
1260 int ret = -EINVAL;
1263 1261
1264 lbs_deb_enter(LBS_DEB_CMD); 1262 lbs_deb_enter(LBS_DEB_CMD);
1265 1263
1266 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 1264 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1267 cmd.action = cpu_to_le16(CMD_ACT_SET); 1265 cmd.action = cpu_to_le16(CMD_ACT_SET);
1268 1266
1269 switch (priv->preamble) { 1267 /* Only v8 and below support setting the preamble */
1270 case CMD_TYPE_SHORT_PREAMBLE: 1268 if (priv->fwrelease < 0x09000000) {
1271 cmd.control = cpu_to_le16(SET_SHORT_PREAMBLE); 1269 switch (preamble) {
1272 break; 1270 case RADIO_PREAMBLE_SHORT:
1273 1271 if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
1274 case CMD_TYPE_LONG_PREAMBLE: 1272 goto out;
1275 cmd.control = cpu_to_le16(SET_LONG_PREAMBLE); 1273 /* Fall through */
1276 break; 1274 case RADIO_PREAMBLE_AUTO:
1275 case RADIO_PREAMBLE_LONG:
1276 cmd.control = cpu_to_le16(preamble);
1277 break;
1278 default:
1279 goto out;
1280 }
1281 }
1277 1282
1278 case CMD_TYPE_AUTO_PREAMBLE: 1283 if (radio_on)
1279 default: 1284 cmd.control |= cpu_to_le16(0x1);
1280 cmd.control = cpu_to_le16(SET_AUTO_PREAMBLE); 1285 else {
1281 break; 1286 cmd.control &= cpu_to_le16(~0x1);
1287 priv->txpower_cur = 0;
1282 } 1288 }
1283 1289
1284 if (priv->radioon) 1290 lbs_deb_cmd("RADIO_CONTROL: radio %s, preamble %d\n",
1285 cmd.control |= cpu_to_le16(TURN_ON_RF); 1291 radio_on ? "ON" : "OFF", preamble);
1286 else
1287 cmd.control &= cpu_to_le16(~TURN_ON_RF);
1288 1292
1289 lbs_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon, 1293 priv->radio_on = radio_on;
1290 priv->preamble);
1291 1294
1292 ret = lbs_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd); 1295 ret = lbs_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
1293 1296
1297out:
1294 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 1298 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
1295 return ret; 1299 return ret;
1296} 1300}
@@ -1380,55 +1384,25 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1380 ret = lbs_cmd_80211_associate(priv, cmdptr, pdata_buf); 1384 ret = lbs_cmd_80211_associate(priv, cmdptr, pdata_buf);
1381 break; 1385 break;
1382 1386
1383 case CMD_802_11_DEAUTHENTICATE:
1384 ret = lbs_cmd_80211_deauthenticate(priv, cmdptr);
1385 break;
1386
1387 case CMD_802_11_AD_HOC_START:
1388 ret = lbs_cmd_80211_ad_hoc_start(priv, cmdptr, pdata_buf);
1389 break;
1390
1391 case CMD_802_11_RESET:
1392 ret = lbs_cmd_802_11_reset(cmdptr, cmd_action);
1393 break;
1394
1395 case CMD_802_11_AUTHENTICATE: 1387 case CMD_802_11_AUTHENTICATE:
1396 ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf); 1388 ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf);
1397 break; 1389 break;
1398 1390
1399 case CMD_802_11_SNMP_MIB:
1400 ret = lbs_cmd_802_11_snmp_mib(priv, cmdptr,
1401 cmd_action, cmd_oid, pdata_buf);
1402 break;
1403
1404 case CMD_MAC_REG_ACCESS: 1391 case CMD_MAC_REG_ACCESS:
1405 case CMD_BBP_REG_ACCESS: 1392 case CMD_BBP_REG_ACCESS:
1406 case CMD_RF_REG_ACCESS: 1393 case CMD_RF_REG_ACCESS:
1407 ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf); 1394 ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf);
1408 break; 1395 break;
1409 1396
1410 case CMD_802_11_RF_TX_POWER:
1411 ret = lbs_cmd_802_11_rf_tx_power(cmdptr,
1412 cmd_action, pdata_buf);
1413 break;
1414
1415 case CMD_802_11_MONITOR_MODE: 1397 case CMD_802_11_MONITOR_MODE:
1416 ret = lbs_cmd_802_11_monitor_mode(cmdptr, 1398 ret = lbs_cmd_802_11_monitor_mode(cmdptr,
1417 cmd_action, pdata_buf); 1399 cmd_action, pdata_buf);
1418 break; 1400 break;
1419 1401
1420 case CMD_802_11_AD_HOC_JOIN:
1421 ret = lbs_cmd_80211_ad_hoc_join(priv, cmdptr, pdata_buf);
1422 break;
1423
1424 case CMD_802_11_RSSI: 1402 case CMD_802_11_RSSI:
1425 ret = lbs_cmd_802_11_rssi(priv, cmdptr); 1403 ret = lbs_cmd_802_11_rssi(priv, cmdptr);
1426 break; 1404 break;
1427 1405
1428 case CMD_802_11_AD_HOC_STOP:
1429 ret = lbs_cmd_80211_ad_hoc_stop(cmdptr);
1430 break;
1431
1432 case CMD_802_11_SET_AFC: 1406 case CMD_802_11_SET_AFC:
1433 case CMD_802_11_GET_AFC: 1407 case CMD_802_11_GET_AFC:
1434 1408
@@ -1953,6 +1927,70 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv)
1953} 1927}
1954 1928
1955 1929
1930/**
1931 * @brief Configures the transmission power control functionality.
1932 *
1933 * @param priv A pointer to struct lbs_private structure
1934 * @param enable Transmission power control enable
1935 * @param p0 Power level when link quality is good (dBm).
1936 * @param p1 Power level when link quality is fair (dBm).
1937 * @param p2 Power level when link quality is poor (dBm).
1938 * @param usesnr Use Signal to Noise Ratio in TPC
1939 *
1940 * @return 0 on success
1941 */
1942int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
1943 int8_t p2, int usesnr)
1944{
1945 struct cmd_ds_802_11_tpc_cfg cmd;
1946 int ret;
1947
1948 memset(&cmd, 0, sizeof(cmd));
1949 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1950 cmd.action = cpu_to_le16(CMD_ACT_SET);
1951 cmd.enable = !!enable;
1952 cmd.usesnr = !!usesnr;
1953 cmd.P0 = p0;
1954 cmd.P1 = p1;
1955 cmd.P2 = p2;
1956
1957 ret = lbs_cmd_with_response(priv, CMD_802_11_TPC_CFG, &cmd);
1958
1959 return ret;
1960}
1961
1962/**
1963 * @brief Configures the power adaptation settings.
1964 *
1965 * @param priv A pointer to struct lbs_private structure
1966 * @param enable Power adaptation enable
1967 * @param p0 Power level for 1, 2, 5.5 and 11 Mbps (dBm).
1968 * @param p1 Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm).
1969 * @param p2 Power level for 48 and 54 Mbps (dBm).
1970 *
1971 * @return 0 on Success
1972 */
1973
1974int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
1975 int8_t p1, int8_t p2)
1976{
1977 struct cmd_ds_802_11_pa_cfg cmd;
1978 int ret;
1979
1980 memset(&cmd, 0, sizeof(cmd));
1981 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1982 cmd.action = cpu_to_le16(CMD_ACT_SET);
1983 cmd.enable = !!enable;
1984 cmd.P0 = p0;
1985 cmd.P1 = p1;
1986 cmd.P2 = p2;
1987
1988 ret = lbs_cmd_with_response(priv, CMD_802_11_PA_CFG , &cmd);
1989
1990 return ret;
1991}
1992
1993
1956static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, 1994static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
1957 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, 1995 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
1958 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 1996 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index a53b51f8bdb4..36be4c9703e0 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -26,6 +26,18 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
27 unsigned long callback_arg); 27 unsigned long callback_arg);
28 28
29int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
30 int8_t p1, int8_t p2);
31
32int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
33 int8_t p2, int usesnr);
34
35int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
36 int8_t p1, int8_t p2);
37
38int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
39 int8_t p2, int usesnr);
40
29int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra, 41int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
30 struct cmd_header *resp); 42 struct cmd_header *resp);
31 43
@@ -61,4 +73,14 @@ int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
61int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action, 73int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
62 struct assoc_request *assoc); 74 struct assoc_request *assoc);
63 75
76int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
77 s16 *maxlevel);
78int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
79
80int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
81
82int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
83
84int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
85
64#endif /* _LBS_CMD_H */ 86#endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 24de3c3cf877..bcf2a9756fb6 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -146,63 +146,6 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
146 return ret; 146 return ret;
147} 147}
148 148
149static int lbs_ret_802_11_snmp_mib(struct lbs_private *priv,
150 struct cmd_ds_command *resp)
151{
152 struct cmd_ds_802_11_snmp_mib *smib = &resp->params.smib;
153 u16 oid = le16_to_cpu(smib->oid);
154 u16 querytype = le16_to_cpu(smib->querytype);
155
156 lbs_deb_enter(LBS_DEB_CMD);
157
158 lbs_deb_cmd("SNMP_RESP: oid 0x%x, querytype 0x%x\n", oid,
159 querytype);
160 lbs_deb_cmd("SNMP_RESP: Buf size %d\n", le16_to_cpu(smib->bufsize));
161
162 if (querytype == CMD_ACT_GET) {
163 switch (oid) {
164 case FRAGTHRESH_I:
165 priv->fragthsd =
166 le16_to_cpu(*((__le16 *)(smib->value)));
167 lbs_deb_cmd("SNMP_RESP: frag threshold %u\n",
168 priv->fragthsd);
169 break;
170 case RTSTHRESH_I:
171 priv->rtsthsd =
172 le16_to_cpu(*((__le16 *)(smib->value)));
173 lbs_deb_cmd("SNMP_RESP: rts threshold %u\n",
174 priv->rtsthsd);
175 break;
176 case SHORT_RETRYLIM_I:
177 priv->txretrycount =
178 le16_to_cpu(*((__le16 *)(smib->value)));
179 lbs_deb_cmd("SNMP_RESP: tx retry count %u\n",
180 priv->rtsthsd);
181 break;
182 default:
183 break;
184 }
185 }
186
187 lbs_deb_enter(LBS_DEB_CMD);
188 return 0;
189}
190
191static int lbs_ret_802_11_rf_tx_power(struct lbs_private *priv,
192 struct cmd_ds_command *resp)
193{
194 struct cmd_ds_802_11_rf_tx_power *rtp = &resp->params.txp;
195
196 lbs_deb_enter(LBS_DEB_CMD);
197
198 priv->txpowerlevel = le16_to_cpu(rtp->currentlevel);
199
200 lbs_deb_cmd("TX power currently %d\n", priv->txpowerlevel);
201
202 lbs_deb_leave(LBS_DEB_CMD);
203 return 0;
204}
205
206static int lbs_ret_802_11_rssi(struct lbs_private *priv, 149static int lbs_ret_802_11_rssi(struct lbs_private *priv,
207 struct cmd_ds_command *resp) 150 struct cmd_ds_command *resp)
208{ 151{
@@ -273,24 +216,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
273 ret = lbs_ret_80211_associate(priv, resp); 216 ret = lbs_ret_80211_associate(priv, resp);
274 break; 217 break;
275 218
276 case CMD_RET(CMD_802_11_DISASSOCIATE):
277 case CMD_RET(CMD_802_11_DEAUTHENTICATE):
278 ret = lbs_ret_80211_disassociate(priv);
279 break;
280
281 case CMD_RET(CMD_802_11_AD_HOC_START):
282 case CMD_RET(CMD_802_11_AD_HOC_JOIN):
283 ret = lbs_ret_80211_ad_hoc_start(priv, resp);
284 break;
285
286 case CMD_RET(CMD_802_11_SNMP_MIB):
287 ret = lbs_ret_802_11_snmp_mib(priv, resp);
288 break;
289
290 case CMD_RET(CMD_802_11_RF_TX_POWER):
291 ret = lbs_ret_802_11_rf_tx_power(priv, resp);
292 break;
293
294 case CMD_RET(CMD_802_11_SET_AFC): 219 case CMD_RET(CMD_802_11_SET_AFC):
295 case CMD_RET(CMD_802_11_GET_AFC): 220 case CMD_RET(CMD_802_11_GET_AFC):
296 spin_lock_irqsave(&priv->driver_lock, flags); 221 spin_lock_irqsave(&priv->driver_lock, flags);
@@ -300,7 +225,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
300 225
301 break; 226 break;
302 227
303 case CMD_RET(CMD_802_11_RESET):
304 case CMD_RET(CMD_802_11_AUTHENTICATE): 228 case CMD_RET(CMD_802_11_AUTHENTICATE):
305 case CMD_RET(CMD_802_11_BEACON_STOP): 229 case CMD_RET(CMD_802_11_BEACON_STOP):
306 break; 230 break;
@@ -309,10 +233,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
309 ret = lbs_ret_802_11_rssi(priv, resp); 233 ret = lbs_ret_802_11_rssi(priv, resp);
310 break; 234 break;
311 235
312 case CMD_RET(CMD_802_11_AD_HOC_STOP):
313 ret = lbs_ret_80211_ad_hoc_stop(priv);
314 break;
315
316 case CMD_RET(CMD_802_11D_DOMAIN_INFO): 236 case CMD_RET(CMD_802_11D_DOMAIN_INFO):
317 ret = lbs_ret_802_11d_domain_info(resp); 237 ret = lbs_ret_802_11d_domain_info(resp);
318 break; 238 break;
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index a8ac974dacac..1a8888cceadc 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -34,7 +34,6 @@ int lbs_process_event(struct lbs_private *priv, u32 event);
34void lbs_queue_event(struct lbs_private *priv, u32 event); 34void lbs_queue_event(struct lbs_private *priv, u32 event);
35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx); 35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
36 36
37int lbs_set_radio_control(struct lbs_private *priv);
38u32 lbs_fw_index_to_data_rate(u8 index); 37u32 lbs_fw_index_to_data_rate(u8 index);
39u8 lbs_data_rate_to_fw_index(u32 rate); 38u8 lbs_data_rate_to_fw_index(u32 rate);
40 39
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 12e687550bce..076a636e8f62 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -189,6 +189,14 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
189#define MRVDRV_CMD_UPLD_RDY 0x0008 189#define MRVDRV_CMD_UPLD_RDY 0x0008
190#define MRVDRV_CARDEVENT 0x0010 190#define MRVDRV_CARDEVENT 0x0010
191 191
192/* Automatic TX control default levels */
193#define POW_ADAPT_DEFAULT_P0 13
194#define POW_ADAPT_DEFAULT_P1 15
195#define POW_ADAPT_DEFAULT_P2 18
196#define TPC_DEFAULT_P0 5
197#define TPC_DEFAULT_P1 10
198#define TPC_DEFAULT_P2 13
199
192/** TxPD status */ 200/** TxPD status */
193 201
194/* Station firmware use TxPD status field to report final Tx transmit 202/* Station firmware use TxPD status field to report final Tx transmit
@@ -243,6 +251,9 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
243 251
244#define CMD_F_HOSTCMD (1 << 0) 252#define CMD_F_HOSTCMD (1 << 0)
245#define FW_CAPINFO_WPA (1 << 0) 253#define FW_CAPINFO_WPA (1 << 0)
254#define FW_CAPINFO_FIRMWARE_UPGRADE (1 << 13)
255#define FW_CAPINFO_BOOT2_UPGRADE (1<<14)
256#define FW_CAPINFO_PERSISTENT_CONFIG (1<<15)
246 257
247#define KEY_LEN_WPA_AES 16 258#define KEY_LEN_WPA_AES 16
248#define KEY_LEN_WPA_TKIP 32 259#define KEY_LEN_WPA_TKIP 32
@@ -316,7 +327,8 @@ enum PS_STATE {
316enum DNLD_STATE { 327enum DNLD_STATE {
317 DNLD_RES_RECEIVED, 328 DNLD_RES_RECEIVED,
318 DNLD_DATA_SENT, 329 DNLD_DATA_SENT,
319 DNLD_CMD_SENT 330 DNLD_CMD_SENT,
331 DNLD_BOOTCMD_SENT,
320}; 332};
321 333
322/** LBS_MEDIA_STATE */ 334/** LBS_MEDIA_STATE */
@@ -339,27 +351,6 @@ enum mv_ms_type {
339 MVMS_EVENT 351 MVMS_EVENT
340}; 352};
341 353
342/** SNMP_MIB_INDEX_e */
343enum SNMP_MIB_INDEX_e {
344 DESIRED_BSSTYPE_I = 0,
345 OP_RATESET_I,
346 BCNPERIOD_I,
347 DTIMPERIOD_I,
348 ASSOCRSP_TIMEOUT_I,
349 RTSTHRESH_I,
350 SHORT_RETRYLIM_I,
351 LONG_RETRYLIM_I,
352 FRAGTHRESH_I,
353 DOT11D_I,
354 DOT11H_I,
355 MANUFID_I,
356 PRODID_I,
357 MANUF_OUI_I,
358 MANUF_NAME_I,
359 MANUF_PRODNAME_I,
360 MANUF_PRODVER_I,
361};
362
363/** KEY_TYPE_ID */ 354/** KEY_TYPE_ID */
364enum KEY_TYPE_ID { 355enum KEY_TYPE_ID {
365 KEY_TYPE_ID_WEP = 0, 356 KEY_TYPE_ID_WEP = 0,
@@ -374,12 +365,6 @@ enum KEY_INFO_WPA {
374 KEY_INFO_WPA_ENABLED = 0x04 365 KEY_INFO_WPA_ENABLED = 0x04
375}; 366};
376 367
377/** SNMP_MIB_VALUE_e */
378enum SNMP_MIB_VALUE_e {
379 SNMP_MIB_VALUE_INFRA = 1,
380 SNMP_MIB_VALUE_ADHOC
381};
382
383/* Default values for fwt commands. */ 368/* Default values for fwt commands. */
384#define FWT_DEFAULT_METRIC 0 369#define FWT_DEFAULT_METRIC 0
385#define FWT_DEFAULT_DIR 1 370#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f5bb40c54d85..f6f3753da303 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -58,6 +58,7 @@ struct lbs_802_11_security {
58 u8 WPA2enabled; 58 u8 WPA2enabled;
59 u8 wep_enabled; 59 u8 wep_enabled;
60 u8 auth_mode; 60 u8 auth_mode;
61 u32 key_mgmt;
61}; 62};
62 63
63/** Current Basic Service Set State Structure */ 64/** Current Basic Service Set State Structure */
@@ -240,9 +241,6 @@ struct lbs_private {
240 uint16_t enablehwauto; 241 uint16_t enablehwauto;
241 uint16_t ratebitmap; 242 uint16_t ratebitmap;
242 243
243 u32 fragthsd;
244 u32 rtsthsd;
245
246 u8 txretrycount; 244 u8 txretrycount;
247 245
248 /** Tx-related variables (for single packet tx) */ 246 /** Tx-related variables (for single packet tx) */
@@ -253,7 +251,9 @@ struct lbs_private {
253 u32 connect_status; 251 u32 connect_status;
254 u32 mesh_connect_status; 252 u32 mesh_connect_status;
255 u16 regioncode; 253 u16 regioncode;
256 u16 txpowerlevel; 254 s16 txpower_cur;
255 s16 txpower_min;
256 s16 txpower_max;
257 257
258 /** POWER MANAGEMENT AND PnP SUPPORT */ 258 /** POWER MANAGEMENT AND PnP SUPPORT */
259 u8 surpriseremoved; 259 u8 surpriseremoved;
@@ -291,8 +291,7 @@ struct lbs_private {
291 u16 nextSNRNF; 291 u16 nextSNRNF;
292 u16 numSNRNF; 292 u16 numSNRNF;
293 293
294 u8 radioon; 294 u8 radio_on;
295 u32 preamble;
296 295
297 /** data rate stuff */ 296 /** data rate stuff */
298 u8 cur_rate; 297 u8 cur_rate;
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index c92e41b4faf4..5004d7679c02 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -9,17 +9,6 @@
9#define DEFAULT_AD_HOC_CHANNEL 6 9#define DEFAULT_AD_HOC_CHANNEL 6
10#define DEFAULT_AD_HOC_CHANNEL_A 36 10#define DEFAULT_AD_HOC_CHANNEL_A 36
11 11
12/** IEEE 802.11 oids */
13#define OID_802_11_SSID 0x00008002
14#define OID_802_11_INFRASTRUCTURE_MODE 0x00008008
15#define OID_802_11_FRAGMENTATION_THRESHOLD 0x00008009
16#define OID_802_11_RTS_THRESHOLD 0x0000800A
17#define OID_802_11_TX_ANTENNA_SELECTED 0x0000800D
18#define OID_802_11_SUPPORTED_RATES 0x0000800E
19#define OID_802_11_STATISTICS 0x00008012
20#define OID_802_11_TX_RETRYCOUNT 0x0000801D
21#define OID_802_11D_ENABLE 0x00008020
22
23#define CMD_OPTION_WAITFORRSP 0x0002 12#define CMD_OPTION_WAITFORRSP 0x0002
24 13
25/** Host command IDs */ 14/** Host command IDs */
@@ -61,7 +50,6 @@
61#define CMD_RF_REG_MAP 0x0023 50#define CMD_RF_REG_MAP 0x0023
62#define CMD_802_11_DEAUTHENTICATE 0x0024 51#define CMD_802_11_DEAUTHENTICATE 0x0024
63#define CMD_802_11_REASSOCIATE 0x0025 52#define CMD_802_11_REASSOCIATE 0x0025
64#define CMD_802_11_DISASSOCIATE 0x0026
65#define CMD_MAC_CONTROL 0x0028 53#define CMD_MAC_CONTROL 0x0028
66#define CMD_802_11_AD_HOC_START 0x002b 54#define CMD_802_11_AD_HOC_START 0x002b
67#define CMD_802_11_AD_HOC_JOIN 0x002c 55#define CMD_802_11_AD_HOC_JOIN 0x002c
@@ -84,6 +72,7 @@
84#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 72#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067
85#define CMD_802_11_SLEEP_PERIOD 0x0068 73#define CMD_802_11_SLEEP_PERIOD 0x0068
86#define CMD_802_11_TPC_CFG 0x0072 74#define CMD_802_11_TPC_CFG 0x0072
75#define CMD_802_11_PA_CFG 0x0073
87#define CMD_802_11_FW_WAKE_METHOD 0x0074 76#define CMD_802_11_FW_WAKE_METHOD 0x0074
88#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 77#define CMD_802_11_SUBSCRIBE_EVENT 0x0075
89#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 78#define CMD_802_11_RATE_ADAPT_RATESET 0x0076
@@ -153,11 +142,6 @@
153#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 142#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
154#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 143#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400
155 144
156/* Define action or option for CMD_802_11_RADIO_CONTROL */
157#define CMD_TYPE_AUTO_PREAMBLE 0x0001
158#define CMD_TYPE_SHORT_PREAMBLE 0x0002
159#define CMD_TYPE_LONG_PREAMBLE 0x0003
160
161/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */ 145/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
162#define CMD_SUBSCRIBE_RSSI_LOW 0x0001 146#define CMD_SUBSCRIBE_RSSI_LOW 0x0001
163#define CMD_SUBSCRIBE_SNR_LOW 0x0002 147#define CMD_SUBSCRIBE_SNR_LOW 0x0002
@@ -166,28 +150,14 @@
166#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010 150#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010
167#define CMD_SUBSCRIBE_SNR_HIGH 0x0020 151#define CMD_SUBSCRIBE_SNR_HIGH 0x0020
168 152
169#define TURN_ON_RF 0x01 153#define RADIO_PREAMBLE_LONG 0x00
170#define RADIO_ON 0x01 154#define RADIO_PREAMBLE_SHORT 0x02
171#define RADIO_OFF 0x00 155#define RADIO_PREAMBLE_AUTO 0x04
172
173#define SET_AUTO_PREAMBLE 0x05
174#define SET_SHORT_PREAMBLE 0x03
175#define SET_LONG_PREAMBLE 0x01
176 156
177/* Define action or option for CMD_802_11_RF_CHANNEL */ 157/* Define action or option for CMD_802_11_RF_CHANNEL */
178#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 158#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00
179#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 159#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01
180 160
181/* Define action or option for CMD_802_11_RF_TX_POWER */
182#define CMD_ACT_TX_POWER_OPT_GET 0x0000
183#define CMD_ACT_TX_POWER_OPT_SET_HIGH 0x8007
184#define CMD_ACT_TX_POWER_OPT_SET_MID 0x8004
185#define CMD_ACT_TX_POWER_OPT_SET_LOW 0x8000
186
187#define CMD_ACT_TX_POWER_INDEX_HIGH 0x0007
188#define CMD_ACT_TX_POWER_INDEX_MID 0x0004
189#define CMD_ACT_TX_POWER_INDEX_LOW 0x0000
190
191/* Define action or option for CMD_802_11_DATA_RATE */ 161/* Define action or option for CMD_802_11_DATA_RATE */
192#define CMD_ACT_SET_TX_AUTO 0x0000 162#define CMD_ACT_SET_TX_AUTO 0x0000
193#define CMD_ACT_SET_TX_FIX_RATE 0x0001 163#define CMD_ACT_SET_TX_FIX_RATE 0x0001
@@ -210,6 +180,19 @@
210#define CMD_WAKE_METHOD_COMMAND_INT 0x0001 180#define CMD_WAKE_METHOD_COMMAND_INT 0x0001
211#define CMD_WAKE_METHOD_GPIO 0x0002 181#define CMD_WAKE_METHOD_GPIO 0x0002
212 182
183/* Object IDs for CMD_802_11_SNMP_MIB */
184#define SNMP_MIB_OID_BSS_TYPE 0x0000
185#define SNMP_MIB_OID_OP_RATE_SET 0x0001
186#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */
187#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */
188#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */
189#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005
190#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006
191#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007
192#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008
193#define SNMP_MIB_OID_11D_ENABLE 0x0009
194#define SNMP_MIB_OID_11H_ENABLE 0x000A
195
213/* Define action or option for CMD_BT_ACCESS */ 196/* Define action or option for CMD_BT_ACCESS */
214enum cmd_bt_access_opts { 197enum cmd_bt_access_opts {
215 /* The bt commands start at 5 instead of 1 because the old dft commands 198 /* The bt commands start at 5 instead of 1 because the old dft commands
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index 913b480211a9..d9f9a12a739e 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -151,10 +151,6 @@ struct cmd_ds_get_hw_spec {
151 __le32 fwcapinfo; 151 __le32 fwcapinfo;
152} __attribute__ ((packed)); 152} __attribute__ ((packed));
153 153
154struct cmd_ds_802_11_reset {
155 __le16 action;
156};
157
158struct cmd_ds_802_11_subscribe_event { 154struct cmd_ds_802_11_subscribe_event {
159 struct cmd_header hdr; 155 struct cmd_header hdr;
160 156
@@ -232,7 +228,9 @@ struct cmd_ds_802_11_authenticate {
232}; 228};
233 229
234struct cmd_ds_802_11_deauthenticate { 230struct cmd_ds_802_11_deauthenticate {
235 u8 macaddr[6]; 231 struct cmd_header hdr;
232
233 u8 macaddr[ETH_ALEN];
236 __le16 reasoncode; 234 __le16 reasoncode;
237}; 235};
238 236
@@ -251,20 +249,10 @@ struct cmd_ds_802_11_associate {
251#endif 249#endif
252} __attribute__ ((packed)); 250} __attribute__ ((packed));
253 251
254struct cmd_ds_802_11_disassociate {
255 u8 destmacaddr[6];
256 __le16 reasoncode;
257};
258
259struct cmd_ds_802_11_associate_rsp { 252struct cmd_ds_802_11_associate_rsp {
260 struct ieeetypes_assocrsp assocRsp; 253 struct ieeetypes_assocrsp assocRsp;
261}; 254};
262 255
263struct cmd_ds_802_11_ad_hoc_result {
264 u8 pad[3];
265 u8 bssid[ETH_ALEN];
266};
267
268struct cmd_ds_802_11_set_wep { 256struct cmd_ds_802_11_set_wep {
269 struct cmd_header hdr; 257 struct cmd_header hdr;
270 258
@@ -309,7 +297,9 @@ struct cmd_ds_802_11_get_stat {
309}; 297};
310 298
311struct cmd_ds_802_11_snmp_mib { 299struct cmd_ds_802_11_snmp_mib {
312 __le16 querytype; 300 struct cmd_header hdr;
301
302 __le16 action;
313 __le16 oid; 303 __le16 oid;
314 __le16 bufsize; 304 __le16 bufsize;
315 u8 value[128]; 305 u8 value[128];
@@ -435,8 +425,12 @@ struct cmd_ds_802_11_mac_address {
435}; 425};
436 426
437struct cmd_ds_802_11_rf_tx_power { 427struct cmd_ds_802_11_rf_tx_power {
428 struct cmd_header hdr;
429
438 __le16 action; 430 __le16 action;
439 __le16 currentlevel; 431 __le16 curlevel;
432 s8 maxlevel;
433 s8 minlevel;
440}; 434};
441 435
442struct cmd_ds_802_11_rf_antenna { 436struct cmd_ds_802_11_rf_antenna {
@@ -507,10 +501,12 @@ struct cmd_ds_802_11_rate_adapt_rateset {
507}; 501};
508 502
509struct cmd_ds_802_11_ad_hoc_start { 503struct cmd_ds_802_11_ad_hoc_start {
504 struct cmd_header hdr;
505
510 u8 ssid[IW_ESSID_MAX_SIZE]; 506 u8 ssid[IW_ESSID_MAX_SIZE];
511 u8 bsstype; 507 u8 bsstype;
512 __le16 beaconperiod; 508 __le16 beaconperiod;
513 u8 dtimperiod; 509 u8 dtimperiod; /* Reserved on v9 and later */
514 union IEEEtypes_ssparamset ssparamset; 510 union IEEEtypes_ssparamset ssparamset;
515 union ieeetypes_phyparamset phyparamset; 511 union ieeetypes_phyparamset phyparamset;
516 __le16 probedelay; 512 __le16 probedelay;
@@ -519,9 +515,16 @@ struct cmd_ds_802_11_ad_hoc_start {
519 u8 tlv_memory_size_pad[100]; 515 u8 tlv_memory_size_pad[100];
520} __attribute__ ((packed)); 516} __attribute__ ((packed));
521 517
518struct cmd_ds_802_11_ad_hoc_result {
519 struct cmd_header hdr;
520
521 u8 pad[3];
522 u8 bssid[ETH_ALEN];
523};
524
522struct adhoc_bssdesc { 525struct adhoc_bssdesc {
523 u8 bssid[6]; 526 u8 bssid[ETH_ALEN];
524 u8 ssid[32]; 527 u8 ssid[IW_ESSID_MAX_SIZE];
525 u8 type; 528 u8 type;
526 __le16 beaconperiod; 529 __le16 beaconperiod;
527 u8 dtimperiod; 530 u8 dtimperiod;
@@ -539,10 +542,15 @@ struct adhoc_bssdesc {
539} __attribute__ ((packed)); 542} __attribute__ ((packed));
540 543
541struct cmd_ds_802_11_ad_hoc_join { 544struct cmd_ds_802_11_ad_hoc_join {
545 struct cmd_header hdr;
546
542 struct adhoc_bssdesc bss; 547 struct adhoc_bssdesc bss;
543 __le16 failtimeout; 548 __le16 failtimeout; /* Reserved on v9 and later */
544 __le16 probedelay; 549 __le16 probedelay; /* Reserved on v9 and later */
550} __attribute__ ((packed));
545 551
552struct cmd_ds_802_11_ad_hoc_stop {
553 struct cmd_header hdr;
546} __attribute__ ((packed)); 554} __attribute__ ((packed));
547 555
548struct cmd_ds_802_11_enable_rsn { 556struct cmd_ds_802_11_enable_rsn {
@@ -597,14 +605,28 @@ struct cmd_ds_802_11_eeprom_access {
597} __attribute__ ((packed)); 605} __attribute__ ((packed));
598 606
599struct cmd_ds_802_11_tpc_cfg { 607struct cmd_ds_802_11_tpc_cfg {
608 struct cmd_header hdr;
609
600 __le16 action; 610 __le16 action;
601 u8 enable; 611 uint8_t enable;
602 s8 P0; 612 int8_t P0;
603 s8 P1; 613 int8_t P1;
604 s8 P2; 614 int8_t P2;
605 u8 usesnr; 615 uint8_t usesnr;
606} __attribute__ ((packed)); 616} __attribute__ ((packed));
607 617
618
619struct cmd_ds_802_11_pa_cfg {
620 struct cmd_header hdr;
621
622 __le16 action;
623 uint8_t enable;
624 int8_t P0;
625 int8_t P1;
626 int8_t P2;
627} __attribute__ ((packed));
628
629
608struct cmd_ds_802_11_led_ctrl { 630struct cmd_ds_802_11_led_ctrl {
609 __le16 action; 631 __le16 action;
610 __le16 numled; 632 __le16 numled;
@@ -693,21 +715,13 @@ struct cmd_ds_command {
693 union { 715 union {
694 struct cmd_ds_802_11_ps_mode psmode; 716 struct cmd_ds_802_11_ps_mode psmode;
695 struct cmd_ds_802_11_associate associate; 717 struct cmd_ds_802_11_associate associate;
696 struct cmd_ds_802_11_deauthenticate deauth;
697 struct cmd_ds_802_11_ad_hoc_start ads;
698 struct cmd_ds_802_11_reset reset;
699 struct cmd_ds_802_11_ad_hoc_result result;
700 struct cmd_ds_802_11_authenticate auth; 718 struct cmd_ds_802_11_authenticate auth;
701 struct cmd_ds_802_11_get_stat gstat; 719 struct cmd_ds_802_11_get_stat gstat;
702 struct cmd_ds_802_3_get_stat gstat_8023; 720 struct cmd_ds_802_3_get_stat gstat_8023;
703 struct cmd_ds_802_11_snmp_mib smib;
704 struct cmd_ds_802_11_rf_tx_power txp;
705 struct cmd_ds_802_11_rf_antenna rant; 721 struct cmd_ds_802_11_rf_antenna rant;
706 struct cmd_ds_802_11_monitor_mode monitor; 722 struct cmd_ds_802_11_monitor_mode monitor;
707 struct cmd_ds_802_11_ad_hoc_join adj;
708 struct cmd_ds_802_11_rssi rssi; 723 struct cmd_ds_802_11_rssi rssi;
709 struct cmd_ds_802_11_rssi_rsp rssirsp; 724 struct cmd_ds_802_11_rssi_rsp rssirsp;
710 struct cmd_ds_802_11_disassociate dassociate;
711 struct cmd_ds_mac_reg_access macreg; 725 struct cmd_ds_mac_reg_access macreg;
712 struct cmd_ds_bbp_reg_access bbpreg; 726 struct cmd_ds_bbp_reg_access bbpreg;
713 struct cmd_ds_rf_reg_access rfreg; 727 struct cmd_ds_rf_reg_access rfreg;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 8941919001bb..e3505c110af6 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -713,7 +713,7 @@ static int if_cs_host_to_card(struct lbs_private *priv,
713 ret = if_cs_send_cmd(priv, buf, nb); 713 ret = if_cs_send_cmd(priv, buf, nb);
714 break; 714 break;
715 default: 715 default:
716 lbs_pr_err("%s: unsupported type %d\n", __FUNCTION__, type); 716 lbs_pr_err("%s: unsupported type %d\n", __func__, type);
717 } 717 }
718 718
719 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 719 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 632c291404ab..cafbccb74143 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -39,7 +39,10 @@ MODULE_DEVICE_TABLE(usb, if_usb_table);
39 39
40static void if_usb_receive(struct urb *urb); 40static void if_usb_receive(struct urb *urb);
41static void if_usb_receive_fwload(struct urb *urb); 41static void if_usb_receive_fwload(struct urb *urb);
42static int if_usb_prog_firmware(struct if_usb_card *cardp); 42static int __if_usb_prog_firmware(struct if_usb_card *cardp,
43 const char *fwname, int cmd);
44static int if_usb_prog_firmware(struct if_usb_card *cardp,
45 const char *fwname, int cmd);
43static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, 46static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
44 uint8_t *payload, uint16_t nb); 47 uint8_t *payload, uint16_t nb);
45static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, 48static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
@@ -48,6 +51,62 @@ static void if_usb_free(struct if_usb_card *cardp);
48static int if_usb_submit_rx_urb(struct if_usb_card *cardp); 51static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
49static int if_usb_reset_device(struct if_usb_card *cardp); 52static int if_usb_reset_device(struct if_usb_card *cardp);
50 53
54/* sysfs hooks */
55
56/**
57 * Set function to write firmware to device's persistent memory
58 */
59static ssize_t if_usb_firmware_set(struct device *dev,
60 struct device_attribute *attr, const char *buf, size_t count)
61{
62 struct lbs_private *priv = to_net_dev(dev)->priv;
63 struct if_usb_card *cardp = priv->card;
64 char fwname[FIRMWARE_NAME_MAX];
65 int ret;
66
67 sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
68 ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_FW);
69 if (ret == 0)
70 return count;
71
72 return ret;
73}
74
75/**
76 * lbs_flash_fw attribute to be exported per ethX interface through sysfs
77 * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to
78 * the device's persistent memory:
79 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw
80 */
81static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
82
83/**
84 * Set function to write firmware to device's persistent memory
85 */
86static ssize_t if_usb_boot2_set(struct device *dev,
87 struct device_attribute *attr, const char *buf, size_t count)
88{
89 struct lbs_private *priv = to_net_dev(dev)->priv;
90 struct if_usb_card *cardp = priv->card;
91 char fwname[FIRMWARE_NAME_MAX];
92 int ret;
93
94 sscanf(buf, "%29s", fwname); /* FIRMWARE_NAME_MAX - 1 = 29 */
95 ret = if_usb_prog_firmware(cardp, fwname, BOOT_CMD_UPDATE_BOOT2);
96 if (ret == 0)
97 return count;
98
99 return ret;
100}
101
102/**
103 * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs
104 * (/sys/class/net/ethX/lbs_flash_boot2). Use this like so to write firmware
105 * to the device's persistent memory:
106 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2
107 */
108static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set);
109
51/** 110/**
52 * @brief call back function to handle the status of the URB 111 * @brief call back function to handle the status of the URB
53 * @param urb pointer to urb structure 112 * @param urb pointer to urb structure
@@ -66,10 +125,10 @@ static void if_usb_write_bulk_callback(struct urb *urb)
66 lbs_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n", 125 lbs_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
67 urb->actual_length); 126 urb->actual_length);
68 127
69 /* Used for both firmware TX and regular TX. priv isn't 128 /* Boot commands such as UPDATE_FW and UPDATE_BOOT2 are not
70 * valid at firmware load time. 129 * passed up to the lbs level.
71 */ 130 */
72 if (priv) 131 if (priv && priv->dnld_sent != DNLD_BOOTCMD_SENT)
73 lbs_host_to_card_done(priv); 132 lbs_host_to_card_done(priv);
74 } else { 133 } else {
75 /* print the failure status number for debug */ 134 /* print the failure status number for debug */
@@ -231,7 +290,7 @@ static int if_usb_probe(struct usb_interface *intf,
231 } 290 }
232 291
233 /* Upload firmware */ 292 /* Upload firmware */
234 if (if_usb_prog_firmware(cardp)) { 293 if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
235 lbs_deb_usbd(&udev->dev, "FW upload failed\n"); 294 lbs_deb_usbd(&udev->dev, "FW upload failed\n");
236 goto err_prog_firmware; 295 goto err_prog_firmware;
237 } 296 }
@@ -260,6 +319,12 @@ static int if_usb_probe(struct usb_interface *intf,
260 usb_get_dev(udev); 319 usb_get_dev(udev);
261 usb_set_intfdata(intf, cardp); 320 usb_set_intfdata(intf, cardp);
262 321
322 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw))
323 lbs_pr_err("cannot register lbs_flash_fw attribute\n");
324
325 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
326 lbs_pr_err("cannot register lbs_flash_boot2 attribute\n");
327
263 return 0; 328 return 0;
264 329
265err_start_card: 330err_start_card:
@@ -285,6 +350,9 @@ static void if_usb_disconnect(struct usb_interface *intf)
285 350
286 lbs_deb_enter(LBS_DEB_MAIN); 351 lbs_deb_enter(LBS_DEB_MAIN);
287 352
353 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2);
354 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw);
355
288 cardp->surprise_removed = 1; 356 cardp->surprise_removed = 1;
289 357
290 if (priv) { 358 if (priv) {
@@ -371,11 +439,10 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
371 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); 439 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
372 440
373 cmd->command = cpu_to_le16(CMD_802_11_RESET); 441 cmd->command = cpu_to_le16(CMD_802_11_RESET);
374 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset) + S_DS_GEN); 442 cmd->size = cpu_to_le16(sizeof(struct cmd_header));
375 cmd->result = cpu_to_le16(0); 443 cmd->result = cpu_to_le16(0);
376 cmd->seqnum = cpu_to_le16(0x5a5a); 444 cmd->seqnum = cpu_to_le16(0x5a5a);
377 cmd->params.reset.action = cpu_to_le16(CMD_ACT_HALT); 445 usb_tx_block(cardp, cardp->ep_out_buf, 4 + sizeof(struct cmd_header));
378 usb_tx_block(cardp, cardp->ep_out_buf, 4 + S_DS_GEN + sizeof(struct cmd_ds_802_11_reset));
379 446
380 msleep(100); 447 msleep(100);
381 ret = usb_reset_device(cardp->udev); 448 ret = usb_reset_device(cardp->udev);
@@ -510,7 +577,7 @@ static void if_usb_receive_fwload(struct urb *urb)
510 if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) { 577 if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) {
511 kfree_skb(skb); 578 kfree_skb(skb);
512 if_usb_submit_rx_urb_fwload(cardp); 579 if_usb_submit_rx_urb_fwload(cardp);
513 cardp->bootcmdresp = 1; 580 cardp->bootcmdresp = BOOT_CMD_RESP_OK;
514 lbs_deb_usbd(&cardp->udev->dev, 581 lbs_deb_usbd(&cardp->udev->dev,
515 "Received valid boot command response\n"); 582 "Received valid boot command response\n");
516 return; 583 return;
@@ -526,7 +593,9 @@ static void if_usb_receive_fwload(struct urb *urb)
526 lbs_pr_info("boot cmd response wrong magic number (0x%x)\n", 593 lbs_pr_info("boot cmd response wrong magic number (0x%x)\n",
527 le32_to_cpu(bootcmdresp.magic)); 594 le32_to_cpu(bootcmdresp.magic));
528 } 595 }
529 } else if (bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) { 596 } else if ((bootcmdresp.cmd != BOOT_CMD_FW_BY_USB) &&
597 (bootcmdresp.cmd != BOOT_CMD_UPDATE_FW) &&
598 (bootcmdresp.cmd != BOOT_CMD_UPDATE_BOOT2)) {
530 lbs_pr_info("boot cmd response cmd_tag error (%d)\n", 599 lbs_pr_info("boot cmd response cmd_tag error (%d)\n",
531 bootcmdresp.cmd); 600 bootcmdresp.cmd);
532 } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) { 601 } else if (bootcmdresp.result != BOOT_CMD_RESP_OK) {
@@ -564,8 +633,8 @@ static void if_usb_receive_fwload(struct urb *urb)
564 633
565 kfree_skb(skb); 634 kfree_skb(skb);
566 635
567 /* reschedule timer for 200ms hence */ 636 /* Give device 5s to either write firmware to its RAM or eeprom */
568 mod_timer(&cardp->fw_timeout, jiffies + (HZ/5)); 637 mod_timer(&cardp->fw_timeout, jiffies + (HZ*5));
569 638
570 if (cardp->fwfinalblk) { 639 if (cardp->fwfinalblk) {
571 cardp->fwdnldover = 1; 640 cardp->fwdnldover = 1;
@@ -809,7 +878,54 @@ static int check_fwfile_format(const uint8_t *data, uint32_t totlen)
809} 878}
810 879
811 880
812static int if_usb_prog_firmware(struct if_usb_card *cardp) 881/**
882* @brief This function programs the firmware subject to cmd
883*
884* @param cardp the if_usb_card descriptor
885* fwname firmware or boot2 image file name
886* cmd either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW,
887* or BOOT_CMD_UPDATE_BOOT2.
888* @return 0 or error code
889*/
890static int if_usb_prog_firmware(struct if_usb_card *cardp,
891 const char *fwname, int cmd)
892{
893 struct lbs_private *priv = cardp->priv;
894 unsigned long flags, caps;
895 int ret;
896
897 caps = priv->fwcapinfo;
898 if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) ||
899 ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE)))
900 return -EOPNOTSUPP;
901
902 /* Ensure main thread is idle. */
903 spin_lock_irqsave(&priv->driver_lock, flags);
904 while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) {
905 spin_unlock_irqrestore(&priv->driver_lock, flags);
906 if (wait_event_interruptible(priv->waitq,
907 (priv->cur_cmd == NULL &&
908 priv->dnld_sent == DNLD_RES_RECEIVED))) {
909 return -ERESTARTSYS;
910 }
911 spin_lock_irqsave(&priv->driver_lock, flags);
912 }
913 priv->dnld_sent = DNLD_BOOTCMD_SENT;
914 spin_unlock_irqrestore(&priv->driver_lock, flags);
915
916 ret = __if_usb_prog_firmware(cardp, fwname, cmd);
917
918 spin_lock_irqsave(&priv->driver_lock, flags);
919 priv->dnld_sent = DNLD_RES_RECEIVED;
920 spin_unlock_irqrestore(&priv->driver_lock, flags);
921
922 wake_up_interruptible(&priv->waitq);
923
924 return ret;
925}
926
927static int __if_usb_prog_firmware(struct if_usb_card *cardp,
928 const char *fwname, int cmd)
813{ 929{
814 int i = 0; 930 int i = 0;
815 static int reset_count = 10; 931 static int reset_count = 10;
@@ -817,20 +933,32 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
817 933
818 lbs_deb_enter(LBS_DEB_USB); 934 lbs_deb_enter(LBS_DEB_USB);
819 935
820 if ((ret = request_firmware(&cardp->fw, lbs_fw_name, 936 ret = request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
821 &cardp->udev->dev)) < 0) { 937 if (ret < 0) {
822 lbs_pr_err("request_firmware() failed with %#x\n", ret); 938 lbs_pr_err("request_firmware() failed with %#x\n", ret);
823 lbs_pr_err("firmware %s not found\n", lbs_fw_name); 939 lbs_pr_err("firmware %s not found\n", fwname);
824 goto done; 940 goto done;
825 } 941 }
826 942
827 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) 943 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
944 ret = -EINVAL;
828 goto release_fw; 945 goto release_fw;
946 }
947
948 /* Cancel any pending usb business */
949 usb_kill_urb(cardp->rx_urb);
950 usb_kill_urb(cardp->tx_urb);
951
952 cardp->fwlastblksent = 0;
953 cardp->fwdnldover = 0;
954 cardp->totalbytes = 0;
955 cardp->fwfinalblk = 0;
956 cardp->bootcmdresp = 0;
829 957
830restart: 958restart:
831 if (if_usb_submit_rx_urb_fwload(cardp) < 0) { 959 if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
832 lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n"); 960 lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
833 ret = -1; 961 ret = -EIO;
834 goto release_fw; 962 goto release_fw;
835 } 963 }
836 964
@@ -838,8 +966,7 @@ restart:
838 do { 966 do {
839 int j = 0; 967 int j = 0;
840 i++; 968 i++;
841 /* Issue Boot command = 1, Boot from Download-FW */ 969 if_usb_issue_boot_command(cardp, cmd);
842 if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
843 /* wait for command response */ 970 /* wait for command response */
844 do { 971 do {
845 j++; 972 j++;
@@ -847,12 +974,21 @@ restart:
847 } while (cardp->bootcmdresp == 0 && j < 10); 974 } while (cardp->bootcmdresp == 0 && j < 10);
848 } while (cardp->bootcmdresp == 0 && i < 5); 975 } while (cardp->bootcmdresp == 0 && i < 5);
849 976
850 if (cardp->bootcmdresp <= 0) { 977 if (cardp->bootcmdresp == BOOT_CMD_RESP_NOT_SUPPORTED) {
978 /* Return to normal operation */
979 ret = -EOPNOTSUPP;
980 usb_kill_urb(cardp->rx_urb);
981 usb_kill_urb(cardp->tx_urb);
982 if (if_usb_submit_rx_urb(cardp) < 0)
983 ret = -EIO;
984 goto release_fw;
985 } else if (cardp->bootcmdresp <= 0) {
851 if (--reset_count >= 0) { 986 if (--reset_count >= 0) {
852 if_usb_reset_device(cardp); 987 if_usb_reset_device(cardp);
853 goto restart; 988 goto restart;
854 } 989 }
855 return -1; 990 ret = -EIO;
991 goto release_fw;
856 } 992 }
857 993
858 i = 0; 994 i = 0;
@@ -882,7 +1018,7 @@ restart:
882 } 1018 }
883 1019
884 lbs_pr_info("FW download failure, time = %d ms\n", i * 100); 1020 lbs_pr_info("FW download failure, time = %d ms\n", i * 100);
885 ret = -1; 1021 ret = -EIO;
886 goto release_fw; 1022 goto release_fw;
887 } 1023 }
888 1024
diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h
index 5771a83a43f0..5ba0aee0eb2f 100644
--- a/drivers/net/wireless/libertas/if_usb.h
+++ b/drivers/net/wireless/libertas/if_usb.h
@@ -30,6 +30,7 @@ struct bootcmd
30 30
31#define BOOT_CMD_RESP_OK 0x0001 31#define BOOT_CMD_RESP_OK 0x0001
32#define BOOT_CMD_RESP_FAIL 0x0000 32#define BOOT_CMD_RESP_FAIL 0x0000
33#define BOOT_CMD_RESP_NOT_SUPPORTED 0x0002
33 34
34struct bootcmdresp 35struct bootcmdresp
35{ 36{
@@ -50,6 +51,10 @@ struct if_usb_card {
50 uint8_t ep_in; 51 uint8_t ep_in;
51 uint8_t ep_out; 52 uint8_t ep_out;
52 53
54 /* bootcmdresp == 0 means command is pending
55 * bootcmdresp < 0 means error
56 * bootcmdresp > 0 is a BOOT_CMD_RESP_* from firmware
57 */
53 int8_t bootcmdresp; 58 int8_t bootcmdresp;
54 59
55 int ep_in_size; 60 int ep_in_size;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index bd32ac0b4e07..73dc8c72402a 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -291,9 +291,11 @@ static ssize_t lbs_rtap_set(struct device *dev,
291 if (priv->infra_open || priv->mesh_open) 291 if (priv->infra_open || priv->mesh_open)
292 return -EBUSY; 292 return -EBUSY;
293 if (priv->mode == IW_MODE_INFRA) 293 if (priv->mode == IW_MODE_INFRA)
294 lbs_send_deauthentication(priv); 294 lbs_cmd_80211_deauthenticate(priv,
295 priv->curbssparams.bssid,
296 WLAN_REASON_DEAUTH_LEAVING);
295 else if (priv->mode == IW_MODE_ADHOC) 297 else if (priv->mode == IW_MODE_ADHOC)
296 lbs_stop_adhoc_network(priv); 298 lbs_adhoc_stop(priv);
297 lbs_add_rtap(priv); 299 lbs_add_rtap(priv);
298 } 300 }
299 priv->monitormode = monitor_mode; 301 priv->monitormode = monitor_mode;
@@ -956,17 +958,24 @@ EXPORT_SYMBOL_GPL(lbs_resume);
956static int lbs_setup_firmware(struct lbs_private *priv) 958static int lbs_setup_firmware(struct lbs_private *priv)
957{ 959{
958 int ret = -1; 960 int ret = -1;
961 s16 curlevel = 0, minlevel = 0, maxlevel = 0;
959 962
960 lbs_deb_enter(LBS_DEB_FW); 963 lbs_deb_enter(LBS_DEB_FW);
961 964
962 /* 965 /* Read MAC address from firmware */
963 * Read MAC address from HW
964 */
965 memset(priv->current_addr, 0xff, ETH_ALEN); 966 memset(priv->current_addr, 0xff, ETH_ALEN);
966 ret = lbs_update_hw_spec(priv); 967 ret = lbs_update_hw_spec(priv);
967 if (ret) 968 if (ret)
968 goto done; 969 goto done;
969 970
971 /* Read power levels if available */
972 ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
973 if (ret == 0) {
974 priv->txpower_cur = curlevel;
975 priv->txpower_min = minlevel;
976 priv->txpower_max = maxlevel;
977 }
978
970 lbs_set_mac_control(priv); 979 lbs_set_mac_control(priv);
971done: 980done:
972 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); 981 lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
@@ -1042,7 +1051,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
1042 priv->mode = IW_MODE_INFRA; 1051 priv->mode = IW_MODE_INFRA;
1043 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; 1052 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
1044 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 1053 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
1045 priv->radioon = RADIO_ON; 1054 priv->radio_on = 1;
1046 priv->enablehwauto = 1; 1055 priv->enablehwauto = 1;
1047 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; 1056 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
1048 priv->psmode = LBS802_11POWERMODECAM; 1057 priv->psmode = LBS802_11POWERMODECAM;
@@ -1196,7 +1205,13 @@ void lbs_remove_card(struct lbs_private *priv)
1196 cancel_delayed_work_sync(&priv->scan_work); 1205 cancel_delayed_work_sync(&priv->scan_work);
1197 cancel_delayed_work_sync(&priv->assoc_work); 1206 cancel_delayed_work_sync(&priv->assoc_work);
1198 cancel_work_sync(&priv->mcast_work); 1207 cancel_work_sync(&priv->mcast_work);
1208
1209 /* worker thread destruction blocks on the in-flight command which
1210 * should have been cleared already in lbs_stop_card().
1211 */
1212 lbs_deb_main("destroying worker thread\n");
1199 destroy_workqueue(priv->work_thread); 1213 destroy_workqueue(priv->work_thread);
1214 lbs_deb_main("done destroying worker thread\n");
1200 1215
1201 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { 1216 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
1202 priv->psmode = LBS802_11POWERMODECAM; 1217 priv->psmode = LBS802_11POWERMODECAM;
@@ -1314,14 +1329,26 @@ void lbs_stop_card(struct lbs_private *priv)
1314 device_remove_file(&dev->dev, &dev_attr_lbs_rtap); 1329 device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
1315 } 1330 }
1316 1331
1317 /* Flush pending command nodes */ 1332 /* Delete the timeout of the currently processing command */
1318 del_timer_sync(&priv->command_timer); 1333 del_timer_sync(&priv->command_timer);
1334
1335 /* Flush pending command nodes */
1319 spin_lock_irqsave(&priv->driver_lock, flags); 1336 spin_lock_irqsave(&priv->driver_lock, flags);
1337 lbs_deb_main("clearing pending commands\n");
1320 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { 1338 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
1321 cmdnode->result = -ENOENT; 1339 cmdnode->result = -ENOENT;
1322 cmdnode->cmdwaitqwoken = 1; 1340 cmdnode->cmdwaitqwoken = 1;
1323 wake_up_interruptible(&cmdnode->cmdwait_q); 1341 wake_up_interruptible(&cmdnode->cmdwait_q);
1324 } 1342 }
1343
1344 /* Flush the command the card is currently processing */
1345 if (priv->cur_cmd) {
1346 lbs_deb_main("clearing current command\n");
1347 priv->cur_cmd->result = -ENOENT;
1348 priv->cur_cmd->cmdwaitqwoken = 1;
1349 wake_up_interruptible(&priv->cur_cmd->cmdwait_q);
1350 }
1351 lbs_deb_main("done clearing commands\n");
1325 spin_unlock_irqrestore(&priv->driver_lock, flags); 1352 spin_unlock_irqrestore(&priv->driver_lock, flags);
1326 1353
1327 unregister_netdev(dev); 1354 unregister_netdev(dev);
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 4b274562f965..8f66903641b9 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -944,6 +944,11 @@ int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
944 944
945 lbs_deb_enter(LBS_DEB_WEXT); 945 lbs_deb_enter(LBS_DEB_WEXT);
946 946
947 if (!priv->radio_on) {
948 ret = -EINVAL;
949 goto out;
950 }
951
947 if (!netif_running(dev)) { 952 if (!netif_running(dev)) {
948 ret = -ENETDOWN; 953 ret = -ENETDOWN;
949 goto out; 954 goto out;
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 8b3ed77860b3..82c3e5a50ea6 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -30,6 +30,14 @@ static inline void lbs_postpone_association_work(struct lbs_private *priv)
30 queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2); 30 queue_delayed_work(priv->work_thread, &priv->assoc_work, HZ / 2);
31} 31}
32 32
33static inline void lbs_do_association_work(struct lbs_private *priv)
34{
35 if (priv->surpriseremoved)
36 return;
37 cancel_delayed_work(&priv->assoc_work);
38 queue_delayed_work(priv->work_thread, &priv->assoc_work, 0);
39}
40
33static inline void lbs_cancel_association_work(struct lbs_private *priv) 41static inline void lbs_cancel_association_work(struct lbs_private *priv)
34{ 42{
35 cancel_delayed_work(&priv->assoc_work); 43 cancel_delayed_work(&priv->assoc_work);
@@ -120,34 +128,6 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(
120 return cfp; 128 return cfp;
121} 129}
122 130
123
124/**
125 * @brief Set Radio On/OFF
126 *
127 * @param priv A pointer to struct lbs_private structure
128 * @option Radio Option
129 * @return 0 --success, otherwise fail
130 */
131static int lbs_radio_ioctl(struct lbs_private *priv, u8 option)
132{
133 int ret = 0;
134
135 lbs_deb_enter(LBS_DEB_WEXT);
136
137 if (priv->radioon != option) {
138 lbs_deb_wext("switching radio %s\n", option ? "on" : "off");
139 priv->radioon = option;
140
141 ret = lbs_prepare_and_send_command(priv,
142 CMD_802_11_RADIO_CONTROL,
143 CMD_ACT_SET,
144 CMD_OPTION_WAITFORRSP, 0, NULL);
145 }
146
147 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
148 return ret;
149}
150
151/** 131/**
152 * @brief Copy active data rates based on adapter mode and status 132 * @brief Copy active data rates based on adapter mode and status
153 * 133 *
@@ -294,21 +274,17 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
294{ 274{
295 int ret = 0; 275 int ret = 0;
296 struct lbs_private *priv = dev->priv; 276 struct lbs_private *priv = dev->priv;
297 u32 rthr = vwrq->value; 277 u32 val = vwrq->value;
298 278
299 lbs_deb_enter(LBS_DEB_WEXT); 279 lbs_deb_enter(LBS_DEB_WEXT);
300 280
301 if (vwrq->disabled) { 281 if (vwrq->disabled)
302 priv->rtsthsd = rthr = MRVDRV_RTS_MAX_VALUE; 282 val = MRVDRV_RTS_MAX_VALUE;
303 } else { 283
304 if (rthr < MRVDRV_RTS_MIN_VALUE || rthr > MRVDRV_RTS_MAX_VALUE) 284 if (val > MRVDRV_RTS_MAX_VALUE) /* min rts value is 0 */
305 return -EINVAL; 285 return -EINVAL;
306 priv->rtsthsd = rthr;
307 }
308 286
309 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, 287 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, (u16) val);
310 CMD_ACT_SET, CMD_OPTION_WAITFORRSP,
311 OID_802_11_RTS_THRESHOLD, &rthr);
312 288
313 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 289 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
314 return ret; 290 return ret;
@@ -317,21 +293,18 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
317static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info, 293static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
318 struct iw_param *vwrq, char *extra) 294 struct iw_param *vwrq, char *extra)
319{ 295{
320 int ret = 0;
321 struct lbs_private *priv = dev->priv; 296 struct lbs_private *priv = dev->priv;
297 int ret = 0;
298 u16 val = 0;
322 299
323 lbs_deb_enter(LBS_DEB_WEXT); 300 lbs_deb_enter(LBS_DEB_WEXT);
324 301
325 priv->rtsthsd = 0; 302 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_RTS_THRESHOLD, &val);
326 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB,
327 CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
328 OID_802_11_RTS_THRESHOLD, NULL);
329 if (ret) 303 if (ret)
330 goto out; 304 goto out;
331 305
332 vwrq->value = priv->rtsthsd; 306 vwrq->value = val;
333 vwrq->disabled = ((vwrq->value < MRVDRV_RTS_MIN_VALUE) 307 vwrq->disabled = val > MRVDRV_RTS_MAX_VALUE; /* min rts value is 0 */
334 || (vwrq->value > MRVDRV_RTS_MAX_VALUE));
335 vwrq->fixed = 1; 308 vwrq->fixed = 1;
336 309
337out: 310out:
@@ -342,24 +315,19 @@ out:
342static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info, 315static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
343 struct iw_param *vwrq, char *extra) 316 struct iw_param *vwrq, char *extra)
344{ 317{
345 int ret = 0;
346 u32 fthr = vwrq->value;
347 struct lbs_private *priv = dev->priv; 318 struct lbs_private *priv = dev->priv;
319 int ret = 0;
320 u32 val = vwrq->value;
348 321
349 lbs_deb_enter(LBS_DEB_WEXT); 322 lbs_deb_enter(LBS_DEB_WEXT);
350 323
351 if (vwrq->disabled) { 324 if (vwrq->disabled)
352 priv->fragthsd = fthr = MRVDRV_FRAG_MAX_VALUE; 325 val = MRVDRV_FRAG_MAX_VALUE;
353 } else { 326
354 if (fthr < MRVDRV_FRAG_MIN_VALUE 327 if (val < MRVDRV_FRAG_MIN_VALUE || val > MRVDRV_FRAG_MAX_VALUE)
355 || fthr > MRVDRV_FRAG_MAX_VALUE) 328 return -EINVAL;
356 return -EINVAL;
357 priv->fragthsd = fthr;
358 }
359 329
360 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, 330 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, (u16) val);
361 CMD_ACT_SET, CMD_OPTION_WAITFORRSP,
362 OID_802_11_FRAGMENTATION_THRESHOLD, &fthr);
363 331
364 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 332 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
365 return ret; 333 return ret;
@@ -368,22 +336,19 @@ static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
368static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info, 336static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
369 struct iw_param *vwrq, char *extra) 337 struct iw_param *vwrq, char *extra)
370{ 338{
371 int ret = 0;
372 struct lbs_private *priv = dev->priv; 339 struct lbs_private *priv = dev->priv;
340 int ret = 0;
341 u16 val = 0;
373 342
374 lbs_deb_enter(LBS_DEB_WEXT); 343 lbs_deb_enter(LBS_DEB_WEXT);
375 344
376 priv->fragthsd = 0; 345 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_FRAG_THRESHOLD, &val);
377 ret = lbs_prepare_and_send_command(priv,
378 CMD_802_11_SNMP_MIB,
379 CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
380 OID_802_11_FRAGMENTATION_THRESHOLD, NULL);
381 if (ret) 346 if (ret)
382 goto out; 347 goto out;
383 348
384 vwrq->value = priv->fragthsd; 349 vwrq->value = val;
385 vwrq->disabled = ((vwrq->value < MRVDRV_FRAG_MIN_VALUE) 350 vwrq->disabled = ((val < MRVDRV_FRAG_MIN_VALUE)
386 || (vwrq->value > MRVDRV_FRAG_MAX_VALUE)); 351 || (val > MRVDRV_FRAG_MAX_VALUE));
387 vwrq->fixed = 1; 352 vwrq->fixed = 1;
388 353
389out: 354out:
@@ -410,7 +375,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
410{ 375{
411 lbs_deb_enter(LBS_DEB_WEXT); 376 lbs_deb_enter(LBS_DEB_WEXT);
412 377
413 *uwrq = IW_MODE_REPEAT ; 378 *uwrq = IW_MODE_REPEAT;
414 379
415 lbs_deb_leave(LBS_DEB_WEXT); 380 lbs_deb_leave(LBS_DEB_WEXT);
416 return 0; 381 return 0;
@@ -420,28 +385,30 @@ static int lbs_get_txpow(struct net_device *dev,
420 struct iw_request_info *info, 385 struct iw_request_info *info,
421 struct iw_param *vwrq, char *extra) 386 struct iw_param *vwrq, char *extra)
422{ 387{
423 int ret = 0;
424 struct lbs_private *priv = dev->priv; 388 struct lbs_private *priv = dev->priv;
389 s16 curlevel = 0;
390 int ret = 0;
425 391
426 lbs_deb_enter(LBS_DEB_WEXT); 392 lbs_deb_enter(LBS_DEB_WEXT);
427 393
428 ret = lbs_prepare_and_send_command(priv, 394 if (!priv->radio_on) {
429 CMD_802_11_RF_TX_POWER, 395 lbs_deb_wext("tx power off\n");
430 CMD_ACT_TX_POWER_OPT_GET, 396 vwrq->value = 0;
431 CMD_OPTION_WAITFORRSP, 0, NULL); 397 vwrq->disabled = 1;
398 goto out;
399 }
432 400
401 ret = lbs_get_tx_power(priv, &curlevel, NULL, NULL);
433 if (ret) 402 if (ret)
434 goto out; 403 goto out;
435 404
436 lbs_deb_wext("tx power level %d dbm\n", priv->txpowerlevel); 405 lbs_deb_wext("tx power level %d dbm\n", curlevel);
437 vwrq->value = priv->txpowerlevel; 406 priv->txpower_cur = curlevel;
407
408 vwrq->value = curlevel;
438 vwrq->fixed = 1; 409 vwrq->fixed = 1;
439 if (priv->radioon) { 410 vwrq->disabled = 0;
440 vwrq->disabled = 0; 411 vwrq->flags = IW_TXPOW_DBM;
441 vwrq->flags = IW_TXPOW_DBM;
442 } else {
443 vwrq->disabled = 1;
444 }
445 412
446out: 413out:
447 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 414 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
@@ -451,31 +418,44 @@ out:
451static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info, 418static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
452 struct iw_param *vwrq, char *extra) 419 struct iw_param *vwrq, char *extra)
453{ 420{
454 int ret = 0;
455 struct lbs_private *priv = dev->priv; 421 struct lbs_private *priv = dev->priv;
422 int ret = 0;
423 u16 slimit = 0, llimit = 0;
456 424
457 lbs_deb_enter(LBS_DEB_WEXT); 425 lbs_deb_enter(LBS_DEB_WEXT);
458 426
459 if (vwrq->flags == IW_RETRY_LIMIT) { 427 if ((vwrq->flags & IW_RETRY_TYPE) != IW_RETRY_LIMIT)
460 /* The MAC has a 4-bit Total_Tx_Count register 428 return -EOPNOTSUPP;
461 Total_Tx_Count = 1 + Tx_Retry_Count */ 429
430 /* The MAC has a 4-bit Total_Tx_Count register
431 Total_Tx_Count = 1 + Tx_Retry_Count */
462#define TX_RETRY_MIN 0 432#define TX_RETRY_MIN 0
463#define TX_RETRY_MAX 14 433#define TX_RETRY_MAX 14
464 if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX) 434 if (vwrq->value < TX_RETRY_MIN || vwrq->value > TX_RETRY_MAX)
465 return -EINVAL; 435 return -EINVAL;
466 436
467 /* Adding 1 to convert retry count to try count */ 437 /* Add 1 to convert retry count to try count */
468 priv->txretrycount = vwrq->value + 1; 438 if (vwrq->flags & IW_RETRY_SHORT)
439 slimit = (u16) (vwrq->value + 1);
440 else if (vwrq->flags & IW_RETRY_LONG)
441 llimit = (u16) (vwrq->value + 1);
442 else
443 slimit = llimit = (u16) (vwrq->value + 1); /* set both */
469 444
470 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SNMP_MIB, 445 if (llimit) {
471 CMD_ACT_SET, 446 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT,
472 CMD_OPTION_WAITFORRSP, 447 llimit);
473 OID_802_11_TX_RETRYCOUNT, NULL); 448 if (ret)
449 goto out;
450 }
474 451
452 if (slimit) {
453 /* txretrycount follows the short retry limit */
454 priv->txretrycount = slimit;
455 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT,
456 slimit);
475 if (ret) 457 if (ret)
476 goto out; 458 goto out;
477 } else {
478 return -EOPNOTSUPP;
479 } 459 }
480 460
481out: 461out:
@@ -488,22 +468,30 @@ static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info,
488{ 468{
489 struct lbs_private *priv = dev->priv; 469 struct lbs_private *priv = dev->priv;
490 int ret = 0; 470 int ret = 0;
471 u16 val = 0;
491 472
492 lbs_deb_enter(LBS_DEB_WEXT); 473 lbs_deb_enter(LBS_DEB_WEXT);
493 474
494 priv->txretrycount = 0;
495 ret = lbs_prepare_and_send_command(priv,
496 CMD_802_11_SNMP_MIB,
497 CMD_ACT_GET, CMD_OPTION_WAITFORRSP,
498 OID_802_11_TX_RETRYCOUNT, NULL);
499 if (ret)
500 goto out;
501
502 vwrq->disabled = 0; 475 vwrq->disabled = 0;
503 if (!vwrq->flags) { 476
504 vwrq->flags = IW_RETRY_LIMIT; 477 if (vwrq->flags & IW_RETRY_LONG) {
478 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_LONG_RETRY_LIMIT, &val);
479 if (ret)
480 goto out;
481
482 /* Subtract 1 to convert try count to retry count */
483 vwrq->value = val - 1;
484 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
485 } else {
486 ret = lbs_get_snmp_mib(priv, SNMP_MIB_OID_SHORT_RETRY_LIMIT, &val);
487 if (ret)
488 goto out;
489
490 /* txretry count follows the short retry limit */
491 priv->txretrycount = val;
505 /* Subtract 1 to convert try count to retry count */ 492 /* Subtract 1 to convert try count to retry count */
506 vwrq->value = priv->txretrycount - 1; 493 vwrq->value = val - 1;
494 vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
507 } 495 }
508 496
509out: 497out:
@@ -693,22 +681,12 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
693 681
694 range->sensitivity = 0; 682 range->sensitivity = 0;
695 683
696 /* 684 /* Setup the supported power level ranges */
697 * Setup the supported power level ranges
698 */
699 memset(range->txpower, 0, sizeof(range->txpower)); 685 memset(range->txpower, 0, sizeof(range->txpower));
700 range->txpower[0] = 5; 686 range->txpower_capa = IW_TXPOW_DBM | IW_TXPOW_RANGE;
701 range->txpower[1] = 7; 687 range->txpower[0] = priv->txpower_min;
702 range->txpower[2] = 9; 688 range->txpower[1] = priv->txpower_max;
703 range->txpower[3] = 11; 689 range->num_txpower = 2;
704 range->txpower[4] = 13;
705 range->txpower[5] = 15;
706 range->txpower[6] = 17;
707 range->txpower[7] = 19;
708
709 range->num_txpower = 8;
710 range->txpower_capa = IW_TXPOW_DBM;
711 range->txpower_capa |= IW_TXPOW_RANGE;
712 690
713 range->event_capa[0] = (IW_EVENT_CAPA_K_0 | 691 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
714 IW_EVENT_CAPA_MASK(SIOCGIWAP) | 692 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
@@ -998,9 +976,11 @@ static int lbs_mesh_set_freq(struct net_device *dev,
998 if (fwrq->m != priv->curbssparams.channel) { 976 if (fwrq->m != priv->curbssparams.channel) {
999 lbs_deb_wext("mesh channel change forces eth disconnect\n"); 977 lbs_deb_wext("mesh channel change forces eth disconnect\n");
1000 if (priv->mode == IW_MODE_INFRA) 978 if (priv->mode == IW_MODE_INFRA)
1001 lbs_send_deauthentication(priv); 979 lbs_cmd_80211_deauthenticate(priv,
980 priv->curbssparams.bssid,
981 WLAN_REASON_DEAUTH_LEAVING);
1002 else if (priv->mode == IW_MODE_ADHOC) 982 else if (priv->mode == IW_MODE_ADHOC)
1003 lbs_stop_adhoc_network(priv); 983 lbs_adhoc_stop(priv);
1004 } 984 }
1005 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m); 985 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, fwrq->m);
1006 lbs_update_channel(priv); 986 lbs_update_channel(priv);
@@ -1045,6 +1025,18 @@ static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1045 new_rate); 1025 new_rate);
1046 goto out; 1026 goto out;
1047 } 1027 }
1028 if (priv->fwrelease < 0x09000000) {
1029 ret = lbs_set_power_adapt_cfg(priv, 0,
1030 POW_ADAPT_DEFAULT_P0,
1031 POW_ADAPT_DEFAULT_P1,
1032 POW_ADAPT_DEFAULT_P2);
1033 if (ret)
1034 goto out;
1035 }
1036 ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
1037 TPC_DEFAULT_P2, 1);
1038 if (ret)
1039 goto out;
1048 } 1040 }
1049 1041
1050 /* Try the newer command first (Firmware Spec 5.1 and above) */ 1042 /* Try the newer command first (Firmware Spec 5.1 and above) */
@@ -1612,12 +1604,26 @@ static int lbs_set_encodeext(struct net_device *dev,
1612 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags); 1604 set_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags);
1613 } 1605 }
1614 1606
1615 disable_wep (assoc_req); 1607 /* Only disable wep if necessary: can't waste time here. */
1608 if (priv->mac_control & CMD_ACT_MAC_WEP_ENABLE)
1609 disable_wep(assoc_req);
1616 } 1610 }
1617 1611
1618out: 1612out:
1619 if (ret == 0) { 1613 if (ret == 0) {
1620 lbs_postpone_association_work(priv); 1614 /* 802.1x and WPA rekeying must happen as quickly as possible,
1615 * especially during the 4-way handshake; thus if in
1616 * infrastructure mode, and either (a) 802.1x is enabled or
1617 * (b) WPA is being used, set the key right away.
1618 */
1619 if (assoc_req->mode == IW_MODE_INFRA &&
1620 ((assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_802_1X) ||
1621 (assoc_req->secinfo.key_mgmt & IW_AUTH_KEY_MGMT_PSK) ||
1622 assoc_req->secinfo.WPAenabled ||
1623 assoc_req->secinfo.WPA2enabled)) {
1624 lbs_do_association_work(priv);
1625 } else
1626 lbs_postpone_association_work(priv);
1621 } else { 1627 } else {
1622 lbs_cancel_association_work(priv); 1628 lbs_cancel_association_work(priv);
1623 } 1629 }
@@ -1725,13 +1731,17 @@ static int lbs_set_auth(struct net_device *dev,
1725 case IW_AUTH_TKIP_COUNTERMEASURES: 1731 case IW_AUTH_TKIP_COUNTERMEASURES:
1726 case IW_AUTH_CIPHER_PAIRWISE: 1732 case IW_AUTH_CIPHER_PAIRWISE:
1727 case IW_AUTH_CIPHER_GROUP: 1733 case IW_AUTH_CIPHER_GROUP:
1728 case IW_AUTH_KEY_MGMT:
1729 case IW_AUTH_DROP_UNENCRYPTED: 1734 case IW_AUTH_DROP_UNENCRYPTED:
1730 /* 1735 /*
1731 * libertas does not use these parameters 1736 * libertas does not use these parameters
1732 */ 1737 */
1733 break; 1738 break;
1734 1739
1740 case IW_AUTH_KEY_MGMT:
1741 assoc_req->secinfo.key_mgmt = dwrq->value;
1742 updated = 1;
1743 break;
1744
1735 case IW_AUTH_WPA_VERSION: 1745 case IW_AUTH_WPA_VERSION:
1736 if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) { 1746 if (dwrq->value & IW_AUTH_WPA_VERSION_DISABLED) {
1737 assoc_req->secinfo.WPAenabled = 0; 1747 assoc_req->secinfo.WPAenabled = 0;
@@ -1811,6 +1821,10 @@ static int lbs_get_auth(struct net_device *dev,
1811 lbs_deb_enter(LBS_DEB_WEXT); 1821 lbs_deb_enter(LBS_DEB_WEXT);
1812 1822
1813 switch (dwrq->flags & IW_AUTH_INDEX) { 1823 switch (dwrq->flags & IW_AUTH_INDEX) {
1824 case IW_AUTH_KEY_MGMT:
1825 dwrq->value = priv->secinfo.key_mgmt;
1826 break;
1827
1814 case IW_AUTH_WPA_VERSION: 1828 case IW_AUTH_WPA_VERSION:
1815 dwrq->value = 0; 1829 dwrq->value = 0;
1816 if (priv->secinfo.WPAenabled) 1830 if (priv->secinfo.WPAenabled)
@@ -1844,39 +1858,77 @@ static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info,
1844{ 1858{
1845 int ret = 0; 1859 int ret = 0;
1846 struct lbs_private *priv = dev->priv; 1860 struct lbs_private *priv = dev->priv;
1847 1861 s16 dbm = (s16) vwrq->value;
1848 u16 dbm;
1849 1862
1850 lbs_deb_enter(LBS_DEB_WEXT); 1863 lbs_deb_enter(LBS_DEB_WEXT);
1851 1864
1852 if (vwrq->disabled) { 1865 if (vwrq->disabled) {
1853 lbs_radio_ioctl(priv, RADIO_OFF); 1866 lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 0);
1854 return 0; 1867 goto out;
1855 } 1868 }
1856 1869
1857 priv->preamble = CMD_TYPE_AUTO_PREAMBLE; 1870 if (vwrq->fixed == 0) {
1858 1871 /* User requests automatic tx power control, however there are
1859 lbs_radio_ioctl(priv, RADIO_ON); 1872 * many auto tx settings. For now use firmware defaults until
1873 * we come up with a good way to expose these to the user. */
1874 if (priv->fwrelease < 0x09000000) {
1875 ret = lbs_set_power_adapt_cfg(priv, 1,
1876 POW_ADAPT_DEFAULT_P0,
1877 POW_ADAPT_DEFAULT_P1,
1878 POW_ADAPT_DEFAULT_P2);
1879 if (ret)
1880 goto out;
1881 }
1882 ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
1883 TPC_DEFAULT_P2, 1);
1884 if (ret)
1885 goto out;
1886 dbm = priv->txpower_max;
1887 } else {
1888 /* Userspace check in iwrange if it should use dBm or mW,
1889 * therefore this should never happen... Jean II */
1890 if ((vwrq->flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
1891 ret = -EOPNOTSUPP;
1892 goto out;
1893 }
1860 1894
1861 /* Userspace check in iwrange if it should use dBm or mW, 1895 /* Validate requested power level against firmware allowed
1862 * therefore this should never happen... Jean II */ 1896 * levels */
1863 if ((vwrq->flags & IW_TXPOW_TYPE) == IW_TXPOW_MWATT) { 1897 if (priv->txpower_min && (dbm < priv->txpower_min)) {
1864 return -EOPNOTSUPP; 1898 ret = -EINVAL;
1865 } else 1899 goto out;
1866 dbm = (u16) vwrq->value; 1900 }
1867 1901
1868 /* auto tx power control */ 1902 if (priv->txpower_max && (dbm > priv->txpower_max)) {
1903 ret = -EINVAL;
1904 goto out;
1905 }
1906 if (priv->fwrelease < 0x09000000) {
1907 ret = lbs_set_power_adapt_cfg(priv, 0,
1908 POW_ADAPT_DEFAULT_P0,
1909 POW_ADAPT_DEFAULT_P1,
1910 POW_ADAPT_DEFAULT_P2);
1911 if (ret)
1912 goto out;
1913 }
1914 ret = lbs_set_tpc_cfg(priv, 0, TPC_DEFAULT_P0, TPC_DEFAULT_P1,
1915 TPC_DEFAULT_P2, 1);
1916 if (ret)
1917 goto out;
1918 }
1869 1919
1870 if (vwrq->fixed == 0) 1920 /* If the radio was off, turn it on */
1871 dbm = 0xffff; 1921 if (!priv->radio_on) {
1922 ret = lbs_set_radio(priv, RADIO_PREAMBLE_AUTO, 1);
1923 if (ret)
1924 goto out;
1925 }
1872 1926
1873 lbs_deb_wext("txpower set %d dbm\n", dbm); 1927 lbs_deb_wext("txpower set %d dBm\n", dbm);
1874 1928
1875 ret = lbs_prepare_and_send_command(priv, 1929 ret = lbs_set_tx_power(priv, dbm);
1876 CMD_802_11_RF_TX_POWER,
1877 CMD_ACT_TX_POWER_OPT_SET_LOW,
1878 CMD_OPTION_WAITFORRSP, 0, (void *)&dbm);
1879 1930
1931out:
1880 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 1932 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
1881 return ret; 1933 return ret;
1882} 1934}
@@ -1928,6 +1980,11 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1928 1980
1929 lbs_deb_enter(LBS_DEB_WEXT); 1981 lbs_deb_enter(LBS_DEB_WEXT);
1930 1982
1983 if (!priv->radio_on) {
1984 ret = -EINVAL;
1985 goto out;
1986 }
1987
1931 /* Check the size of the string */ 1988 /* Check the size of the string */
1932 if (in_ssid_len > IW_ESSID_MAX_SIZE) { 1989 if (in_ssid_len > IW_ESSID_MAX_SIZE) {
1933 ret = -E2BIG; 1990 ret = -E2BIG;
@@ -2005,6 +2062,11 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2005 2062
2006 lbs_deb_enter(LBS_DEB_WEXT); 2063 lbs_deb_enter(LBS_DEB_WEXT);
2007 2064
2065 if (!priv->radio_on) {
2066 ret = -EINVAL;
2067 goto out;
2068 }
2069
2008 /* Check the size of the string */ 2070 /* Check the size of the string */
2009 if (dwrq->length > IW_ESSID_MAX_SIZE) { 2071 if (dwrq->length > IW_ESSID_MAX_SIZE) {
2010 ret = -E2BIG; 2072 ret = -E2BIG;
@@ -2046,6 +2108,9 @@ static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
2046 2108
2047 lbs_deb_enter(LBS_DEB_WEXT); 2109 lbs_deb_enter(LBS_DEB_WEXT);
2048 2110
2111 if (!priv->radio_on)
2112 return -EINVAL;
2113
2049 if (awrq->sa_family != ARPHRD_ETHER) 2114 if (awrq->sa_family != ARPHRD_ETHER)
2050 return -EINVAL; 2115 return -EINVAL;
2051 2116
diff --git a/drivers/net/wireless/libertas_tf/Makefile b/drivers/net/wireless/libertas_tf/Makefile
new file mode 100644
index 000000000000..ff5544d6ac9d
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/Makefile
@@ -0,0 +1,6 @@
1libertas_tf-objs := main.o cmd.o
2
3libertas_tf_usb-objs += if_usb.o
4
5obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf.o
6obj-$(CONFIG_LIBERTAS_THINFIRM_USB) += libertas_tf_usb.o
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
new file mode 100644
index 000000000000..fdbcf8ba3e8a
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -0,0 +1,669 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include "libertas_tf.h"
11
/* Allowed channel span per regulatory domain: start is inclusive, end is
 * exclusive (lbtf_geo_init() iterates ch < end).  The first (US) entry is
 * also the fallback when the region code matches no table entry. */
static const struct channel_range channel_ranges[] = {
	{ LBTF_REGDOMAIN_US, 1, 12 },
	{ LBTF_REGDOMAIN_CA, 1, 12 },
	{ LBTF_REGDOMAIN_EU, 1, 14 },
	{ LBTF_REGDOMAIN_JP, 1, 14 },
	{ LBTF_REGDOMAIN_SP, 1, 14 },
	{ LBTF_REGDOMAIN_FR, 1, 14 },
};
20
/* Region codes the firmware may legitimately report; used by
 * lbtf_update_hw_spec() to validate the code returned by CMD_GET_HW_SPEC
 * (unknown codes are forced to 0x10 / USA there). */
static u16 lbtf_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
{
	LBTF_REGDOMAIN_US, LBTF_REGDOMAIN_CA, LBTF_REGDOMAIN_EU,
	LBTF_REGDOMAIN_SP, LBTF_REGDOMAIN_FR, LBTF_REGDOMAIN_JP,
};
26
27static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv);
28
29
30/**
31 * lbtf_cmd_copyback - Simple callback that copies response back into command
32 *
33 * @priv A pointer to struct lbtf_private structure
34 * @extra A pointer to the original command structure for which
35 * 'resp' is a response
36 * @resp A pointer to the command response
37 *
38 * Returns: 0 on success, error on failure
39 */
40int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra,
41 struct cmd_header *resp)
42{
43 struct cmd_header *buf = (void *)extra;
44 uint16_t copy_len;
45
46 copy_len = min(le16_to_cpu(buf->size), le16_to_cpu(resp->size));
47 memcpy(buf, resp, copy_len);
48 return 0;
49}
50EXPORT_SYMBOL_GPL(lbtf_cmd_copyback);
51
52#define CHAN_TO_IDX(chan) ((chan) - 1)
53
54static void lbtf_geo_init(struct lbtf_private *priv)
55{
56 const struct channel_range *range = channel_ranges;
57 u8 ch;
58 int i;
59
60 for (i = 0; i < ARRAY_SIZE(channel_ranges); i++)
61 if (channel_ranges[i].regdomain == priv->regioncode) {
62 range = &channel_ranges[i];
63 break;
64 }
65
66 for (ch = priv->range.start; ch < priv->range.end; ch++)
67 priv->channels[CHAN_TO_IDX(ch)].flags = 0;
68}
69
/**
 * lbtf_update_hw_spec: Updates the hardware details.
 *
 * @priv	A pointer to struct lbtf_private structure
 *
 * Issues CMD_GET_HW_SPEC synchronously, then records capability flags,
 * normalised firmware version, region code and permanent MAC address
 * from the response.
 *
 * Returns: 0 on success, error on failure
 */
int lbtf_update_hw_spec(struct lbtf_private *priv)
{
	struct cmd_ds_get_hw_spec cmd;
	int ret = -1;
	u32 i;
	DECLARE_MAC_BUF(mac);

	memset(&cmd, 0, sizeof(cmd));
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	/* Current address is passed in; firmware echoes the permanent one */
	memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
	ret = lbtf_cmd_with_response(priv, CMD_GET_HW_SPEC, &cmd);
	if (ret)
		goto out;

	priv->fwcapinfo = le32_to_cpu(cmd.fwcapinfo);

	/* The firmware release is in an interesting format: the patch
	 * level is in the most significant nibble ... so fix that: */
	priv->fwrelease = le32_to_cpu(cmd.fwrelease);
	priv->fwrelease = (priv->fwrelease << 8) |
		(priv->fwrelease >> 24 & 0xff);

	printk(KERN_INFO "libertastf: %s, fw %u.%u.%up%u, cap 0x%08x\n",
	       print_mac(mac, cmd.permanentaddr),
	       priv->fwrelease >> 24 & 0xff,
	       priv->fwrelease >> 16 & 0xff,
	       priv->fwrelease >> 8 & 0xff,
	       priv->fwrelease & 0xff,
	       priv->fwcapinfo);

	/* Clamp region code to 8-bit since FW spec indicates that it should
	 * only ever be 8-bit, even though the field size is 16-bit.  Some
	 * firmware returns non-zero high 8 bits here.
	 */
	priv->regioncode = le16_to_cpu(cmd.regioncode) & 0xFF;

	for (i = 0; i < MRVDRV_MAX_REGION_CODE; i++) {
		/* use the region code to search for the index */
		if (priv->regioncode == lbtf_region_code_to_index[i])
			break;
	}

	/* if it's unidentified region code, use the default (USA) */
	if (i >= MRVDRV_MAX_REGION_CODE)
		priv->regioncode = 0x10;

	/* 0xff in the first octet means "no address set yet": adopt the
	 * firmware's permanent address */
	if (priv->current_addr[0] == 0xff)
		memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);

	SET_IEEE80211_PERM_ADDR(priv->hw, priv->current_addr);

	lbtf_geo_init(priv);
out:
	return ret;
}
132
/**
 * lbtf_set_channel: Set the radio channel
 *
 * @priv	A pointer to struct lbtf_private structure
 * @channel	The desired channel, or 0 to clear a locked channel
 *
 * Returns: 0 on success, error on failure
 */
int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
{
	struct cmd_ds_802_11_rf_channel cmd;

	/* hdr.command/seqnum are filled in by the command path
	 * (see __lbtf_cmd_async) */
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
	cmd.channel = cpu_to_le16(channel);

	return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
}
151
/**
 * lbtf_beacon_set - upload a beacon template to the firmware
 *
 * @priv	A pointer to struct lbtf_private structure
 * @beacon	skb holding the beacon frame; copied, not consumed
 *
 * The command is issued asynchronously (no response wait).
 *
 * Returns: 0 on success, -1 if the beacon exceeds MRVL_MAX_BCN_SIZE.
 */
int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
{
	struct cmd_ds_802_11_beacon_set cmd;
	int size;

	if (beacon->len > MRVL_MAX_BCN_SIZE)
		return -1;
	/* Only transmit the used part of the template buffer */
	size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
	cmd.hdr.size = cpu_to_le16(size);
	cmd.len = cpu_to_le16(beacon->len);
	memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);

	lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
	return 0;
}
167
/**
 * lbtf_beacon_ctrl - enable/disable beaconing and set the beacon interval
 *
 * @priv		A pointer to struct lbtf_private structure
 * @beacon_enable	nonzero to enable beaconing
 * @beacon_int		beacon interval to program
 *
 * Asynchronous; always returns 0.
 */
int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
		     int beacon_int) {
	struct cmd_ds_802_11_beacon_control cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(CMD_ACT_SET);
	cmd.beacon_enable = cpu_to_le16(beacon_enable);
	cmd.beacon_period = cpu_to_le16(beacon_int);

	lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
	return 0;
}
180
/* Append a prepared command node to the pending queue under driver_lock.
 * NULL nodes and nodes whose buffer has no size set are silently ignored. */
static void lbtf_queue_cmd(struct lbtf_private *priv,
			   struct cmd_ctrl_node *cmdnode)
{
	unsigned long flags;

	if (!cmdnode)
		return;

	/* An empty buffer means the node was never filled in */
	if (!cmdnode->cmdbuf->size)
		return;

	cmdnode->result = 0;
	spin_lock_irqsave(&priv->driver_lock, flags);
	list_add_tail(&cmdnode->list, &priv->cmdpendingq);
	spin_unlock_irqrestore(&priv->driver_lock, flags);
}
197
/* Hand one command node to the hardware via hw_host_to_card() and arm the
 * command timeout: 5s normally, only 1s when the transport send failed so
 * the timer-driven retry/reset path kicks in sooner. */
static void lbtf_submit_command(struct lbtf_private *priv,
				struct cmd_ctrl_node *cmdnode)
{
	unsigned long flags;
	struct cmd_header *cmd;
	uint16_t cmdsize;
	uint16_t command;
	int timeo = 5 * HZ;
	int ret;

	cmd = cmdnode->cmdbuf;

	spin_lock_irqsave(&priv->driver_lock, flags);
	/* cur_cmd marks the in-flight command for the response path */
	priv->cur_cmd = cmdnode;
	cmdsize = le16_to_cpu(cmd->size);
	command = le16_to_cpu(cmd->command);
	ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
	spin_unlock_irqrestore(&priv->driver_lock, flags);

	if (ret)
		/* Let the timer kick in and retry, and potentially reset
		  the whole thing if the condition persists */
		timeo = HZ;

	/* Setup the timer after transmit command */
	mod_timer(&priv->command_timer, jiffies + timeo);
}
225
/**
 * This function inserts command node to cmdfreeq
 * after cleans it. Requires priv->driver_lock held.
 *
 * @priv	A pointer to struct lbtf_private structure
 * @cmdnode	node to scrub and recycle; NULL is a no-op
 */
static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
					  struct cmd_ctrl_node *cmdnode)
{
	if (!cmdnode)
		return;

	/* Drop any stale callback and wipe the buffer before reuse */
	cmdnode->callback = NULL;
	cmdnode->callback_arg = 0;

	memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);

	list_add_tail(&cmdnode->list, &priv->cmdfreeq);
}
243
244static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
245 struct cmd_ctrl_node *ptempcmd)
246{
247 unsigned long flags;
248
249 spin_lock_irqsave(&priv->driver_lock, flags);
250 __lbtf_cleanup_and_insert_cmd(priv, ptempcmd);
251 spin_unlock_irqrestore(&priv->driver_lock, flags);
252}
253
/* Record @result, wake the synchronous waiter (__lbtf_cmd), and — when no
 * async callback will still consume the node — recycle it immediately.
 * Clears cur_cmd.  Called with priv->driver_lock held (see
 * __lbtf_cleanup_and_insert_cmd, which requires it). */
void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
			  int result)
{
	cmd->result = result;
	cmd->cmdwaitqwoken = 1;
	wake_up_interruptible(&cmd->cmdwait_q);

	if (!cmd->callback)
		__lbtf_cleanup_and_insert_cmd(priv, cmd);
	priv->cur_cmd = NULL;
}
265
/**
 * lbtf_cmd_set_mac_multicast_addr - push the driver's multicast list to FW
 *
 * @priv	A pointer to struct lbtf_private structure
 *
 * Asynchronous; always returns 0.
 *
 * NOTE(review): cmd is not zeroed and only the first nr_of_adrs entries
 * of maclist are copied, so the remainder of the (full-sized) command
 * carries uninitialized stack bytes — presumably ignored by firmware,
 * but worth confirming.
 */
int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
{
	struct cmd_ds_mac_multicast_addr cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(CMD_ACT_SET);

	cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
	memcpy(cmd.maclist, priv->multicastlist,
	       priv->nr_of_multicastmacaddr * ETH_ALEN);

	lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
	return 0;
}
280
/**
 * lbtf_set_mode - asynchronously set the firmware operating mode
 *
 * @priv	A pointer to struct lbtf_private structure
 * @mode	one of enum lbtf_mode
 */
void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
{
	struct cmd_ds_set_mode cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.mode = cpu_to_le16(mode);
	lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
}
289
/**
 * lbtf_set_bssid - asynchronously set or clear the BSSID
 *
 * @priv	A pointer to struct lbtf_private structure
 * @activate	true to set @bssid, false to deactivate
 * @bssid	BSSID to program; only read when @activate is true
 */
void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid)
{
	struct cmd_ds_set_bssid cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.activate = activate ? 1 : 0;
	if (activate)
		memcpy(cmd.bssid, bssid, ETH_ALEN);

	lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
}
301
/**
 * lbtf_set_mac_address - asynchronously program the hardware MAC address
 *
 * @priv	A pointer to struct lbtf_private structure
 * @mac_addr	ETH_ALEN-byte address to set
 *
 * Always returns 0 (the command is fire-and-forget).
 */
int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
{
	struct cmd_ds_802_11_mac_address cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(CMD_ACT_SET);

	memcpy(cmd.macadd, mac_addr, ETH_ALEN);

	lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
	return 0;
}
314
/**
 * lbtf_set_radio_control - program preamble mode and RF on/off state
 *
 * @priv	A pointer to struct lbtf_private structure
 *
 * Builds the control word from priv->preamble (defaulting to auto) and
 * priv->radioon, then issues CMD_802_11_RADIO_CONTROL synchronously.
 *
 * Returns: 0 on success, error from the command path on failure.
 */
int lbtf_set_radio_control(struct lbtf_private *priv)
{
	int ret = 0;
	struct cmd_ds_802_11_radio_control cmd;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(CMD_ACT_SET);

	switch (priv->preamble) {
	case CMD_TYPE_SHORT_PREAMBLE:
		cmd.control = cpu_to_le16(SET_SHORT_PREAMBLE);
		break;

	case CMD_TYPE_LONG_PREAMBLE:
		cmd.control = cpu_to_le16(SET_LONG_PREAMBLE);
		break;

	case CMD_TYPE_AUTO_PREAMBLE:
	default:
		cmd.control = cpu_to_le16(SET_AUTO_PREAMBLE);
		break;
	}

	/* Merge the RF on/off bit into the (little-endian) control word */
	if (priv->radioon)
		cmd.control |= cpu_to_le16(TURN_ON_RF);
	else
		cmd.control &= cpu_to_le16(~TURN_ON_RF);

	ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
	return ret;
}
346
/**
 * lbtf_set_mac_control - asynchronously push priv->mac_control to firmware
 *
 * @priv	A pointer to struct lbtf_private structure
 */
void lbtf_set_mac_control(struct lbtf_private *priv)
{
	struct cmd_ds_mac_control cmd;
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(priv->mac_control);
	cmd.reserved = 0;

	lbtf_cmd_async(priv, CMD_MAC_CONTROL,
		&cmd.hdr, sizeof(cmd));
}
357
358/**
359 * lbtf_allocate_cmd_buffer - Allocates cmd buffer, links it to free cmd queue
360 *
361 * @priv A pointer to struct lbtf_private structure
362 *
363 * Returns: 0 on success.
364 */
365int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
366{
367 u32 bufsize;
368 u32 i;
369 struct cmd_ctrl_node *cmdarray;
370
371 /* Allocate and initialize the command array */
372 bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
373 cmdarray = kzalloc(bufsize, GFP_KERNEL);
374 if (!cmdarray)
375 return -1;
376 priv->cmd_array = cmdarray;
377
378 /* Allocate and initialize each command buffer in the command array */
379 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
380 cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
381 if (!cmdarray[i].cmdbuf)
382 return -1;
383 }
384
385 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
386 init_waitqueue_head(&cmdarray[i].cmdwait_q);
387 lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
388 }
389 return 0;
390}
391
392/**
393 * lbtf_free_cmd_buffer - Frees the cmd buffer.
394 *
395 * @priv A pointer to struct lbtf_private structure
396 *
397 * Returns: 0
398 */
399int lbtf_free_cmd_buffer(struct lbtf_private *priv)
400{
401 struct cmd_ctrl_node *cmdarray;
402 unsigned int i;
403
404 /* need to check if cmd array is allocated or not */
405 if (priv->cmd_array == NULL)
406 return 0;
407
408 cmdarray = priv->cmd_array;
409
410 /* Release shared memory buffers */
411 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
412 kfree(cmdarray[i].cmdbuf);
413 cmdarray[i].cmdbuf = NULL;
414 }
415
416 /* Release cmd_ctrl_node */
417 kfree(priv->cmd_array);
418 priv->cmd_array = NULL;
419
420 return 0;
421}
422
/**
 * lbtf_get_cmd_ctrl_node - Gets free cmd node from free cmd queue.
 *
 * @priv	A pointer to struct lbtf_private structure
 *
 * Takes priv->driver_lock internally.
 *
 * Returns: pointer to a struct cmd_ctrl_node or NULL if none available.
 */
static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
{
	struct cmd_ctrl_node *tempnode;
	unsigned long flags;

	if (!priv)
		return NULL;

	spin_lock_irqsave(&priv->driver_lock, flags);

	if (!list_empty(&priv->cmdfreeq)) {
		tempnode = list_first_entry(&priv->cmdfreeq,
					    struct cmd_ctrl_node, list);
		list_del(&tempnode->list);
	} else
		tempnode = NULL;

	spin_unlock_irqrestore(&priv->driver_lock, flags);

	return tempnode;
}
451
/**
 * lbtf_execute_next_command: execute next command in cmd pending queue.
 *
 * @priv	A pointer to struct lbtf_private structure
 *
 * Dequeues the head of cmdpendingq and submits it, unless a command is
 * already in flight (cur_cmd set).
 *
 * Returns: 0 on success, -1 when a command is already outstanding.
 */
int lbtf_execute_next_command(struct lbtf_private *priv)
{
	struct cmd_ctrl_node *cmdnode = NULL;
	struct cmd_header *cmd;
	unsigned long flags;

	/* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
	 * only caller to us is lbtf_thread() and we get even when a
	 * data packet is received */

	spin_lock_irqsave(&priv->driver_lock, flags);

	/* Only one command may be outstanding at a time */
	if (priv->cur_cmd) {
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		return -1;
	}

	if (!list_empty(&priv->cmdpendingq)) {
		cmdnode = list_first_entry(&priv->cmdpendingq,
					   struct cmd_ctrl_node, list);
	}

	if (cmdnode) {
		cmd = cmdnode->cmdbuf;

		list_del(&cmdnode->list);
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		lbtf_submit_command(priv, cmdnode);
	} else
		spin_unlock_irqrestore(&priv->driver_lock, flags);
	return 0;
}
491
/* Core async command issue: grab a free node, copy the caller's command
 * into it, stamp command id / size / sequence number, queue it, and kick
 * the command worker.
 *
 * Returns the queued node, ERR_PTR(-ENOENT) if the device is gone, or
 * ERR_PTR(-ENOBUFS) when no free node is available (the worker is still
 * kicked so the queue keeps draining). */
static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
	uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
	int (*callback)(struct lbtf_private *, unsigned long,
			struct cmd_header *),
	unsigned long callback_arg)
{
	struct cmd_ctrl_node *cmdnode;

	if (priv->surpriseremoved)
		return ERR_PTR(-ENOENT);

	cmdnode = lbtf_get_cmd_ctrl_node(priv);
	if (cmdnode == NULL) {
		/* Wake up main thread to execute next command */
		queue_work(lbtf_wq, &priv->cmd_work);
		return ERR_PTR(-ENOBUFS);
	}

	cmdnode->callback = callback;
	cmdnode->callback_arg = callback_arg;

	/* Copy the incoming command to the buffer */
	memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);

	/* Set sequence number, clean result, move to buffer */
	priv->seqnum++;
	cmdnode->cmdbuf->command = cpu_to_le16(command);
	cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
	cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
	cmdnode->cmdbuf->result = 0;
	cmdnode->cmdwaitqwoken = 0;
	lbtf_queue_cmd(priv, cmdnode);
	queue_work(lbtf_wq, &priv->cmd_work);

	return cmdnode;
}
528
529void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
530 struct cmd_header *in_cmd, int in_cmd_size)
531{
532 __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
533}
534
535int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
536 struct cmd_header *in_cmd, int in_cmd_size,
537 int (*callback)(struct lbtf_private *,
538 unsigned long, struct cmd_header *),
539 unsigned long callback_arg)
540{
541 struct cmd_ctrl_node *cmdnode;
542 unsigned long flags;
543 int ret = 0;
544
545 cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
546 callback, callback_arg);
547 if (IS_ERR(cmdnode))
548 return PTR_ERR(cmdnode);
549
550 might_sleep();
551 ret = wait_event_interruptible(cmdnode->cmdwait_q,
552 cmdnode->cmdwaitqwoken);
553 if (ret) {
554 printk(KERN_DEBUG
555 "libertastf: command 0x%04x interrupted by signal",
556 command);
557 return ret;
558 }
559
560 spin_lock_irqsave(&priv->driver_lock, flags);
561 ret = cmdnode->result;
562 if (ret)
563 printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n",
564 command, ret);
565
566 __lbtf_cleanup_and_insert_cmd(priv, cmdnode);
567 spin_unlock_irqrestore(&priv->driver_lock, flags);
568
569 return ret;
570}
571EXPORT_SYMBOL_GPL(__lbtf_cmd);
572
/* Call holding driver_lock.  Flags that a command response has arrived
 * and kicks the command worker to process it. */
void lbtf_cmd_response_rx(struct lbtf_private *priv)
{
	priv->cmd_response_rxed = 1;
	queue_work(lbtf_wq, &priv->cmd_work);
}
EXPORT_SYMBOL_GPL(lbtf_cmd_response_rx);
580
/**
 * lbtf_process_rx_command - validate and complete the in-flight command
 *
 * @priv	A pointer to struct lbtf_private structure
 *
 * Matches the response in priv->cmd_resp_buff against cur_cmd (sequence
 * number and command id), cancels the command timer, runs the node's
 * callback if any, and completes the command.
 *
 * Returns: 0 on success, -1 when there is no current command, the
 * response does not match, or the command failed.
 */
int lbtf_process_rx_command(struct lbtf_private *priv)
{
	uint16_t respcmd, curcmd;
	struct cmd_header *resp;
	int ret = 0;
	unsigned long flags;
	uint16_t result;

	mutex_lock(&priv->lock);
	spin_lock_irqsave(&priv->driver_lock, flags);

	if (!priv->cur_cmd) {
		ret = -1;
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		goto done;
	}

	resp = (void *)priv->cmd_resp_buff;
	curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command);
	respcmd = le16_to_cpu(resp->command);
	result = le16_to_cpu(resp->result);

	if (net_ratelimit())
		printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n",
			     respcmd, le16_to_cpu(resp->seqnum),
			     le16_to_cpu(resp->size));

	/* Stale or mismatched response: drop it */
	if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
		goto done;
	}
	if (respcmd != CMD_RET(curcmd)) {
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
		goto done;
	}

	if (resp->result == cpu_to_le16(0x0004)) {
		/* 0x0004 means -EAGAIN. Drop the response, let it time out
		   and be resubmitted */
		spin_unlock_irqrestore(&priv->driver_lock, flags);
		ret = -1;
		goto done;
	}

	/* Now we got response from FW, cancel the command timer */
	del_timer(&priv->command_timer);
	priv->cmd_timed_out = 0;
	if (priv->nr_retries)
		priv->nr_retries = 0;

	/* If the command is not successful, cleanup and return failure */
	if ((result != 0 || !(respcmd & 0x8000))) {
		/*
		 * Handling errors here
		 */
		switch (respcmd) {
		case CMD_RET(CMD_GET_HW_SPEC):
		case CMD_RET(CMD_802_11_RESET):
			printk(KERN_DEBUG "libertastf: reset failed\n");
			break;

		}
		lbtf_complete_command(priv, priv->cur_cmd, result);
		spin_unlock_irqrestore(&priv->driver_lock, flags);

		ret = -1;
		goto done;
	}

	spin_unlock_irqrestore(&priv->driver_lock, flags);

	/* Run the callback outside the spinlock */
	if (priv->cur_cmd && priv->cur_cmd->callback) {
		ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg,
				resp);
	}
	spin_lock_irqsave(&priv->driver_lock, flags);

	if (priv->cur_cmd) {
		/* Clean up and Put current command back to cmdfreeq */
		lbtf_complete_command(priv, priv->cur_cmd, result);
	}
	spin_unlock_irqrestore(&priv->driver_lock, flags);

done:
	mutex_unlock(&priv->lock);
	return ret;
}
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
new file mode 100644
index 000000000000..1cc03a8dd67a
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -0,0 +1,766 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include <linux/delay.h>
11#include <linux/moduleparam.h>
12#include <linux/firmware.h>
13#include <linux/netdevice.h>
14#include <linux/usb.h>
15
16#define DRV_NAME "lbtf_usb"
17
18#include "libertas_tf.h"
19#include "if_usb.h"
20
21#define MESSAGE_HEADER_LEN 4
22
23static char *lbtf_fw_name = "lbtf_usb.bin";
24module_param_named(fw_name, lbtf_fw_name, charp, 0644);
25
26static struct usb_device_id if_usb_table[] = {
27 /* Enter the device signature inside */
28 { USB_DEVICE(0x1286, 0x2001) },
29 { USB_DEVICE(0x05a3, 0x8388) },
30 {} /* Terminating entry */
31};
32
33MODULE_DEVICE_TABLE(usb, if_usb_table);
34
35static void if_usb_receive(struct urb *urb);
36static void if_usb_receive_fwload(struct urb *urb);
37static int if_usb_prog_firmware(struct if_usb_card *cardp);
38static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
39 uint8_t *payload, uint16_t nb);
40static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
41 uint16_t nb, u8 data);
42static void if_usb_free(struct if_usb_card *cardp);
43static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
44static int if_usb_reset_device(struct if_usb_card *cardp);
45
/**
 * if_usb_write_bulk_callback - call back to handle URB status
 *
 * @param urb		pointer to urb structure
 *
 * Only logs failures; successful completions need no action.
 */
static void if_usb_write_bulk_callback(struct urb *urb)
{
	if (urb->status != 0)
		printk(KERN_INFO "libertastf: URB in failure status: %d\n",
		       urb->status);
}
57
/**
 * if_usb_free - free tx/rx urb, skb and rx buffer
 *
 * @param cardp	pointer if_usb_card
 *
 * Kills in-flight URBs before freeing them; safe on already-NULL URBs
 * (usb_kill_urb/usb_free_urb tolerate NULL).
 */
static void if_usb_free(struct if_usb_card *cardp)
{
	/* Unlink tx & rx urb */
	usb_kill_urb(cardp->tx_urb);
	usb_kill_urb(cardp->rx_urb);
	usb_kill_urb(cardp->cmd_urb);

	usb_free_urb(cardp->tx_urb);
	cardp->tx_urb = NULL;

	usb_free_urb(cardp->rx_urb);
	cardp->rx_urb = NULL;

	usb_free_urb(cardp->cmd_urb);
	cardp->cmd_urb = NULL;

	kfree(cardp->ep_out_buf);
	cardp->ep_out_buf = NULL;
}
82
83static void if_usb_setup_firmware(struct lbtf_private *priv)
84{
85 struct if_usb_card *cardp = priv->card;
86 struct cmd_ds_set_boot2_ver b2_cmd;
87
88 if_usb_submit_rx_urb(cardp);
89 b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
90 b2_cmd.action = 0;
91 b2_cmd.version = cardp->boot2_version;
92
93 if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
94 printk(KERN_INFO "libertastf: setting boot2 version failed\n");
95}
96
/* Firmware-download watchdog.  If the download has not finished when the
 * timer fires, mark the device as gone; either way wake the waiter so
 * the download path can proceed or bail out. */
static void if_usb_fw_timeo(unsigned long priv)
{
	struct if_usb_card *cardp = (void *)priv;

	if (!cardp->fwdnldover)
		/* Download timed out */
		cardp->priv->surpriseremoved = 1;
	wake_up(&cardp->fw_wq);
}
106
/**
 * if_usb_probe - sets the configuration values
 *
 * @intf	USB interface the core matched against if_usb_table
 * @id		pointer to usb_device_id
 *
 * Allocates the card state, discovers the bulk-in/out endpoints,
 * allocates URBs and the tx buffer, and registers with the core driver
 * via lbtf_add_card().
 *
 * Returns: 0 on success, error code on failure
 */
static int if_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_device *udev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	struct lbtf_private *priv;
	struct if_usb_card *cardp;
	int i;

	udev = interface_to_usbdev(intf);

	cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
	if (!cardp)
		goto error;

	setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
	init_waitqueue_head(&cardp->fw_wq);

	cardp->udev = udev;
	iface_desc = intf->cur_altsetting;

	/* Locate the bulk-in and bulk-out endpoints */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		if (usb_endpoint_is_bulk_in(endpoint)) {
			cardp->ep_in_size =
				le16_to_cpu(endpoint->wMaxPacketSize);
			cardp->ep_in = usb_endpoint_num(endpoint);
		} else if (usb_endpoint_is_bulk_out(endpoint)) {
			cardp->ep_out_size =
				le16_to_cpu(endpoint->wMaxPacketSize);
			cardp->ep_out = usb_endpoint_num(endpoint);
		}
	}
	if (!cardp->ep_out_size || !cardp->ep_in_size)
		/* Endpoints not found */
		goto dealloc;

	cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!cardp->rx_urb)
		goto dealloc;

	cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!cardp->tx_urb)
		goto dealloc;

	cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!cardp->cmd_urb)
		goto dealloc;

	cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
				    GFP_KERNEL);
	if (!cardp->ep_out_buf)
		goto dealloc;

	priv = lbtf_add_card(cardp, &udev->dev);
	if (!priv)
		goto dealloc;

	cardp->priv = priv;

	/* Hook the USB transport into the core driver */
	priv->hw_host_to_card = if_usb_host_to_card;
	priv->hw_prog_firmware = if_usb_prog_firmware;
	priv->hw_reset_device = if_usb_reset_device;
	cardp->boot2_version = udev->descriptor.bcdDevice;

	usb_get_dev(udev);
	usb_set_intfdata(intf, cardp);

	return 0;

dealloc:
	if_usb_free(cardp);
error:
	return -ENOMEM;
}
191
/**
 * if_usb_disconnect - free resource and cleanup
 *
 * @intf	USB interface structure
 *
 * Resets the device, unregisters from the core driver, then frees URBs
 * and drops the device reference taken at probe.
 */
static void if_usb_disconnect(struct usb_interface *intf)
{
	struct if_usb_card *cardp = usb_get_intfdata(intf);
	struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;

	if_usb_reset_device(cardp);

	if (priv)
		lbtf_remove_card(priv);

	/* Unlink and free urb */
	if_usb_free(cardp);

	usb_set_intfdata(intf, NULL);
	usb_put_dev(interface_to_usbdev(intf));
}
213
/**
 * if_usb_send_fw_pkt - This function downloads the FW
 *
 * @cardp	pointer to struct if_usb_card
 *
 * Sends one firmware block; on a previous CRC failure the same block is
 * resent (position and sequence number are rewound first).
 *
 * Returns: 0
 */
static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
{
	struct fwdata *fwdata = cardp->ep_out_buf;
	u8 *firmware = (u8 *) cardp->fw->data;

	/* If we got a CRC failure on the last block, back
	  up and retry it */
	if (!cardp->CRC_OK) {
		cardp->totalbytes = cardp->fwlastblksent;
		cardp->fwseqnum--;
	}

	/* struct fwdata (which we sent to the card) has an
	  extra __le32 field in between the header and the data,
	  which is not in the struct fwheader in the actual
	  firmware binary. Insert the seqnum in the middle... */
	memcpy(&fwdata->hdr, &firmware[cardp->totalbytes],
	       sizeof(struct fwheader));

	cardp->fwlastblksent = cardp->totalbytes;
	cardp->totalbytes += sizeof(struct fwheader);

	memcpy(fwdata->data, &firmware[cardp->totalbytes],
	       le32_to_cpu(fwdata->hdr.datalength));

	fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
	cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);

	usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
		     le32_to_cpu(fwdata->hdr.datalength), 0);

	if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK))
		/* Host has finished FW downloading
		 * Downloading FW JUMP BLOCK
		 */
		cardp->fwfinalblk = 1;

	return 0;
}
260
/* Send a CMD_802_11_RESET (action HALT) to the firmware, then perform a
 * USB-level reset of the device.  The 100 ms sleeps give the firmware
 * time to act before/after the bus reset. */
static int if_usb_reset_device(struct if_usb_card *cardp)
{
	/* Command is built in ep_out_buf, after the 4-byte type marker */
	struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
	int ret;

	*(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);

	cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
	cmd->hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_reset));
	cmd->hdr.result = cpu_to_le16(0);
	cmd->hdr.seqnum = cpu_to_le16(0x5a5a);
	cmd->action = cpu_to_le16(CMD_ACT_HALT);
	usb_tx_block(cardp, cardp->ep_out_buf,
		     4 + sizeof(struct cmd_ds_802_11_reset), 0);

	msleep(100);
	ret = usb_reset_device(cardp->udev);
	msleep(100);

	return ret;
}
EXPORT_SYMBOL_GPL(if_usb_reset_device);
283
/**
 * usb_tx_block - transfer data to the device
 *
 * @cardp	pointer to struct if_usb_card
 * @payload	pointer to payload data
 * @nb		data length
 * @data	non-zero for data, zero for commands
 *
 * Data and commands use separate URBs so one of each may be in flight.
 *
 * Returns: 0 on success, nonzero otherwise.
 */
static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
			uint16_t nb, u8 data)
{
	struct urb *urb;

	/* check if device is removed */
	if (cardp->priv->surpriseremoved)
		return -1;

	if (data)
		urb = cardp->tx_urb;
	else
		urb = cardp->cmd_urb;

	usb_fill_bulk_urb(urb, cardp->udev,
			  usb_sndbulkpipe(cardp->udev,
					  cardp->ep_out),
			  payload, nb, if_usb_write_bulk_callback, cardp);

	urb->transfer_flags |= URB_ZERO_PACKET;

	if (usb_submit_urb(urb, GFP_ATOMIC))
		return -1;
	return 0;
}
319
/* Allocate a fresh rx skb and post the rx URB with the given completion
 * handler.  On submit failure the skb is freed and rx_skb cleared.
 * Returns 0 on success, -1 on allocation or submit failure. */
static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
				  void (*callbackfn)(struct urb *urb))
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
	if (!skb)
		return -1;

	cardp->rx_skb = skb;

	/* Fill the receive configuration URB and initialise the Rx call back */
	usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
			  usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
			  (void *) (skb->tail),
			  MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);

	cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;

	if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) {
		kfree_skb(skb);
		cardp->rx_skb = NULL;
		return -1;
	} else
		return 0;
}
346
/* Post an rx URB whose completion handler drives firmware download. */
static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
{
	return __if_usb_submit_rx_urb(cardp, &if_usb_receive_fwload);
}
351
/* Post an rx URB for normal (post-download) operation. */
static int if_usb_submit_rx_urb(struct if_usb_card *cardp)
{
	return __if_usb_submit_rx_urb(cardp, &if_usb_receive);
}
356
/* Rx completion handler used during firmware download.  Handles, in
 * order: the post-download "firmware ready" event, boot command
 * responses, and per-block CRC sync headers that drive
 * if_usb_send_fw_pkt(). */
static void if_usb_receive_fwload(struct urb *urb)
{
	struct if_usb_card *cardp = urb->context;
	struct sk_buff *skb = cardp->rx_skb;
	struct fwsyncheader *syncfwheader;
	struct bootcmdresp bcmdresp;

	if (urb->status) {
		kfree_skb(skb);
		return;
	}

	/* Download finished: we only wait for the firmware-ready event */
	if (cardp->fwdnldover) {
		__le32 *tmp = (__le32 *)(skb->data);

		if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
		    tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY))
			/* Firmware ready event received */
			wake_up(&cardp->fw_wq);
		else
			if_usb_submit_rx_urb_fwload(cardp);
		kfree_skb(skb);
		return;
	}
	/* Still waiting for a boot command response */
	if (cardp->bootcmdresp <= 0) {
		memcpy(&bcmdresp, skb->data, sizeof(bcmdresp));

		/* Old firmware (< 0x3106) sends no boot response: assume OK */
		if (le16_to_cpu(cardp->udev->descriptor.bcdDevice) < 0x3106) {
			kfree_skb(skb);
			if_usb_submit_rx_urb_fwload(cardp);
			cardp->bootcmdresp = 1;
			/* Received valid boot command response */
			return;
		}
		if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
			if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
			    bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
			    bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION))
				cardp->bootcmdresp = -1;
		} else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB &&
			   bcmdresp.result == BOOT_CMD_RESP_OK)
			cardp->bootcmdresp = 1;

		kfree_skb(skb);
		if_usb_submit_rx_urb_fwload(cardp);
		return;
	}

	syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
	if (!syncfwheader) {
		kfree_skb(skb);
		return;
	}

	memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));

	/* cmd == 0 means the last block was received intact */
	if (!syncfwheader->cmd)
		cardp->CRC_OK = 1;
	else
		cardp->CRC_OK = 0;
	kfree_skb(skb);

	/* reschedule timer for 200ms hence */
	mod_timer(&cardp->fw_timeout, jiffies + (HZ/5));

	if (cardp->fwfinalblk) {
		cardp->fwdnldover = 1;
		goto exit;
	}

	if_usb_send_fw_pkt(cardp);

 exit:
	if_usb_submit_rx_urb_fwload(cardp);

	kfree(syncfwheader);

	return;
}
436
437#define MRVDRV_MIN_PKT_LEN 30
438
439static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
440 struct if_usb_card *cardp,
441 struct lbtf_private *priv)
442{
443 if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
444 || recvlength < MRVDRV_MIN_PKT_LEN) {
445 kfree_skb(skb);
446 return;
447 }
448
449 skb_put(skb, recvlength);
450 skb_pull(skb, MESSAGE_HEADER_LEN);
451 lbtf_rx(priv, skb);
452}
453
454static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
455 struct sk_buff *skb,
456 struct if_usb_card *cardp,
457 struct lbtf_private *priv)
458{
459 if (recvlength > LBS_CMD_BUFFER_SIZE) {
460 kfree_skb(skb);
461 return;
462 }
463
464 if (!in_interrupt())
465 BUG();
466
467 spin_lock(&priv->driver_lock);
468 memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
469 recvlength - MESSAGE_HEADER_LEN);
470 kfree_skb(skb);
471 lbtf_cmd_response_rx(priv);
472 spin_unlock(&priv->driver_lock);
473}
474
/**
 * if_usb_receive - read data received from the device.
 *
 * @urb pointer to struct urb
 *
 * Rx URB completion handler for normal operation.  Dispatches on the
 * 32-bit type word the firmware prepends to every transfer, then always
 * requeues the Rx URB (except for a failed zero-length transfer).
 * The skb is consumed on every path, either here or by the helpers.
 */
static void if_usb_receive(struct urb *urb)
{
	struct if_usb_card *cardp = urb->context;
	struct sk_buff *skb = cardp->rx_skb;
	struct lbtf_private *priv = cardp->priv;
	int recvlength = urb->actual_length;
	uint8_t *recvbuff = NULL;
	uint32_t recvtype = 0;
	__le32 *pkt = (__le32 *) skb->data;

	if (recvlength) {
		if (urb->status) {
			/* Failed transfer with data: drop but resubmit. */
			kfree_skb(skb);
			goto setup_for_next;
		}

		recvbuff = skb->data;
		recvtype = le32_to_cpu(pkt[0]);
	} else if (urb->status) {
		/* Failed zero-length transfer: give up, no resubmission. */
		kfree_skb(skb);
		return;
	}

	switch (recvtype) {
	case CMD_TYPE_DATA:
		process_cmdtypedata(recvlength, skb, cardp, priv);
		break;

	case CMD_TYPE_REQUEST:
		process_cmdrequest(recvlength, recvbuff, skb, cardp, priv);
		break;

	case CMD_TYPE_INDICATION:
	{
		/* Event cause handling */
		u32 event_cause = le32_to_cpu(pkt[1]);

		/* Icky undocumented magic special case */
		if (event_cause & 0xffff0000) {
			/* High half non-zero: TX feedback.  Low byte of the
			 * high half is the retry count, high byte the
			 * failure flag. */
			u16 tmp;
			u8 retrycnt;
			u8 failure;

			tmp = event_cause >> 16;
			retrycnt = tmp & 0x00ff;
			failure = (tmp & 0xff00) >> 8;
			lbtf_send_tx_feedback(priv, retrycnt, failure);
		} else if (event_cause == LBTF_EVENT_BCN_SENT)
			lbtf_bcn_sent(priv);
		else
			printk(KERN_DEBUG
			       "Unsupported notification %d received\n",
			       event_cause);
		kfree_skb(skb);
		break;
	}
	default:
		/* Note: recvtype 0 (zero-length successful transfer) also
		 * lands here and just recycles the buffer. */
		printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n",
		       recvtype);
		kfree_skb(skb);
		break;
	}

setup_for_next:
	if_usb_submit_rx_urb(cardp);
}
546
547/**
548 * if_usb_host_to_card - Download data to the device
549 *
550 * @priv pointer to struct lbtf_private structure
551 * @type type of data
552 * @buf pointer to data buffer
553 * @len number of bytes
554 *
555 * Returns: 0 on success, nonzero otherwise
556 */
557static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
558 uint8_t *payload, uint16_t nb)
559{
560 struct if_usb_card *cardp = priv->card;
561 u8 data = 0;
562
563 if (type == MVMS_CMD) {
564 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
565 } else {
566 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_DATA);
567 data = 1;
568 }
569
570 memcpy((cardp->ep_out_buf + MESSAGE_HEADER_LEN), payload, nb);
571
572 return usb_tx_block(cardp, cardp->ep_out_buf, nb + MESSAGE_HEADER_LEN,
573 data);
574}
575
576/**
577 * if_usb_issue_boot_command - Issue boot command to Boot2.
578 *
579 * @ivalue 1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
580 *
581 * Returns: 0
582 */
583static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
584{
585 struct bootcmd *bootcmd = cardp->ep_out_buf;
586
587 /* Prepare command */
588 bootcmd->magic = cpu_to_le32(BOOT_CMD_MAGIC_NUMBER);
589 bootcmd->cmd = ivalue;
590 memset(bootcmd->pad, 0, sizeof(bootcmd->pad));
591
592 /* Issue command */
593 usb_tx_block(cardp, cardp->ep_out_buf, sizeof(*bootcmd), 0);
594
595 return 0;
596}
597
598
599/**
600 * check_fwfile_format - Check the validity of Boot2/FW image.
601 *
602 * @data pointer to image
603 * @totlen image length
604 *
605 * Returns: 0 if the image is valid, nonzero otherwise.
606 */
607static int check_fwfile_format(const u8 *data, u32 totlen)
608{
609 u32 bincmd, exit;
610 u32 blksize, offset, len;
611 int ret;
612
613 ret = 1;
614 exit = len = 0;
615
616 do {
617 struct fwheader *fwh = (void *) data;
618
619 bincmd = le32_to_cpu(fwh->dnldcmd);
620 blksize = le32_to_cpu(fwh->datalength);
621 switch (bincmd) {
622 case FW_HAS_DATA_TO_RECV:
623 offset = sizeof(struct fwheader) + blksize;
624 data += offset;
625 len += offset;
626 if (len >= totlen)
627 exit = 1;
628 break;
629 case FW_HAS_LAST_BLOCK:
630 exit = 1;
631 ret = 0;
632 break;
633 default:
634 exit = 1;
635 break;
636 }
637 } while (!exit);
638
639 if (ret)
640 printk(KERN_INFO
641 "libertastf: firmware file format check failed\n");
642 return ret;
643}
644
645
646static int if_usb_prog_firmware(struct if_usb_card *cardp)
647{
648 int i = 0;
649 static int reset_count = 10;
650 int ret = 0;
651
652 ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
653 if (ret < 0) {
654 printk(KERN_INFO "libertastf: firmware %s not found\n",
655 lbtf_fw_name);
656 goto done;
657 }
658
659 if (check_fwfile_format(cardp->fw->data, cardp->fw->size))
660 goto release_fw;
661
662restart:
663 if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
664 ret = -1;
665 goto release_fw;
666 }
667
668 cardp->bootcmdresp = 0;
669 do {
670 int j = 0;
671 i++;
672 /* Issue Boot command = 1, Boot from Download-FW */
673 if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
674 /* wait for command response */
675 do {
676 j++;
677 msleep_interruptible(100);
678 } while (cardp->bootcmdresp == 0 && j < 10);
679 } while (cardp->bootcmdresp == 0 && i < 5);
680
681 if (cardp->bootcmdresp <= 0) {
682 if (--reset_count >= 0) {
683 if_usb_reset_device(cardp);
684 goto restart;
685 }
686 return -1;
687 }
688
689 i = 0;
690
691 cardp->totalbytes = 0;
692 cardp->fwlastblksent = 0;
693 cardp->CRC_OK = 1;
694 cardp->fwdnldover = 0;
695 cardp->fwseqnum = -1;
696 cardp->totalbytes = 0;
697 cardp->fwfinalblk = 0;
698
699 /* Send the first firmware packet... */
700 if_usb_send_fw_pkt(cardp);
701
702 /* ... and wait for the process to complete */
703 wait_event_interruptible(cardp->fw_wq, cardp->priv->surpriseremoved ||
704 cardp->fwdnldover);
705
706 del_timer_sync(&cardp->fw_timeout);
707 usb_kill_urb(cardp->rx_urb);
708
709 if (!cardp->fwdnldover) {
710 printk(KERN_INFO "libertastf: failed to load fw,"
711 " resetting device!\n");
712 if (--reset_count >= 0) {
713 if_usb_reset_device(cardp);
714 goto restart;
715 }
716
717 printk(KERN_INFO "libertastf: fw download failure\n");
718 ret = -1;
719 goto release_fw;
720 }
721
722 cardp->priv->fw_ready = 1;
723
724 release_fw:
725 release_firmware(cardp->fw);
726 cardp->fw = NULL;
727
728 if_usb_setup_firmware(cardp->priv);
729
730 done:
731 return ret;
732}
733EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
734
735
736#define if_usb_suspend NULL
737#define if_usb_resume NULL
738
/* USB driver registration: suspend/resume are stubbed out (NULL) above. */
static struct usb_driver if_usb_driver = {
	/* driver name */
	.name = DRV_NAME,
	/* probe function name */
	.probe = if_usb_probe,
	/* disconnect function name */
	.disconnect = if_usb_disconnect,
	/* device signature table */
	.id_table = if_usb_table,
	.suspend = if_usb_suspend,
	.resume = if_usb_resume,
};
747
748static int __init if_usb_init_module(void)
749{
750 int ret = 0;
751
752 ret = usb_register(&if_usb_driver);
753 return ret;
754}
755
/* Module exit point: unregister the USB driver from the core. */
static void __exit if_usb_exit_module(void)
{
	usb_deregister(&if_usb_driver);
}
760
761module_init(if_usb_init_module);
762module_exit(if_usb_exit_module);
763
764MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver");
765MODULE_AUTHOR("Cozybit Inc.");
766MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/libertas_tf/if_usb.h b/drivers/net/wireless/libertas_tf/if_usb.h
new file mode 100644
index 000000000000..6fa5b3f59efe
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/if_usb.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include <linux/wait.h>
11#include <linux/timer.h>
12
13struct lbtf_private;
14
15/**
16 * This file contains definition for USB interface.
17 */
18#define CMD_TYPE_REQUEST 0xF00DFACE
19#define CMD_TYPE_DATA 0xBEADC0DE
20#define CMD_TYPE_INDICATION 0xBEEFFACE
21
22#define BOOT_CMD_FW_BY_USB 0x01
23#define BOOT_CMD_FW_IN_EEPROM 0x02
24#define BOOT_CMD_UPDATE_BOOT2 0x03
25#define BOOT_CMD_UPDATE_FW 0x04
26#define BOOT_CMD_MAGIC_NUMBER 0x4C56524D /* LVRM */
27
28struct bootcmd {
29 __le32 magic;
30 uint8_t cmd;
31 uint8_t pad[11];
32};
33
34#define BOOT_CMD_RESP_OK 0x0001
35#define BOOT_CMD_RESP_FAIL 0x0000
36
37struct bootcmdresp {
38 __le32 magic;
39 uint8_t cmd;
40 uint8_t result;
41 uint8_t pad[2];
42};
43
/** USB card description structure*/
struct if_usb_card {
	struct usb_device *udev;	/* USB device handle */
	struct urb *rx_urb, *tx_urb, *cmd_urb;	/* bulk transfer URBs */
	struct lbtf_private *priv;	/* back-pointer to driver state */

	struct sk_buff *rx_skb;		/* skb backing the pending rx_urb */

	uint8_t ep_in;			/* bulk-in endpoint address */
	uint8_t ep_out;			/* bulk-out endpoint address */

	/* Boot command state: <0 bad magic seen, 0 awaiting response,
	 * 1 valid response received (see if_usb_receive_fwload). */
	int8_t bootcmdresp;

	int ep_in_size;

	void *ep_out_buf;		/* staging buffer for outgoing transfers */
	int ep_out_size;

	/* Firmware download state */
	const struct firmware *fw;	/* image from request_firmware() */
	struct timer_list fw_timeout;	/* per-block retransmit timer (200 ms) */
	wait_queue_head_t fw_wq;	/* woken on firmware-ready event */
	uint32_t fwseqnum;
	uint32_t totalbytes;		/* image bytes consumed so far */
	uint32_t fwlastblksent;
	uint8_t CRC_OK;			/* cleared when fw requests a block resend */
	uint8_t fwdnldover;		/* set when the final block is acked */
	uint8_t fwfinalblk;		/* set when the final block has been sent */

	__le16 boot2_version;
};
74
75/** fwheader */
76struct fwheader {
77 __le32 dnldcmd;
78 __le32 baseaddr;
79 __le32 datalength;
80 __le32 CRC;
81};
82
83#define FW_MAX_DATA_BLK_SIZE 600
84/** FWData */
85struct fwdata {
86 struct fwheader hdr;
87 __le32 seqnum;
88 uint8_t data[0];
89};
90
91/** fwsyncheader */
92struct fwsyncheader {
93 __le32 cmd;
94 __le32 seqnum;
95};
96
97#define FW_HAS_DATA_TO_RECV 0x00000001
98#define FW_HAS_LAST_BLOCK 0x00000004
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
new file mode 100644
index 000000000000..8995cd7c29bf
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -0,0 +1,514 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2007, Red Hat, Inc.
4 * Copyright (C) 2003-2006, Marvell International Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11#include <linux/spinlock.h>
12#include <linux/device.h>
13#include <linux/kthread.h>
14#include <net/mac80211.h>
15
16#ifndef DRV_NAME
17#define DRV_NAME "libertas_tf"
18#endif
19
20#define MRVL_DEFAULT_RETRIES 9
21#define MRVL_PER_PACKET_RATE 0x10
22#define MRVL_MAX_BCN_SIZE 440
23#define CMD_OPTION_WAITFORRSP 0x0002
24
25/* Return command are almost always the same as the host command, but with
26 * bit 15 set high. There are a few exceptions, though...
27 */
28#define CMD_RET(cmd) (0x8000 | cmd)
29
30/* Command codes */
31#define CMD_GET_HW_SPEC 0x0003
32#define CMD_802_11_RESET 0x0005
33#define CMD_MAC_MULTICAST_ADR 0x0010
34#define CMD_802_11_RADIO_CONTROL 0x001c
35#define CMD_802_11_RF_CHANNEL 0x001d
36#define CMD_802_11_RF_TX_POWER 0x001e
37#define CMD_MAC_CONTROL 0x0028
38#define CMD_802_11_MAC_ADDRESS 0x004d
39#define CMD_SET_BOOT2_VER 0x00a5
40#define CMD_802_11_BEACON_CTRL 0x00b0
41#define CMD_802_11_BEACON_SET 0x00cb
42#define CMD_802_11_SET_MODE 0x00cc
43#define CMD_802_11_SET_BSSID 0x00cd
44
45#define CMD_ACT_GET 0x0000
46#define CMD_ACT_SET 0x0001
47
48/* Define action or option for CMD_802_11_RESET */
49#define CMD_ACT_HALT 0x0003
50
51/* Define action or option for CMD_MAC_CONTROL */
52#define CMD_ACT_MAC_RX_ON 0x0001
53#define CMD_ACT_MAC_TX_ON 0x0002
54#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020
55#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040
56#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
57#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
58
59/* Define action or option for CMD_802_11_RADIO_CONTROL */
60#define CMD_TYPE_AUTO_PREAMBLE 0x0001
61#define CMD_TYPE_SHORT_PREAMBLE 0x0002
62#define CMD_TYPE_LONG_PREAMBLE 0x0003
63
64#define TURN_ON_RF 0x01
65#define RADIO_ON 0x01
66#define RADIO_OFF 0x00
67
68#define SET_AUTO_PREAMBLE 0x05
69#define SET_SHORT_PREAMBLE 0x03
70#define SET_LONG_PREAMBLE 0x01
71
72/* Define action or option for CMD_802_11_RF_CHANNEL */
73#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00
74#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01
75
76/* Codes for CMD_802_11_SET_MODE */
77enum lbtf_mode {
78 LBTF_PASSIVE_MODE,
79 LBTF_STA_MODE,
80 LBTF_AP_MODE,
81};
82
83/** Card Event definition */
84#define MACREG_INT_CODE_FIRMWARE_READY 48
85/** Buffer Constants */
86
87/* The size of SQ memory PPA, DPA are 8 DWORDs, that keep the physical
88* addresses of TxPD buffers. Station has only 8 TxPD available, Whereas
89* driver has more local TxPDs. Each TxPD on the host memory is associated
90* with a Tx control node. The driver maintains 8 RxPD descriptors for
91* station firmware to store Rx packet information.
92*
93* Current version of MAC has a 32x6 multicast address buffer.
94*
95* 802.11b can have up to 14 channels, the driver keeps the
96* BSSID(MAC address) of each APs or Ad hoc stations it has sensed.
97*/
98
99#define MRVDRV_MAX_MULTICAST_LIST_SIZE 32
100#define LBS_NUM_CMD_BUFFERS 10
101#define LBS_CMD_BUFFER_SIZE (2 * 1024)
102#define MRVDRV_MAX_CHANNEL_SIZE 14
103#define MRVDRV_SNAP_HEADER_LEN 8
104
105#define LBS_UPLD_SIZE 2312
106#define DEV_NAME_LEN 32
107
108/** Misc constants */
109/* This section defines 802.11 specific contants */
110
111#define MRVDRV_MAX_REGION_CODE 6
112/**
113 * the table to keep region code
114 */
115#define LBTF_REGDOMAIN_US 0x10
116#define LBTF_REGDOMAIN_CA 0x20
117#define LBTF_REGDOMAIN_EU 0x30
118#define LBTF_REGDOMAIN_SP 0x31
119#define LBTF_REGDOMAIN_FR 0x32
120#define LBTF_REGDOMAIN_JP 0x40
121
122#define SBI_EVENT_CAUSE_SHIFT 3
123
124/** RxPD status */
125
126#define MRVDRV_RXPD_STATUS_OK 0x0001
127
128
129/* This is for firmware specific length */
130#define EXTRA_LEN 36
131
132#define MRVDRV_ETH_TX_PACKET_BUFFER_SIZE \
133 (ETH_FRAME_LEN + sizeof(struct txpd) + EXTRA_LEN)
134
135#define MRVDRV_ETH_RX_PACKET_BUFFER_SIZE \
136 (ETH_FRAME_LEN + sizeof(struct rxpd) \
137 + MRVDRV_SNAP_HEADER_LEN + EXTRA_LEN)
138
139#define CMD_F_HOSTCMD (1 << 0)
140#define FW_CAPINFO_WPA (1 << 0)
141
142#define RF_ANTENNA_1 0x1
143#define RF_ANTENNA_2 0x2
144#define RF_ANTENNA_AUTO 0xFFFF
145
146#define LBTF_EVENT_BCN_SENT 55
147
148/** Global Variable Declaration */
149/** mv_ms_type */
150enum mv_ms_type {
151 MVMS_DAT = 0,
152 MVMS_CMD = 1,
153 MVMS_TXDONE = 2,
154 MVMS_EVENT
155};
156
157extern struct workqueue_struct *lbtf_wq;
158
159struct lbtf_private;
160
161struct lbtf_offset_value {
162 u32 offset;
163 u32 value;
164};
165
166struct channel_range {
167 u8 regdomain;
168 u8 start;
169 u8 end; /* exclusive (channel must be less than end) */
170};
171
172struct if_usb_card;
173
174/** Private structure for the MV device */
175struct lbtf_private {
176 void *card;
177 struct ieee80211_hw *hw;
178
179 /* Command response buffer */
180 u8 cmd_resp_buff[LBS_UPLD_SIZE];
181 /* Download sent:
182 bit0 1/0=data_sent/data_tx_done,
183 bit1 1/0=cmd_sent/cmd_tx_done,
184 all other bits reserved 0 */
185 struct ieee80211_vif *vif;
186
187 struct work_struct cmd_work;
188 struct work_struct tx_work;
189 /** Hardware access */
190 int (*hw_host_to_card) (struct lbtf_private *priv, u8 type, u8 *payload, u16 nb);
191 int (*hw_prog_firmware) (struct if_usb_card *cardp);
192 int (*hw_reset_device) (struct if_usb_card *cardp);
193
194
195 /** Wlan adapter data structure*/
196 /** STATUS variables */
197 u32 fwrelease;
198 u32 fwcapinfo;
199 /* protected with big lock */
200
201 struct mutex lock;
202
203 /** command-related variables */
204 u16 seqnum;
205 /* protected by big lock */
206
207 struct cmd_ctrl_node *cmd_array;
208 /** Current command */
209 struct cmd_ctrl_node *cur_cmd;
210 /** command Queues */
211 /** Free command buffers */
212 struct list_head cmdfreeq;
213 /** Pending command buffers */
214 struct list_head cmdpendingq;
215
216 /** spin locks */
217 spinlock_t driver_lock;
218
219 /** Timers */
220 struct timer_list command_timer;
221 int nr_retries;
222 int cmd_timed_out;
223
224 u8 cmd_response_rxed;
225
226 /** capability Info used in Association, start, join */
227 u16 capability;
228
229 /** MAC address information */
230 u8 current_addr[ETH_ALEN];
231 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
232 u32 nr_of_multicastmacaddr;
233 int cur_freq;
234
235 struct sk_buff *skb_to_tx;
236 struct sk_buff *tx_skb;
237
238 /** NIC Operation characteristics */
239 u16 mac_control;
240 u16 regioncode;
241 struct channel_range range;
242
243 u8 radioon;
244 u32 preamble;
245
246 struct ieee80211_channel channels[14];
247 struct ieee80211_rate rates[12];
248 struct ieee80211_supported_band band;
249 struct lbtf_offset_value offsetvalue;
250
251 u8 fw_ready;
252 u8 surpriseremoved;
253 struct sk_buff_head bc_ps_buf;
254};
255
256/* 802.11-related definitions */
257
/* TxPD descriptor: per-packet header prepended to every outgoing frame
 * before it is handed to the firmware (see lbtf_tx_work). */
struct txpd {
	/* Current Tx packet status */
	__le32 tx_status;
	/* Tx control */
	__le32 tx_control;
	__le32 tx_packet_location;
	/* Tx packet length */
	__le16 tx_packet_length;
	/* First 2 byte of destination MAC address */
	u8 tx_dest_addr_high[2];
	/* Last 4 byte of destination MAC address */
	u8 tx_dest_addr_low[4];
	/* Pkt Priority */
	u8 priority;
	/* Pkt Transmit Power control */
	u8 powermgmt;
	/* Time the packet has been queued in the driver (units = 2ms) */
	u8 pktdelay_2ms;
	/* reserved */
	u8 reserved1;
};
280
281/* RxPD Descriptor */
282struct rxpd {
283 /* Current Rx packet status */
284 __le16 status;
285
286 /* SNR */
287 u8 snr;
288
289 /* Tx control */
290 u8 rx_control;
291
292 /* Pkt length */
293 __le16 pkt_len;
294
295 /* Noise Floor */
296 u8 nf;
297
298 /* Rx Packet Rate */
299 u8 rx_rate;
300
301 /* Pkt addr */
302 __le32 pkt_ptr;
303
304 /* Next Rx RxPD addr */
305 __le32 next_rxpd_ptr;
306
307 /* Pkt Priority */
308 u8 priority;
309 u8 reserved[3];
310};
311
312struct cmd_header {
313 __le16 command;
314 __le16 size;
315 __le16 seqnum;
316 __le16 result;
317} __attribute__ ((packed));
318
319struct cmd_ctrl_node {
320 struct list_head list;
321 int result;
322 /* command response */
323 int (*callback)(struct lbtf_private *,
324 unsigned long, struct cmd_header *);
325 unsigned long callback_arg;
326 /* command data */
327 struct cmd_header *cmdbuf;
328 /* wait queue */
329 u16 cmdwaitqwoken;
330 wait_queue_head_t cmdwait_q;
331};
332
333/*
334 * Define data structure for CMD_GET_HW_SPEC
335 * This structure defines the response for the GET_HW_SPEC command
336 */
337struct cmd_ds_get_hw_spec {
338 struct cmd_header hdr;
339
340 /* HW Interface version number */
341 __le16 hwifversion;
342 /* HW version number */
343 __le16 version;
344 /* Max number of TxPD FW can handle */
345 __le16 nr_txpd;
346 /* Max no of Multicast address */
347 __le16 nr_mcast_adr;
348 /* MAC address */
349 u8 permanentaddr[6];
350
351 /* region Code */
352 __le16 regioncode;
353
354 /* Number of antenna used */
355 __le16 nr_antenna;
356
357 /* FW release number, example 0x01030304 = 2.3.4p1 */
358 __le32 fwrelease;
359
360 /* Base Address of TxPD queue */
361 __le32 wcb_base;
362 /* Read Pointer of RxPd queue */
363 __le32 rxpd_rdptr;
364
365 /* Write Pointer of RxPd queue */
366 __le32 rxpd_wrptr;
367
368 /*FW/HW capability */
369 __le32 fwcapinfo;
370} __attribute__ ((packed));
371
372struct cmd_ds_mac_control {
373 struct cmd_header hdr;
374 __le16 action;
375 u16 reserved;
376};
377
378struct cmd_ds_802_11_mac_address {
379 struct cmd_header hdr;
380
381 __le16 action;
382 uint8_t macadd[ETH_ALEN];
383};
384
385struct cmd_ds_mac_multicast_addr {
386 struct cmd_header hdr;
387
388 __le16 action;
389 __le16 nr_of_adrs;
390 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
391};
392
393struct cmd_ds_set_mode {
394 struct cmd_header hdr;
395
396 __le16 mode;
397};
398
399struct cmd_ds_set_bssid {
400 struct cmd_header hdr;
401
402 u8 bssid[6];
403 u8 activate;
404};
405
406struct cmd_ds_802_11_radio_control {
407 struct cmd_header hdr;
408
409 __le16 action;
410 __le16 control;
411};
412
413
414struct cmd_ds_802_11_rf_channel {
415 struct cmd_header hdr;
416
417 __le16 action;
418 __le16 channel;
419 __le16 rftype; /* unused */
420 __le16 reserved; /* unused */
421 u8 channellist[32]; /* unused */
422};
423
424struct cmd_ds_set_boot2_ver {
425 struct cmd_header hdr;
426
427 __le16 action;
428 __le16 version;
429};
430
431struct cmd_ds_802_11_reset {
432 struct cmd_header hdr;
433
434 __le16 action;
435};
436
437struct cmd_ds_802_11_beacon_control {
438 struct cmd_header hdr;
439
440 __le16 action;
441 __le16 beacon_enable;
442 __le16 beacon_period;
443};
444
445struct cmd_ds_802_11_beacon_set {
446 struct cmd_header hdr;
447
448 __le16 len;
449 u8 beacon[MRVL_MAX_BCN_SIZE];
450};
451
452struct lbtf_private;
453struct cmd_ctrl_node;
454
455/** Function Prototype Declaration */
456void lbtf_set_mac_control(struct lbtf_private *priv);
457
458int lbtf_free_cmd_buffer(struct lbtf_private *priv);
459
460int lbtf_allocate_cmd_buffer(struct lbtf_private *priv);
461int lbtf_execute_next_command(struct lbtf_private *priv);
462int lbtf_set_radio_control(struct lbtf_private *priv);
463int lbtf_update_hw_spec(struct lbtf_private *priv);
464int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv);
465void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode);
466void lbtf_set_bssid(struct lbtf_private *priv, bool activate, u8 *bssid);
467int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr);
468
469int lbtf_set_channel(struct lbtf_private *priv, u8 channel);
470
471int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon);
472int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
473 int beacon_int);
474
475
476int lbtf_process_rx_command(struct lbtf_private *priv);
477void lbtf_complete_command(struct lbtf_private *priv, struct cmd_ctrl_node *cmd,
478 int result);
479void lbtf_cmd_response_rx(struct lbtf_private *priv);
480
481/* main.c */
482struct chan_freq_power *lbtf_get_region_cfp_table(u8 region,
483 int *cfp_no);
484struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev);
485int lbtf_remove_card(struct lbtf_private *priv);
486int lbtf_start_card(struct lbtf_private *priv);
487int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb);
488void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail);
489void lbtf_bcn_sent(struct lbtf_private *priv);
490
491/* support functions for cmd.c */
492/* lbtf_cmd() infers the size of the buffer to copy data back into, from
493 the size of the target of the pointer. Since the command to be sent
494 may often be smaller, that size is set in cmd->size by the caller.*/
495#define lbtf_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \
496 uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \
497 (cmd)->hdr.size = cpu_to_le16(sizeof(*(cmd))); \
498 __lbtf_cmd(priv, cmdnr, &(cmd)->hdr, __sz, cb, cb_arg); \
499})
500
501#define lbtf_cmd_with_response(priv, cmdnr, cmd) \
502 lbtf_cmd(priv, cmdnr, cmd, lbtf_cmd_copyback, (unsigned long) (cmd))
503
504void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
505 struct cmd_header *in_cmd, int in_cmd_size);
506
507int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
508 struct cmd_header *in_cmd, int in_cmd_size,
509 int (*callback)(struct lbtf_private *, unsigned long,
510 struct cmd_header *),
511 unsigned long callback_arg);
512
513int lbtf_cmd_copyback(struct lbtf_private *priv, unsigned long extra,
514 struct cmd_header *resp);
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
new file mode 100644
index 000000000000..feff945ad856
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -0,0 +1,662 @@
1/*
2 * Copyright (C) 2008, cozybit Inc.
3 * Copyright (C) 2003-2006, Marvell International Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version.
9 */
10#include "libertas_tf.h"
11#include "linux/etherdevice.h"
12
13#define DRIVER_RELEASE_VERSION "004.p0"
14/* thinfirm version: 5.132.X.pX */
15#define LBTF_FW_VER_MIN 0x05840300
16#define LBTF_FW_VER_MAX 0x0584ffff
17#define QOS_CONTROL_LEN 2
18
19static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION;
20struct workqueue_struct *lbtf_wq;
21
22static const struct ieee80211_channel lbtf_channels[] = {
23 { .center_freq = 2412, .hw_value = 1 },
24 { .center_freq = 2417, .hw_value = 2 },
25 { .center_freq = 2422, .hw_value = 3 },
26 { .center_freq = 2427, .hw_value = 4 },
27 { .center_freq = 2432, .hw_value = 5 },
28 { .center_freq = 2437, .hw_value = 6 },
29 { .center_freq = 2442, .hw_value = 7 },
30 { .center_freq = 2447, .hw_value = 8 },
31 { .center_freq = 2452, .hw_value = 9 },
32 { .center_freq = 2457, .hw_value = 10 },
33 { .center_freq = 2462, .hw_value = 11 },
34 { .center_freq = 2467, .hw_value = 12 },
35 { .center_freq = 2472, .hw_value = 13 },
36 { .center_freq = 2484, .hw_value = 14 },
37};
38
39/* This table contains the hardware specific values for the modulation rates. */
40static const struct ieee80211_rate lbtf_rates[] = {
41 { .bitrate = 10,
42 .hw_value = 0, },
43 { .bitrate = 20,
44 .hw_value = 1,
45 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
46 { .bitrate = 55,
47 .hw_value = 2,
48 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
49 { .bitrate = 110,
50 .hw_value = 3,
51 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
52 { .bitrate = 60,
53 .hw_value = 5,
54 .flags = 0 },
55 { .bitrate = 90,
56 .hw_value = 6,
57 .flags = 0 },
58 { .bitrate = 120,
59 .hw_value = 7,
60 .flags = 0 },
61 { .bitrate = 180,
62 .hw_value = 8,
63 .flags = 0 },
64 { .bitrate = 240,
65 .hw_value = 9,
66 .flags = 0 },
67 { .bitrate = 360,
68 .hw_value = 10,
69 .flags = 0 },
70 { .bitrate = 480,
71 .hw_value = 11,
72 .flags = 0 },
73 { .bitrate = 540,
74 .hw_value = 12,
75 .flags = 0 },
76};
77
/*
 * Command worker: processes a received command response (if any), handles
 * a timed-out command (retry up to 10 times, then fail it), and kicks off
 * the next pending command.  The driver lock is dropped around
 * lbtf_process_rx_command() because that path takes it itself.
 */
static void lbtf_cmd_work(struct work_struct *work)
{
	struct lbtf_private *priv = container_of(work, struct lbtf_private,
						 cmd_work);
	spin_lock_irq(&priv->driver_lock);
	/* command response? */
	if (priv->cmd_response_rxed) {
		priv->cmd_response_rxed = 0;
		spin_unlock_irq(&priv->driver_lock);
		lbtf_process_rx_command(priv);
		spin_lock_irq(&priv->driver_lock);
	}

	if (priv->cmd_timed_out && priv->cur_cmd) {
		struct cmd_ctrl_node *cmdnode = priv->cur_cmd;

		if (++priv->nr_retries > 10) {
			/* Too many retries: fail the command outright. */
			lbtf_complete_command(priv, cmdnode,
					      -ETIMEDOUT);
			priv->nr_retries = 0;
		} else {
			priv->cur_cmd = NULL;

			/* Stick it back at the _top_ of the pending
			 * queue for immediate resubmission */
			list_add(&cmdnode->list, &priv->cmdpendingq);
		}
	}
	priv->cmd_timed_out = 0;
	spin_unlock_irq(&priv->driver_lock);

	if (!priv->fw_ready)
		return;
	/* Execute the next command */
	if (!priv->cur_cmd)
		lbtf_execute_next_command(priv);
}
115
116/**
117 * lbtf_setup_firmware: initialize firmware.
118 *
119 * @priv A pointer to struct lbtf_private structure
120 *
121 * Returns: 0 on success.
122 */
123static int lbtf_setup_firmware(struct lbtf_private *priv)
124{
125 int ret = -1;
126
127 /*
128 * Read priv address from HW
129 */
130 memset(priv->current_addr, 0xff, ETH_ALEN);
131 ret = lbtf_update_hw_spec(priv);
132 if (ret) {
133 ret = -1;
134 goto done;
135 }
136
137 lbtf_set_mac_control(priv);
138 lbtf_set_radio_control(priv);
139
140 ret = 0;
141done:
142 return ret;
143}
144
145/**
146 * This function handles the timeout of command sending.
147 * It will re-send the same command again.
148 */
149static void command_timer_fn(unsigned long data)
150{
151 struct lbtf_private *priv = (struct lbtf_private *)data;
152 unsigned long flags;
153
154 spin_lock_irqsave(&priv->driver_lock, flags);
155
156 if (!priv->cur_cmd) {
157 printk(KERN_DEBUG "libertastf: command timer expired; "
158 "no pending command\n");
159 goto out;
160 }
161
162 printk(KERN_DEBUG "libertas: command %x timed out\n",
163 le16_to_cpu(priv->cur_cmd->cmdbuf->command));
164
165 priv->cmd_timed_out = 1;
166 queue_work(lbtf_wq, &priv->cmd_work);
167out:
168 spin_unlock_irqrestore(&priv->driver_lock, flags);
169}
170
/* One-time adapter state initialization: locks, lists, command timer and
 * command buffers.  Returns 0 on success, -1 if buffer allocation fails. */
static int lbtf_init_adapter(struct lbtf_private *priv)
{
	memset(priv->current_addr, 0xff, ETH_ALEN);
	mutex_init(&priv->lock);

	priv->vif = NULL;
	setup_timer(&priv->command_timer, command_timer_fn,
		    (unsigned long)priv);

	INIT_LIST_HEAD(&priv->cmdfreeq);
	INIT_LIST_HEAD(&priv->cmdpendingq);

	spin_lock_init(&priv->driver_lock);

	/* Allocate the command buffers */
	if (lbtf_allocate_cmd_buffer(priv))
		return -1;

	return 0;
}
191
192static void lbtf_free_adapter(struct lbtf_private *priv)
193{
194 lbtf_free_cmd_buffer(priv);
195 del_timer(&priv->command_timer);
196}
197
/* mac80211 .tx callback: stash the frame for the TX worker and stop the
 * queues — only one frame is in flight at a time. */
static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct lbtf_private *priv = hw->priv;

	priv->skb_to_tx = skb;
	queue_work(lbtf_wq, &priv->tx_work);
	/*
	 * queue will be restarted when we receive transmission feedback if
	 * there are no buffered multicast frames to send
	 */
	ieee80211_stop_queues(priv->hw);
	return 0;
}
211
/*
 * TX worker: picks the next frame (buffered AP broadcast/multicast frames
 * take priority over the one handed down by mac80211), prepends a txpd
 * descriptor and pushes it to the hardware.  priv->tx_skb holds the frame
 * until transmission feedback arrives.
 */
static void lbtf_tx_work(struct work_struct *work)
{
	struct lbtf_private *priv = container_of(work, struct lbtf_private,
						 tx_work);
	unsigned int len;
	struct ieee80211_tx_info *info;
	struct txpd *txpd;
	struct sk_buff *skb = NULL;
	int err;

	if ((priv->vif->type == NL80211_IFTYPE_AP) &&
	    (!skb_queue_empty(&priv->bc_ps_buf)))
		skb = skb_dequeue(&priv->bc_ps_buf);
	else if (priv->skb_to_tx) {
		skb = priv->skb_to_tx;
		priv->skb_to_tx = NULL;
	} else
		return;

	len = skb->len;
	info = IEEE80211_SKB_CB(skb);
	txpd = (struct txpd *) skb_push(skb, sizeof(struct txpd));

	/* Device gone: drop the frame instead of touching hardware. */
	if (priv->surpriseremoved) {
		dev_kfree_skb_any(skb);
		return;
	}

	memset(txpd, 0, sizeof(struct txpd));
	/* Activate per-packet rate selection */
	txpd->tx_control |= cpu_to_le32(MRVL_PER_PACKET_RATE |
			     ieee80211_get_tx_rate(priv->hw, info)->hw_value);

	/* copy destination address from 802.11 header */
	memcpy(txpd->tx_dest_addr_high, skb->data + sizeof(struct txpd) + 4,
	       ETH_ALEN);
	txpd->tx_packet_length = cpu_to_le16(len);
	txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
	/* A previous frame must have completed before sending the next. */
	BUG_ON(priv->tx_skb);
	spin_lock_irq(&priv->driver_lock);
	priv->tx_skb = skb;
	err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len);
	spin_unlock_irq(&priv->driver_lock);
	if (err) {
		dev_kfree_skb_any(skb);
		priv->tx_skb = NULL;
	}
}
260
261static int lbtf_op_start(struct ieee80211_hw *hw)
262{
263 struct lbtf_private *priv = hw->priv;
264 void *card = priv->card;
265 int ret = -1;
266
267 if (!priv->fw_ready)
268 /* Upload firmware */
269 if (priv->hw_prog_firmware(card))
270 goto err_prog_firmware;
271
272 /* poke the firmware */
273 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
274 priv->radioon = RADIO_ON;
275 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
276 ret = lbtf_setup_firmware(priv);
277 if (ret)
278 goto err_prog_firmware;
279
280 if ((priv->fwrelease < LBTF_FW_VER_MIN) ||
281 (priv->fwrelease > LBTF_FW_VER_MAX)) {
282 ret = -1;
283 goto err_prog_firmware;
284 }
285
286 printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
287 return 0;
288
289err_prog_firmware:
290 priv->hw_reset_device(card);
291 return ret;
292}
293
294static void lbtf_op_stop(struct ieee80211_hw *hw)
295{
296 struct lbtf_private *priv = hw->priv;
297 unsigned long flags;
298 struct sk_buff *skb;
299
300 struct cmd_ctrl_node *cmdnode;
301 /* Flush pending command nodes */
302 spin_lock_irqsave(&priv->driver_lock, flags);
303 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
304 cmdnode->result = -ENOENT;
305 cmdnode->cmdwaitqwoken = 1;
306 wake_up_interruptible(&cmdnode->cmdwait_q);
307 }
308
309 spin_unlock_irqrestore(&priv->driver_lock, flags);
310 cancel_work_sync(&priv->cmd_work);
311 cancel_work_sync(&priv->tx_work);
312 while ((skb = skb_dequeue(&priv->bc_ps_buf)))
313 dev_kfree_skb_any(skb);
314 priv->radioon = RADIO_OFF;
315 lbtf_set_radio_control(priv);
316
317 return;
318}
319
320static int lbtf_op_add_interface(struct ieee80211_hw *hw,
321 struct ieee80211_if_init_conf *conf)
322{
323 struct lbtf_private *priv = hw->priv;
324 if (priv->vif != NULL)
325 return -EOPNOTSUPP;
326
327 priv->vif = conf->vif;
328 switch (conf->type) {
329 case NL80211_IFTYPE_MESH_POINT:
330 case NL80211_IFTYPE_AP:
331 lbtf_set_mode(priv, LBTF_AP_MODE);
332 break;
333 case NL80211_IFTYPE_STATION:
334 lbtf_set_mode(priv, LBTF_STA_MODE);
335 break;
336 default:
337 priv->vif = NULL;
338 return -EOPNOTSUPP;
339 }
340 lbtf_set_mac_address(priv, (u8 *) conf->mac_addr);
341 return 0;
342}
343
344static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
345 struct ieee80211_if_init_conf *conf)
346{
347 struct lbtf_private *priv = hw->priv;
348
349 if (priv->vif->type == NL80211_IFTYPE_AP ||
350 priv->vif->type == NL80211_IFTYPE_MESH_POINT)
351 lbtf_beacon_ctrl(priv, 0, 0);
352 lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
353 lbtf_set_bssid(priv, 0, NULL);
354 priv->vif = NULL;
355}
356
357static int lbtf_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
358{
359 struct lbtf_private *priv = hw->priv;
360 if (conf->channel->center_freq != priv->cur_freq) {
361 priv->cur_freq = conf->channel->center_freq;
362 lbtf_set_channel(priv, conf->channel->hw_value);
363 }
364 return 0;
365}
366
/*
 * mac80211 .config_interface handler: in AP/mesh operation, refresh
 * the beacon template and (re)enable beaconing; then program the BSSID
 * into the firmware when one is supplied.
 */
static int lbtf_op_config_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			struct ieee80211_if_conf *conf)
{
	struct lbtf_private *priv = hw->priv;
	struct sk_buff *beacon;

	switch (priv->vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		/* lbtf_beacon_set() copies the template; free the skb here. */
		beacon = ieee80211_beacon_get(hw, vif);
		if (beacon) {
			lbtf_beacon_set(priv, beacon);
			kfree_skb(beacon);
			lbtf_beacon_ctrl(priv, 1, hw->conf.beacon_int);
		}
		break;
	default:
		break;
	}

	if (conf->bssid) {
		u8 null_bssid[ETH_ALEN] = {0};
		/* compare_ether_addr() != 0 => non-zero BSSID => activate. */
		bool activate = compare_ether_addr(conf->bssid, null_bssid);
		lbtf_set_bssid(priv, activate, conf->bssid);
	}

	return 0;
}
396
/* RX filter flags this driver can honour. */
#define SUPPORTED_FIF_FLAGS (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
/*
 * mac80211 .configure_filter handler: translate the requested filter
 * flags and multicast list into mac_control bits and, when needed, a
 * firmware multicast address table.
 */
static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
			unsigned int changed_flags,
			unsigned int *new_flags,
			int mc_count, struct dev_mc_list *mclist)
{
	struct lbtf_private *priv = hw->priv;
	int old_mac_control = priv->mac_control;
	int i;
	/* Mask off every flag the hardware cannot filter on. */
	changed_flags &= SUPPORTED_FIF_FLAGS;
	*new_flags &= SUPPORTED_FIF_FLAGS;

	if (!changed_flags)
		return;

	if (*new_flags & (FIF_PROMISC_IN_BSS))
		priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
	else
		priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
	/* All-multi requested, or list too long for hardware: accept all. */
	if (*new_flags & (FIF_ALLMULTI) ||
	    mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
		priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
		priv->mac_control &= ~CMD_ACT_MAC_MULTICAST_ENABLE;
	} else if (mc_count) {
		/* Program the explicit multicast address list. */
		priv->mac_control |= CMD_ACT_MAC_MULTICAST_ENABLE;
		priv->mac_control &= ~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
		priv->nr_of_multicastmacaddr = mc_count;
		for (i = 0; i < mc_count; i++) {
			if (!mclist)
				break;
			memcpy(&priv->multicastlist[i], mclist->da_addr,
					ETH_ALEN);
			mclist = mclist->next;
		}
		lbtf_cmd_set_mac_multicast_addr(priv);
	} else {
		/* No multicast filtering wanted; clear any stale list. */
		priv->mac_control &= ~(CMD_ACT_MAC_MULTICAST_ENABLE |
				       CMD_ACT_MAC_ALL_MULTICAST_ENABLE);
		if (priv->nr_of_multicastmacaddr) {
			priv->nr_of_multicastmacaddr = 0;
			lbtf_cmd_set_mac_multicast_addr(priv);
		}
	}


	/* Push the new control word only when it actually changed. */
	if (priv->mac_control != old_mac_control)
		lbtf_set_mac_control(priv);
}
445
446static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
447 struct ieee80211_vif *vif,
448 struct ieee80211_bss_conf *bss_conf,
449 u32 changes)
450{
451 struct lbtf_private *priv = hw->priv;
452
453 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
454 if (bss_conf->use_short_preamble)
455 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
456 else
457 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
458 lbtf_set_radio_control(priv);
459 }
460
461 return;
462}
463
/* mac80211 callback table implemented by this driver. */
static const struct ieee80211_ops lbtf_ops = {
	.tx = lbtf_op_tx,
	.start = lbtf_op_start,
	.stop = lbtf_op_stop,
	.add_interface = lbtf_op_add_interface,
	.remove_interface = lbtf_op_remove_interface,
	.config = lbtf_op_config,
	.config_interface = lbtf_op_config_interface,
	.configure_filter = lbtf_op_configure_filter,
	.bss_info_changed = lbtf_op_bss_info_changed,
};
475
476int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
477{
478 struct ieee80211_rx_status stats;
479 struct rxpd *prxpd;
480 int need_padding;
481 unsigned int flags;
482 struct ieee80211_hdr *hdr;
483
484 prxpd = (struct rxpd *) skb->data;
485
486 stats.flag = 0;
487 if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
488 stats.flag |= RX_FLAG_FAILED_FCS_CRC;
489 stats.freq = priv->cur_freq;
490 stats.band = IEEE80211_BAND_2GHZ;
491 stats.signal = prxpd->snr;
492 stats.noise = prxpd->nf;
493 stats.qual = prxpd->snr - prxpd->nf;
494 /* Marvell rate index has a hole at value 4 */
495 if (prxpd->rx_rate > 4)
496 --prxpd->rx_rate;
497 stats.rate_idx = prxpd->rx_rate;
498 skb_pull(skb, sizeof(struct rxpd));
499
500 hdr = (struct ieee80211_hdr *)skb->data;
501 flags = le32_to_cpu(*(__le32 *)(skb->data + 4));
502
503 need_padding = ieee80211_is_data_qos(hdr->frame_control);
504 need_padding ^= ieee80211_has_a4(hdr->frame_control);
505 need_padding ^= ieee80211_is_data_qos(hdr->frame_control) &&
506 (*ieee80211_get_qos_ctl(hdr) &
507 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT);
508
509 if (need_padding) {
510 memmove(skb->data + 2, skb->data, skb->len);
511 skb_reserve(skb, 2);
512 }
513
514 ieee80211_rx_irqsafe(priv->hw, skb, &stats);
515 return 0;
516}
517EXPORT_SYMBOL_GPL(lbtf_rx);
518
/**
 * lbtf_add_card: Add and initialize the card, no fw upload yet.
 *
 * @card: pointer to the interface-driver private card structure
 * @dmdev: device to associate with the new ieee80211_hw
 *
 * Returns: pointer to struct lbtf_private, or NULL on failure.
 */
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
{
	struct ieee80211_hw *hw;
	struct lbtf_private *priv = NULL;

	hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
	if (!hw)
		goto done;

	priv = hw->priv;
	if (lbtf_init_adapter(priv))
		goto err_init_adapter;

	priv->hw = hw;
	priv->card = card;
	priv->tx_skb = NULL;

	/* Single HW queue; mac80211 buffers bc/mc PS frames for us. */
	hw->queues = 1;
	hw->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
	/* Reserve headroom for the txpd descriptor prepended on TX. */
	hw->extra_tx_headroom = sizeof(struct txpd);
	memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels));
	memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates));
	priv->band.n_bitrates = ARRAY_SIZE(lbtf_rates);
	priv->band.bitrates = priv->rates;
	priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
	priv->band.channels = priv->channels;
	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
	skb_queue_head_init(&priv->bc_ps_buf);

	SET_IEEE80211_DEV(hw, dmdev);

	INIT_WORK(&priv->cmd_work, lbtf_cmd_work);
	INIT_WORK(&priv->tx_work, lbtf_tx_work);
	if (ieee80211_register_hw(hw))
		goto err_init_adapter;

	goto done;

err_init_adapter:
	lbtf_free_adapter(priv);
	ieee80211_free_hw(hw);
	priv = NULL;

done:
	return priv;
}
572EXPORT_SYMBOL_GPL(lbtf_add_card);
573
574
575int lbtf_remove_card(struct lbtf_private *priv)
576{
577 struct ieee80211_hw *hw = priv->hw;
578
579 priv->surpriseremoved = 1;
580 del_timer(&priv->command_timer);
581 lbtf_free_adapter(priv);
582 priv->hw = NULL;
583 ieee80211_unregister_hw(hw);
584 ieee80211_free_hw(hw);
585
586 return 0;
587}
588EXPORT_SYMBOL_GPL(lbtf_remove_card);
589
/*
 * TX completion callback invoked by the interface driver for the frame
 * handed over in lbtf_tx_work(). Reports the TX status to mac80211,
 * then either wakes the mac80211 queues or re-arms the TX worker if
 * more traffic is pending.
 *
 * @retrycnt: retry count reported by the firmware (currently unused,
 *            see comment below)
 * @fail: non-zero if the firmware reported transmission failure
 */
void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);
	memset(&info->status, 0, sizeof(info->status));
	/*
	 * Commented out, otherwise we never go beyond 1Mbit/s using mac80211
	 * default pid rc algorithm.
	 *
	 * info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt;
	 */
	info->status.excessive_retries = fail ? 1 : 0;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail)
		info->flags |= IEEE80211_TX_STAT_ACK;
	/* Strip the txpd descriptor added in lbtf_tx_work() again. */
	skb_pull(priv->tx_skb, sizeof(struct txpd));
	ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb);
	priv->tx_skb = NULL;
	/* Restart mac80211 only when nothing else is waiting to go out. */
	if (!priv->skb_to_tx && skb_queue_empty(&priv->bc_ps_buf))
		ieee80211_wake_queues(priv->hw);
	else
		queue_work(lbtf_wq, &priv->tx_work);
}
611EXPORT_SYMBOL_GPL(lbtf_send_tx_feedback);
612
613void lbtf_bcn_sent(struct lbtf_private *priv)
614{
615 struct sk_buff *skb = NULL;
616
617 if (priv->vif->type != NL80211_IFTYPE_AP)
618 return;
619
620 if (skb_queue_empty(&priv->bc_ps_buf)) {
621 bool tx_buff_bc = 0;
622
623 while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
624 skb_queue_tail(&priv->bc_ps_buf, skb);
625 tx_buff_bc = 1;
626 }
627 if (tx_buff_bc) {
628 ieee80211_stop_queues(priv->hw);
629 queue_work(lbtf_wq, &priv->tx_work);
630 }
631 }
632
633 skb = ieee80211_beacon_get(priv->hw, priv->vif);
634
635 if (skb) {
636 lbtf_beacon_set(priv, skb);
637 kfree_skb(skb);
638 }
639}
640EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
641
642static int __init lbtf_init_module(void)
643{
644 lbtf_wq = create_workqueue("libertastf");
645 if (lbtf_wq == NULL) {
646 printk(KERN_ERR "libertastf: couldn't create workqueue\n");
647 return -ENOMEM;
648 }
649 return 0;
650}
651
/* Module unload: tear down the driver workqueue. */
static void __exit lbtf_exit_module(void)
{
	destroy_workqueue(lbtf_wq);
}
656
/* Module entry/exit points and metadata. */
module_init(lbtf_init_module);
module_exit(lbtf_exit_module);

MODULE_DESCRIPTION("Libertas WLAN Thinfirm Driver Library");
MODULE_AUTHOR("Cozybit Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 248d31a7aa33..c9e4a435b2fc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -14,6 +14,8 @@
14 * - RX filtering based on filter configuration (data->rx_filter) 14 * - RX filtering based on filter configuration (data->rx_filter)
15 */ 15 */
16 16
17#include <linux/list.h>
18#include <linux/spinlock.h>
17#include <net/mac80211.h> 19#include <net/mac80211.h>
18#include <net/ieee80211_radiotap.h> 20#include <net/ieee80211_radiotap.h>
19#include <linux/if_arp.h> 21#include <linux/if_arp.h>
@@ -28,11 +30,56 @@ static int radios = 2;
28module_param(radios, int, 0444); 30module_param(radios, int, 0444);
29MODULE_PARM_DESC(radios, "Number of simulated radios"); 31MODULE_PARM_DESC(radios, "Number of simulated radios");
30 32
33struct hwsim_vif_priv {
34 u32 magic;
35};
36
37#define HWSIM_VIF_MAGIC 0x69537748
38
39static inline void hwsim_check_magic(struct ieee80211_vif *vif)
40{
41 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
42 WARN_ON(vp->magic != HWSIM_VIF_MAGIC);
43}
44
45static inline void hwsim_set_magic(struct ieee80211_vif *vif)
46{
47 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
48 vp->magic = HWSIM_VIF_MAGIC;
49}
50
51static inline void hwsim_clear_magic(struct ieee80211_vif *vif)
52{
53 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
54 vp->magic = 0;
55}
56
57struct hwsim_sta_priv {
58 u32 magic;
59};
60
61#define HWSIM_STA_MAGIC 0x6d537748
62
63static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
64{
65 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
66 WARN_ON(sp->magic != HWSIM_VIF_MAGIC);
67}
68
69static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta)
70{
71 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
72 sp->magic = HWSIM_VIF_MAGIC;
73}
74
75static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta)
76{
77 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
78 sp->magic = 0;
79}
31 80
32static struct class *hwsim_class; 81static struct class *hwsim_class;
33 82
34static struct ieee80211_hw **hwsim_radios;
35static int hwsim_radio_count;
36static struct net_device *hwsim_mon; /* global monitor netdev */ 83static struct net_device *hwsim_mon; /* global monitor netdev */
37 84
38 85
@@ -68,7 +115,12 @@ static const struct ieee80211_rate hwsim_rates[] = {
68 { .bitrate = 540 } 115 { .bitrate = 540 }
69}; 116};
70 117
118static spinlock_t hwsim_radio_lock;
119static struct list_head hwsim_radios;
120
71struct mac80211_hwsim_data { 121struct mac80211_hwsim_data {
122 struct list_head list;
123 struct ieee80211_hw *hw;
72 struct device *dev; 124 struct device *dev;
73 struct ieee80211_supported_band band; 125 struct ieee80211_supported_band band;
74 struct ieee80211_channel channels[ARRAY_SIZE(hwsim_channels)]; 126 struct ieee80211_channel channels[ARRAY_SIZE(hwsim_channels)];
@@ -144,11 +196,11 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
144} 196}
145 197
146 198
147static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, 199static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
148 struct sk_buff *skb) 200 struct sk_buff *skb)
149{ 201{
150 struct mac80211_hwsim_data *data = hw->priv; 202 struct mac80211_hwsim_data *data = hw->priv, *data2;
151 int i, ack = 0; 203 bool ack = false;
152 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 204 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
153 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 205 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
154 struct ieee80211_rx_status rx_status; 206 struct ieee80211_rx_status rx_status;
@@ -161,13 +213,13 @@ static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
161 /* TODO: simulate signal strength (and optional packet drop) */ 213 /* TODO: simulate signal strength (and optional packet drop) */
162 214
163 /* Copy skb to all enabled radios that are on the current frequency */ 215 /* Copy skb to all enabled radios that are on the current frequency */
164 for (i = 0; i < hwsim_radio_count; i++) { 216 spin_lock(&hwsim_radio_lock);
165 struct mac80211_hwsim_data *data2; 217 list_for_each_entry(data2, &hwsim_radios, list) {
166 struct sk_buff *nskb; 218 struct sk_buff *nskb;
167 219
168 if (hwsim_radios[i] == NULL || hwsim_radios[i] == hw) 220 if (data == data2)
169 continue; 221 continue;
170 data2 = hwsim_radios[i]->priv; 222
171 if (!data2->started || !data2->radio_enabled || 223 if (!data2->started || !data2->radio_enabled ||
172 data->channel->center_freq != data2->channel->center_freq) 224 data->channel->center_freq != data2->channel->center_freq)
173 continue; 225 continue;
@@ -176,11 +228,12 @@ static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
176 if (nskb == NULL) 228 if (nskb == NULL)
177 continue; 229 continue;
178 230
179 if (memcmp(hdr->addr1, hwsim_radios[i]->wiphy->perm_addr, 231 if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr,
180 ETH_ALEN) == 0) 232 ETH_ALEN) == 0)
181 ack = 1; 233 ack = true;
182 ieee80211_rx_irqsafe(hwsim_radios[i], nskb, &rx_status); 234 ieee80211_rx_irqsafe(data2->hw, nskb, &rx_status);
183 } 235 }
236 spin_unlock(&hwsim_radio_lock);
184 237
185 return ack; 238 return ack;
186} 239}
@@ -189,7 +242,7 @@ static int mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
189static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 242static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
190{ 243{
191 struct mac80211_hwsim_data *data = hw->priv; 244 struct mac80211_hwsim_data *data = hw->priv;
192 int ack; 245 bool ack;
193 struct ieee80211_tx_info *txi; 246 struct ieee80211_tx_info *txi;
194 247
195 mac80211_hwsim_monitor_rx(hw, skb); 248 mac80211_hwsim_monitor_rx(hw, skb);
@@ -210,6 +263,12 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
210 ack = mac80211_hwsim_tx_frame(hw, skb); 263 ack = mac80211_hwsim_tx_frame(hw, skb);
211 264
212 txi = IEEE80211_SKB_CB(skb); 265 txi = IEEE80211_SKB_CB(skb);
266
267 if (txi->control.vif)
268 hwsim_check_magic(txi->control.vif);
269 if (txi->control.sta)
270 hwsim_check_sta_magic(txi->control.sta);
271
213 memset(&txi->status, 0, sizeof(txi->status)); 272 memset(&txi->status, 0, sizeof(txi->status));
214 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) { 273 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) {
215 if (ack) 274 if (ack)
@@ -246,6 +305,7 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
246 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n", 305 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
247 wiphy_name(hw->wiphy), __func__, conf->type, 306 wiphy_name(hw->wiphy), __func__, conf->type,
248 print_mac(mac, conf->mac_addr)); 307 print_mac(mac, conf->mac_addr));
308 hwsim_set_magic(conf->vif);
249 return 0; 309 return 0;
250} 310}
251 311
@@ -257,6 +317,8 @@ static void mac80211_hwsim_remove_interface(
257 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n", 317 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
258 wiphy_name(hw->wiphy), __func__, conf->type, 318 wiphy_name(hw->wiphy), __func__, conf->type,
259 print_mac(mac, conf->mac_addr)); 319 print_mac(mac, conf->mac_addr));
320 hwsim_check_magic(conf->vif);
321 hwsim_clear_magic(conf->vif);
260} 322}
261 323
262 324
@@ -267,7 +329,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
267 struct sk_buff *skb; 329 struct sk_buff *skb;
268 struct ieee80211_tx_info *info; 330 struct ieee80211_tx_info *info;
269 331
270 if (vif->type != IEEE80211_IF_TYPE_AP) 332 hwsim_check_magic(vif);
333
334 if (vif->type != NL80211_IFTYPE_AP)
271 return; 335 return;
272 336
273 skb = ieee80211_beacon_get(hw, vif); 337 skb = ieee80211_beacon_get(hw, vif);
@@ -341,7 +405,45 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
341 *total_flags = data->rx_filter; 405 *total_flags = data->rx_filter;
342} 406}
343 407
408static int mac80211_hwsim_config_interface(struct ieee80211_hw *hw,
409 struct ieee80211_vif *vif,
410 struct ieee80211_if_conf *conf)
411{
412 hwsim_check_magic(vif);
413 return 0;
414}
415
416static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
417 struct ieee80211_vif *vif,
418 struct ieee80211_bss_conf *info,
419 u32 changed)
420{
421 hwsim_check_magic(vif);
422}
423
424static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
425 struct ieee80211_vif *vif,
426 enum sta_notify_cmd cmd,
427 struct ieee80211_sta *sta)
428{
429 hwsim_check_magic(vif);
430 switch (cmd) {
431 case STA_NOTIFY_ADD:
432 hwsim_set_sta_magic(sta);
433 break;
434 case STA_NOTIFY_REMOVE:
435 hwsim_clear_sta_magic(sta);
436 break;
437 }
438}
344 439
440static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
441 struct ieee80211_sta *sta,
442 bool set)
443{
444 hwsim_check_sta_magic(sta);
445 return 0;
446}
345 447
346static const struct ieee80211_ops mac80211_hwsim_ops = 448static const struct ieee80211_ops mac80211_hwsim_ops =
347{ 449{
@@ -352,23 +454,30 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
352 .remove_interface = mac80211_hwsim_remove_interface, 454 .remove_interface = mac80211_hwsim_remove_interface,
353 .config = mac80211_hwsim_config, 455 .config = mac80211_hwsim_config,
354 .configure_filter = mac80211_hwsim_configure_filter, 456 .configure_filter = mac80211_hwsim_configure_filter,
457 .config_interface = mac80211_hwsim_config_interface,
458 .bss_info_changed = mac80211_hwsim_bss_info_changed,
459 .sta_notify = mac80211_hwsim_sta_notify,
460 .set_tim = mac80211_hwsim_set_tim,
355}; 461};
356 462
357 463
358static void mac80211_hwsim_free(void) 464static void mac80211_hwsim_free(void)
359{ 465{
360 int i; 466 struct list_head tmplist, *i, *tmp;
361 467 struct mac80211_hwsim_data *data;
362 for (i = 0; i < hwsim_radio_count; i++) { 468
363 if (hwsim_radios[i]) { 469 INIT_LIST_HEAD(&tmplist);
364 struct mac80211_hwsim_data *data; 470
365 data = hwsim_radios[i]->priv; 471 spin_lock_bh(&hwsim_radio_lock);
366 ieee80211_unregister_hw(hwsim_radios[i]); 472 list_for_each_safe(i, tmp, &hwsim_radios)
367 device_unregister(data->dev); 473 list_move(i, &tmplist);
368 ieee80211_free_hw(hwsim_radios[i]); 474 spin_unlock_bh(&hwsim_radio_lock);
369 } 475
476 list_for_each_entry(data, &tmplist, list) {
477 ieee80211_unregister_hw(data->hw);
478 device_unregister(data->dev);
479 ieee80211_free_hw(data->hw);
370 } 480 }
371 kfree(hwsim_radios);
372 class_destroy(hwsim_class); 481 class_destroy(hwsim_class);
373} 482}
374 483
@@ -398,37 +507,32 @@ static int __init init_mac80211_hwsim(void)
398 struct ieee80211_hw *hw; 507 struct ieee80211_hw *hw;
399 DECLARE_MAC_BUF(mac); 508 DECLARE_MAC_BUF(mac);
400 509
401 if (radios < 1 || radios > 65535) 510 if (radios < 1 || radios > 100)
402 return -EINVAL; 511 return -EINVAL;
403 512
404 hwsim_radio_count = radios; 513 spin_lock_init(&hwsim_radio_lock);
405 hwsim_radios = kcalloc(hwsim_radio_count, 514 INIT_LIST_HEAD(&hwsim_radios);
406 sizeof(struct ieee80211_hw *), GFP_KERNEL);
407 if (hwsim_radios == NULL)
408 return -ENOMEM;
409 515
410 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); 516 hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
411 if (IS_ERR(hwsim_class)) { 517 if (IS_ERR(hwsim_class))
412 kfree(hwsim_radios);
413 return PTR_ERR(hwsim_class); 518 return PTR_ERR(hwsim_class);
414 }
415 519
416 memset(addr, 0, ETH_ALEN); 520 memset(addr, 0, ETH_ALEN);
417 addr[0] = 0x02; 521 addr[0] = 0x02;
418 522
419 for (i = 0; i < hwsim_radio_count; i++) { 523 for (i = 0; i < radios; i++) {
420 printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n", 524 printk(KERN_DEBUG "mac80211_hwsim: Initializing radio %d\n",
421 i); 525 i);
422 hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops); 526 hw = ieee80211_alloc_hw(sizeof(*data), &mac80211_hwsim_ops);
423 if (hw == NULL) { 527 if (!hw) {
424 printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw " 528 printk(KERN_DEBUG "mac80211_hwsim: ieee80211_alloc_hw "
425 "failed\n"); 529 "failed\n");
426 err = -ENOMEM; 530 err = -ENOMEM;
427 goto failed; 531 goto failed;
428 } 532 }
429 hwsim_radios[i] = hw;
430
431 data = hw->priv; 533 data = hw->priv;
534 data->hw = hw;
535
432 data->dev = device_create_drvdata(hwsim_class, NULL, 0, hw, 536 data->dev = device_create_drvdata(hwsim_class, NULL, 0, hw,
433 "hwsim%d", i); 537 "hwsim%d", i);
434 if (IS_ERR(data->dev)) { 538 if (IS_ERR(data->dev)) {
@@ -446,7 +550,15 @@ static int __init init_mac80211_hwsim(void)
446 SET_IEEE80211_PERM_ADDR(hw, addr); 550 SET_IEEE80211_PERM_ADDR(hw, addr);
447 551
448 hw->channel_change_time = 1; 552 hw->channel_change_time = 1;
449 hw->queues = 1; 553 hw->queues = 4;
554 hw->wiphy->interface_modes =
555 BIT(NL80211_IFTYPE_STATION) |
556 BIT(NL80211_IFTYPE_AP);
557 hw->ampdu_queues = 1;
558
559 /* ask mac80211 to reserve space for magic */
560 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
561 hw->sta_data_size = sizeof(struct hwsim_sta_priv);
450 562
451 memcpy(data->channels, hwsim_channels, sizeof(hwsim_channels)); 563 memcpy(data->channels, hwsim_channels, sizeof(hwsim_channels));
452 memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); 564 memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
@@ -454,6 +566,19 @@ static int __init init_mac80211_hwsim(void)
454 data->band.n_channels = ARRAY_SIZE(hwsim_channels); 566 data->band.n_channels = ARRAY_SIZE(hwsim_channels);
455 data->band.bitrates = data->rates; 567 data->band.bitrates = data->rates;
456 data->band.n_bitrates = ARRAY_SIZE(hwsim_rates); 568 data->band.n_bitrates = ARRAY_SIZE(hwsim_rates);
569 data->band.ht_info.ht_supported = 1;
570 data->band.ht_info.cap = IEEE80211_HT_CAP_SUP_WIDTH |
571 IEEE80211_HT_CAP_GRN_FLD |
572 IEEE80211_HT_CAP_SGI_40 |
573 IEEE80211_HT_CAP_DSSSCCK40;
574 data->band.ht_info.ampdu_factor = 0x3;
575 data->band.ht_info.ampdu_density = 0x6;
576 memset(data->band.ht_info.supp_mcs_set, 0,
577 sizeof(data->band.ht_info.supp_mcs_set));
578 data->band.ht_info.supp_mcs_set[0] = 0xff;
579 data->band.ht_info.supp_mcs_set[1] = 0xff;
580 data->band.ht_info.supp_mcs_set[12] =
581 IEEE80211_HT_CAP_MCS_TX_DEFINED;
457 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &data->band; 582 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &data->band;
458 583
459 err = ieee80211_register_hw(hw); 584 err = ieee80211_register_hw(hw);
@@ -469,6 +594,8 @@ static int __init init_mac80211_hwsim(void)
469 594
470 setup_timer(&data->beacon_timer, mac80211_hwsim_beacon, 595 setup_timer(&data->beacon_timer, mac80211_hwsim_beacon,
471 (unsigned long) hw); 596 (unsigned long) hw);
597
598 list_add_tail(&data->list, &hwsim_radios);
472 } 599 }
473 600
474 hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup); 601 hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
@@ -500,7 +627,6 @@ failed_hw:
500 device_unregister(data->dev); 627 device_unregister(data->dev);
501failed_drvdata: 628failed_drvdata:
502 ieee80211_free_hw(hw); 629 ieee80211_free_hw(hw);
503 hwsim_radios[i] = NULL;
504failed: 630failed:
505 mac80211_hwsim_free(); 631 mac80211_hwsim_free();
506 return err; 632 return err;
@@ -509,8 +635,7 @@ failed:
509 635
510static void __exit exit_mac80211_hwsim(void) 636static void __exit exit_mac80211_hwsim(void)
511{ 637{
512 printk(KERN_DEBUG "mac80211_hwsim: unregister %d radios\n", 638 printk(KERN_DEBUG "mac80211_hwsim: unregister radios\n");
513 hwsim_radio_count);
514 639
515 unregister_netdev(hwsim_mon); 640 unregister_netdev(hwsim_mon);
516 mac80211_hwsim_free(); 641 mac80211_hwsim_free();
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index f479c1af6782..25bae7933aa5 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -398,7 +398,7 @@ static int netwave_probe(struct pcmcia_device *link)
398 link->io.IOAddrLines = 5; 398 link->io.IOAddrLines = 5;
399 399
400 /* Interrupt setup */ 400 /* Interrupt setup */
401 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 401 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
402 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 402 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
403 link->irq.Handler = &netwave_interrupt; 403 link->irq.Handler = &netwave_interrupt;
404 404
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 36c004e15602..50904771f291 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -79,15 +79,21 @@
79#include <linux/module.h> 79#include <linux/module.h>
80#include <linux/kernel.h> 80#include <linux/kernel.h>
81#include <linux/init.h> 81#include <linux/init.h>
82#include <linux/delay.h>
82#include <linux/netdevice.h> 83#include <linux/netdevice.h>
83#include <linux/etherdevice.h> 84#include <linux/etherdevice.h>
84#include <linux/ethtool.h> 85#include <linux/ethtool.h>
86#include <linux/firmware.h>
85#include <linux/if_arp.h> 87#include <linux/if_arp.h>
86#include <linux/wireless.h> 88#include <linux/wireless.h>
87#include <net/iw_handler.h> 89#include <net/iw_handler.h>
88#include <net/ieee80211.h> 90#include <net/ieee80211.h>
89 91
92#include <linux/scatterlist.h>
93#include <linux/crypto.h>
94
90#include "hermes_rid.h" 95#include "hermes_rid.h"
96#include "hermes_dld.h"
91#include "orinoco.h" 97#include "orinoco.h"
92 98
93/********************************************************************/ 99/********************************************************************/
@@ -241,6 +247,74 @@ static int __orinoco_program_rids(struct net_device *dev);
241static void __orinoco_set_multicast_list(struct net_device *dev); 247static void __orinoco_set_multicast_list(struct net_device *dev);
242 248
243/********************************************************************/ 249/********************************************************************/
250/* Michael MIC crypto setup */
251/********************************************************************/
252#define MICHAEL_MIC_LEN 8
253static int orinoco_mic_init(struct orinoco_private *priv)
254{
255 priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
256 if (IS_ERR(priv->tx_tfm_mic)) {
257 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
258 "crypto API michael_mic\n");
259 priv->tx_tfm_mic = NULL;
260 return -ENOMEM;
261 }
262
263 priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
264 if (IS_ERR(priv->rx_tfm_mic)) {
265 printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
266 "crypto API michael_mic\n");
267 priv->rx_tfm_mic = NULL;
268 return -ENOMEM;
269 }
270
271 return 0;
272}
273
274static void orinoco_mic_free(struct orinoco_private *priv)
275{
276 if (priv->tx_tfm_mic)
277 crypto_free_hash(priv->tx_tfm_mic);
278 if (priv->rx_tfm_mic)
279 crypto_free_hash(priv->rx_tfm_mic);
280}
281
282static int michael_mic(struct crypto_hash *tfm_michael, u8 *key,
283 u8 *da, u8 *sa, u8 priority,
284 u8 *data, size_t data_len, u8 *mic)
285{
286 struct hash_desc desc;
287 struct scatterlist sg[2];
288 u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
289
290 if (tfm_michael == NULL) {
291 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
292 return -1;
293 }
294
295 /* Copy header into buffer. We need the padding on the end zeroed */
296 memcpy(&hdr[0], da, ETH_ALEN);
297 memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
298 hdr[ETH_ALEN*2] = priority;
299 hdr[ETH_ALEN*2+1] = 0;
300 hdr[ETH_ALEN*2+2] = 0;
301 hdr[ETH_ALEN*2+3] = 0;
302
303 /* Use scatter gather to MIC header and data in one go */
304 sg_init_table(sg, 2);
305 sg_set_buf(&sg[0], hdr, sizeof(hdr));
306 sg_set_buf(&sg[1], data, data_len);
307
308 if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
309 return -1;
310
311 desc.tfm = tfm_michael;
312 desc.flags = 0;
313 return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
314 mic);
315}
316
317/********************************************************************/
244/* Internal helper functions */ 318/* Internal helper functions */
245/********************************************************************/ 319/********************************************************************/
246 320
@@ -273,12 +347,19 @@ static inline void set_port_type(struct orinoco_private *priv)
273#define ORINOCO_MAX_BSS_COUNT 64 347#define ORINOCO_MAX_BSS_COUNT 64
274static int orinoco_bss_data_allocate(struct orinoco_private *priv) 348static int orinoco_bss_data_allocate(struct orinoco_private *priv)
275{ 349{
276 if (priv->bss_data) 350 if (priv->bss_xbss_data)
277 return 0; 351 return 0;
278 352
279 priv->bss_data = 353 if (priv->has_ext_scan)
280 kzalloc(ORINOCO_MAX_BSS_COUNT * sizeof(bss_element), GFP_KERNEL); 354 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
281 if (!priv->bss_data) { 355 sizeof(struct xbss_element),
356 GFP_KERNEL);
357 else
358 priv->bss_xbss_data = kzalloc(ORINOCO_MAX_BSS_COUNT *
359 sizeof(struct bss_element),
360 GFP_KERNEL);
361
362 if (!priv->bss_xbss_data) {
282 printk(KERN_WARNING "Out of memory allocating beacons"); 363 printk(KERN_WARNING "Out of memory allocating beacons");
283 return -ENOMEM; 364 return -ENOMEM;
284 } 365 }
@@ -287,18 +368,319 @@ static int orinoco_bss_data_allocate(struct orinoco_private *priv)
287 368
288static void orinoco_bss_data_free(struct orinoco_private *priv) 369static void orinoco_bss_data_free(struct orinoco_private *priv)
289{ 370{
290 kfree(priv->bss_data); 371 kfree(priv->bss_xbss_data);
291 priv->bss_data = NULL; 372 priv->bss_xbss_data = NULL;
292} 373}
293 374
375#define PRIV_BSS ((struct bss_element *)priv->bss_xbss_data)
376#define PRIV_XBSS ((struct xbss_element *)priv->bss_xbss_data)
294static void orinoco_bss_data_init(struct orinoco_private *priv) 377static void orinoco_bss_data_init(struct orinoco_private *priv)
295{ 378{
296 int i; 379 int i;
297 380
298 INIT_LIST_HEAD(&priv->bss_free_list); 381 INIT_LIST_HEAD(&priv->bss_free_list);
299 INIT_LIST_HEAD(&priv->bss_list); 382 INIT_LIST_HEAD(&priv->bss_list);
300 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++) 383 if (priv->has_ext_scan)
301 list_add_tail(&priv->bss_data[i].list, &priv->bss_free_list); 384 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
385 list_add_tail(&(PRIV_XBSS[i].list),
386 &priv->bss_free_list);
387 else
388 for (i = 0; i < ORINOCO_MAX_BSS_COUNT; i++)
389 list_add_tail(&(PRIV_BSS[i].list),
390 &priv->bss_free_list);
391
392}
393
394static inline u8 *orinoco_get_ie(u8 *data, size_t len,
395 enum ieee80211_mfie eid)
396{
397 u8 *p = data;
398 while ((p + 2) < (data + len)) {
399 if (p[0] == eid)
400 return p;
401 p += p[1] + 2;
402 }
403 return NULL;
404}
405
406#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
407#define WPA_SELECTOR_LEN 4
408static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
409{
410 u8 *p = data;
411 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
412 if ((p[0] == MFIE_TYPE_GENERIC) &&
413 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
414 return p;
415 p += p[1] + 2;
416 }
417 return NULL;
418}
419
420
421/********************************************************************/
422/* Download functionality */
423/********************************************************************/
424
425struct fw_info {
426 char *pri_fw;
427 char *sta_fw;
428 char *ap_fw;
429 u32 pda_addr;
430 u16 pda_size;
431};
432
433const static struct fw_info orinoco_fw[] = {
434 { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
435 { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
436 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 0x100 }
437};
438
439/* Structure used to access fields in FW
440 * Make sure LE decoding macros are used
441 */
442struct orinoco_fw_header {
443 char hdr_vers[6]; /* ASCII string for header version */
444 __le16 headersize; /* Total length of header */
445 __le32 entry_point; /* NIC entry point */
446 __le32 blocks; /* Number of blocks to program */
447 __le32 block_offset; /* Offset of block data from eof header */
448 __le32 pdr_offset; /* Offset to PDR data from eof header */
449 __le32 pri_offset; /* Offset to primary plug data */
450 __le32 compat_offset; /* Offset to compatibility data*/
451 char signature[0]; /* FW signature length headersize-20 */
452} __attribute__ ((packed));
453
454/* Download either STA or AP firmware into the card. */
455static int
456orinoco_dl_firmware(struct orinoco_private *priv,
457 const struct fw_info *fw,
458 int ap)
459{
460 /* Plug Data Area (PDA) */
461 __le16 pda[512] = { 0 };
462
463 hermes_t *hw = &priv->hw;
464 const struct firmware *fw_entry;
465 const struct orinoco_fw_header *hdr;
466 const unsigned char *first_block;
467 const unsigned char *end;
468 const char *firmware;
469 struct net_device *dev = priv->ndev;
470 int err;
471
472 if (ap)
473 firmware = fw->ap_fw;
474 else
475 firmware = fw->sta_fw;
476
477 printk(KERN_DEBUG "%s: Attempting to download firmware %s\n",
478 dev->name, firmware);
479
480 /* Read current plug data */
481 err = hermes_read_pda(hw, pda, fw->pda_addr,
482 min_t(u16, fw->pda_size, sizeof(pda)), 0);
483 printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
484 if (err)
485 return err;
486
487 err = request_firmware(&fw_entry, firmware, priv->dev);
488 if (err) {
489 printk(KERN_ERR "%s: Cannot find firmware %s\n",
490 dev->name, firmware);
491 return -ENOENT;
492 }
493
494 hdr = (const struct orinoco_fw_header *) fw_entry->data;
495
496 /* Enable aux port to allow programming */
497 err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point));
498 printk(KERN_DEBUG "%s: Program init returned %d\n", dev->name, err);
499 if (err != 0)
500 goto abort;
501
502 /* Program data */
503 first_block = (fw_entry->data +
504 le16_to_cpu(hdr->headersize) +
505 le32_to_cpu(hdr->block_offset));
506 end = fw_entry->data + fw_entry->size;
507
508 err = hermes_program(hw, first_block, end);
509 printk(KERN_DEBUG "%s: Program returned %d\n", dev->name, err);
510 if (err != 0)
511 goto abort;
512
513 /* Update production data */
514 first_block = (fw_entry->data +
515 le16_to_cpu(hdr->headersize) +
516 le32_to_cpu(hdr->pdr_offset));
517
518 err = hermes_apply_pda_with_defaults(hw, first_block, pda);
519 printk(KERN_DEBUG "%s: Apply PDA returned %d\n", dev->name, err);
520 if (err)
521 goto abort;
522
523 /* Tell card we've finished */
524 err = hermesi_program_end(hw);
525 printk(KERN_DEBUG "%s: Program end returned %d\n", dev->name, err);
526 if (err != 0)
527 goto abort;
528
529 /* Check if we're running */
530 printk(KERN_DEBUG "%s: hermes_present returned %d\n",
531 dev->name, hermes_present(hw));
532
533abort:
534 release_firmware(fw_entry);
535 return err;
536}
537
538/* End markers */
539#define TEXT_END 0x1A /* End of text header */
540
541/*
542 * Process a firmware image - stop the card, load the firmware, reset
543 * the card and make sure it responds. For the secondary firmware take
544 * care of the PDA - read it and then write it on top of the firmware.
545 */
546static int
547symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
548 const unsigned char *image, const unsigned char *end,
549 int secondary)
550{
551 hermes_t *hw = &priv->hw;
552 int ret;
553 const unsigned char *ptr;
554 const unsigned char *first_block;
555
556 /* Plug Data Area (PDA) */
557 __le16 pda[256];
558
559 /* Binary block begins after the 0x1A marker */
560 ptr = image;
561 while (*ptr++ != TEXT_END);
562 first_block = ptr;
563
564 /* Read the PDA from EEPROM */
565 if (secondary) {
566 ret = hermes_read_pda(hw, pda, fw->pda_addr, sizeof(pda), 1);
567 if (ret)
568 return ret;
569 }
570
571 /* Stop the firmware, so that it can be safely rewritten */
572 if (priv->stop_fw) {
573 ret = priv->stop_fw(priv, 1);
574 if (ret)
575 return ret;
576 }
577
578 /* Program the adapter with new firmware */
579 ret = hermes_program(hw, first_block, end);
580 if (ret)
581 return ret;
582
583 /* Write the PDA to the adapter */
584 if (secondary) {
585 size_t len = hermes_blocks_length(first_block);
586 ptr = first_block + len;
587 ret = hermes_apply_pda(hw, ptr, pda);
588 if (ret)
589 return ret;
590 }
591
592 /* Run the firmware */
593 if (priv->stop_fw) {
594 ret = priv->stop_fw(priv, 0);
595 if (ret)
596 return ret;
597 }
598
599 /* Reset hermes chip and make sure it responds */
600 ret = hermes_init(hw);
601
602 /* hermes_reset() should return 0 with the secondary firmware */
603 if (secondary && ret != 0)
604 return -ENODEV;
605
606 /* And this should work with any firmware */
607 if (!hermes_present(hw))
608 return -ENODEV;
609
610 return 0;
611}
612
613
614/*
615 * Download the firmware into the card, this also does a PCMCIA soft
616 * reset on the card, to make sure it's in a sane state.
617 */
618static int
619symbol_dl_firmware(struct orinoco_private *priv,
620 const struct fw_info *fw)
621{
622 struct net_device *dev = priv->ndev;
623 int ret;
624 const struct firmware *fw_entry;
625
626 if (request_firmware(&fw_entry, fw->pri_fw,
627 priv->dev) != 0) {
628 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
629 dev->name, fw->pri_fw);
630 return -ENOENT;
631 }
632
633 /* Load primary firmware */
634 ret = symbol_dl_image(priv, fw, fw_entry->data,
635 fw_entry->data + fw_entry->size, 0);
636 release_firmware(fw_entry);
637 if (ret) {
638 printk(KERN_ERR "%s: Primary firmware download failed\n",
639 dev->name);
640 return ret;
641 }
642
643 if (request_firmware(&fw_entry, fw->sta_fw,
644 priv->dev) != 0) {
645 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
646 dev->name, fw->sta_fw);
647 return -ENOENT;
648 }
649
650 /* Load secondary firmware */
651 ret = symbol_dl_image(priv, fw, fw_entry->data,
652 fw_entry->data + fw_entry->size, 1);
653 release_firmware(fw_entry);
654 if (ret) {
655 printk(KERN_ERR "%s: Secondary firmware download failed\n",
656 dev->name);
657 }
658
659 return ret;
660}
661
662static int orinoco_download(struct orinoco_private *priv)
663{
664 int err = 0;
665 /* Reload firmware */
666 switch (priv->firmware_type) {
667 case FIRMWARE_TYPE_AGERE:
668 /* case FIRMWARE_TYPE_INTERSIL: */
669 err = orinoco_dl_firmware(priv,
670 &orinoco_fw[priv->firmware_type], 0);
671 break;
672
673 case FIRMWARE_TYPE_SYMBOL:
674 err = symbol_dl_firmware(priv,
675 &orinoco_fw[priv->firmware_type]);
676 break;
677 case FIRMWARE_TYPE_INTERSIL:
678 break;
679 }
680 /* TODO: if we fail we probably need to reinitialise
681 * the driver */
682
683 return err;
302} 684}
303 685
304/********************************************************************/ 686/********************************************************************/
@@ -453,8 +835,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
453 int err = 0; 835 int err = 0;
454 u16 txfid = priv->txfid; 836 u16 txfid = priv->txfid;
455 struct ethhdr *eh; 837 struct ethhdr *eh;
456 int data_off; 838 int tx_control;
457 struct hermes_tx_descriptor desc;
458 unsigned long flags; 839 unsigned long flags;
459 840
460 if (! netif_running(dev)) { 841 if (! netif_running(dev)) {
@@ -486,23 +867,54 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
486 if (skb->len < ETH_HLEN) 867 if (skb->len < ETH_HLEN)
487 goto drop; 868 goto drop;
488 869
489 eh = (struct ethhdr *)skb->data; 870 tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
490 871
491 memset(&desc, 0, sizeof(desc)); 872 if (priv->encode_alg == IW_ENCODE_ALG_TKIP)
492 desc.tx_control = cpu_to_le16(HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX); 873 tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
493 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0); 874 HERMES_TXCTRL_MIC;
494 if (err) { 875
495 if (net_ratelimit()) 876 if (priv->has_alt_txcntl) {
496 printk(KERN_ERR "%s: Error %d writing Tx descriptor " 877 /* WPA enabled firmwares have tx_cntl at the end of
497 "to BAP\n", dev->name, err); 878 * the 802.11 header. So write zeroed descriptor and
498 goto busy; 879 * 802.11 header at the same time
880 */
881 char desc[HERMES_802_3_OFFSET];
882 __le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET];
883
884 memset(&desc, 0, sizeof(desc));
885
886 *txcntl = cpu_to_le16(tx_control);
887 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
888 txfid, 0);
889 if (err) {
890 if (net_ratelimit())
891 printk(KERN_ERR "%s: Error %d writing Tx "
892 "descriptor to BAP\n", dev->name, err);
893 goto busy;
894 }
895 } else {
896 struct hermes_tx_descriptor desc;
897
898 memset(&desc, 0, sizeof(desc));
899
900 desc.tx_control = cpu_to_le16(tx_control);
901 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
902 txfid, 0);
903 if (err) {
904 if (net_ratelimit())
905 printk(KERN_ERR "%s: Error %d writing Tx "
906 "descriptor to BAP\n", dev->name, err);
907 goto busy;
908 }
909
910 /* Clear the 802.11 header and data length fields - some
911 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
912 * if this isn't done. */
913 hermes_clear_words(hw, HERMES_DATA0,
914 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
499 } 915 }
500 916
501 /* Clear the 802.11 header and data length fields - some 917 eh = (struct ethhdr *)skb->data;
502 * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused
503 * if this isn't done. */
504 hermes_clear_words(hw, HERMES_DATA0,
505 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
506 918
507 /* Encapsulate Ethernet-II frames */ 919 /* Encapsulate Ethernet-II frames */
508 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */ 920 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
@@ -513,33 +925,65 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
513 925
514 /* Strip destination and source from the data */ 926 /* Strip destination and source from the data */
515 skb_pull(skb, 2 * ETH_ALEN); 927 skb_pull(skb, 2 * ETH_ALEN);
516 data_off = HERMES_802_2_OFFSET + sizeof(encaps_hdr);
517 928
518 /* And move them to a separate header */ 929 /* And move them to a separate header */
519 memcpy(&hdr.eth, eh, 2 * ETH_ALEN); 930 memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
520 hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len); 931 hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
521 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr)); 932 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
522 933
523 err = hermes_bap_pwrite(hw, USER_BAP, &hdr, sizeof(hdr), 934 /* Insert the SNAP header */
524 txfid, HERMES_802_3_OFFSET); 935 if (skb_headroom(skb) < sizeof(hdr)) {
525 if (err) { 936 printk(KERN_ERR
526 if (net_ratelimit()) 937 "%s: Not enough headroom for 802.2 headers %d\n",
527 printk(KERN_ERR "%s: Error %d writing packet " 938 dev->name, skb_headroom(skb));
528 "header to BAP\n", dev->name, err); 939 goto drop;
529 goto busy;
530 } 940 }
531 } else { /* IEEE 802.3 frame */ 941 eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
532 data_off = HERMES_802_3_OFFSET; 942 memcpy(eh, &hdr, sizeof(hdr));
533 } 943 }
534 944
535 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len, 945 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
536 txfid, data_off); 946 txfid, HERMES_802_3_OFFSET);
537 if (err) { 947 if (err) {
538 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 948 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
539 dev->name, err); 949 dev->name, err);
540 goto busy; 950 goto busy;
541 } 951 }
542 952
953 /* Calculate Michael MIC */
954 if (priv->encode_alg == IW_ENCODE_ALG_TKIP) {
955 u8 mic_buf[MICHAEL_MIC_LEN + 1];
956 u8 *mic;
957 size_t offset;
958 size_t len;
959
960 if (skb->len % 2) {
961 /* MIC start is on an odd boundary */
962 mic_buf[0] = skb->data[skb->len - 1];
963 mic = &mic_buf[1];
964 offset = skb->len - 1;
965 len = MICHAEL_MIC_LEN + 1;
966 } else {
967 mic = &mic_buf[0];
968 offset = skb->len;
969 len = MICHAEL_MIC_LEN;
970 }
971
972 michael_mic(priv->tx_tfm_mic,
973 priv->tkip_key[priv->tx_key].tx_mic,
974 eh->h_dest, eh->h_source, 0 /* priority */,
975 skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
976
977 /* Write the MIC */
978 err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
979 txfid, HERMES_802_3_OFFSET + offset);
980 if (err) {
981 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
982 dev->name, err);
983 goto busy;
984 }
985 }
986
543 /* Finally, we actually initiate the send */ 987 /* Finally, we actually initiate the send */
544 netif_stop_queue(dev); 988 netif_stop_queue(dev);
545 989
@@ -554,7 +998,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
554 } 998 }
555 999
556 dev->trans_start = jiffies; 1000 dev->trans_start = jiffies;
557 stats->tx_bytes += data_off + skb->len; 1001 stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
558 goto ok; 1002 goto ok;
559 1003
560 drop: 1004 drop:
@@ -834,21 +1278,48 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
834 stats->rx_dropped++; 1278 stats->rx_dropped++;
835} 1279}
836 1280
1281/* Get tsc from the firmware */
1282static int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key,
1283 u8 *tsc)
1284{
1285 hermes_t *hw = &priv->hw;
1286 int err = 0;
1287 u8 tsc_arr[4][IW_ENCODE_SEQ_MAX_SIZE];
1288
1289 if ((key < 0) || (key > 4))
1290 return -EINVAL;
1291
1292 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
1293 sizeof(tsc_arr), NULL, &tsc_arr);
1294 if (!err)
1295 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
1296
1297 return err;
1298}
1299
837static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw) 1300static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
838{ 1301{
839 struct orinoco_private *priv = netdev_priv(dev); 1302 struct orinoco_private *priv = netdev_priv(dev);
840 struct net_device_stats *stats = &priv->stats; 1303 struct net_device_stats *stats = &priv->stats;
841 struct iw_statistics *wstats = &priv->wstats; 1304 struct iw_statistics *wstats = &priv->wstats;
842 struct sk_buff *skb = NULL; 1305 struct sk_buff *skb = NULL;
843 u16 rxfid, status, fc; 1306 u16 rxfid, status;
844 int length; 1307 int length;
845 struct hermes_rx_descriptor desc; 1308 struct hermes_rx_descriptor *desc;
846 struct ethhdr *hdr; 1309 struct orinoco_rx_data *rx_data;
847 int err; 1310 int err;
848 1311
1312 desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
1313 if (!desc) {
1314 printk(KERN_WARNING
1315 "%s: Can't allocate space for RX descriptor\n",
1316 dev->name);
1317 goto update_stats;
1318 }
1319
849 rxfid = hermes_read_regn(hw, RXFID); 1320 rxfid = hermes_read_regn(hw, RXFID);
850 1321
851 err = hermes_bap_pread(hw, IRQ_BAP, &desc, sizeof(desc), 1322 err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
852 rxfid, 0); 1323 rxfid, 0);
853 if (err) { 1324 if (err) {
854 printk(KERN_ERR "%s: error %d reading Rx descriptor. " 1325 printk(KERN_ERR "%s: error %d reading Rx descriptor. "
@@ -856,7 +1327,7 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
856 goto update_stats; 1327 goto update_stats;
857 } 1328 }
858 1329
859 status = le16_to_cpu(desc.status); 1330 status = le16_to_cpu(desc->status);
860 1331
861 if (status & HERMES_RXSTAT_BADCRC) { 1332 if (status & HERMES_RXSTAT_BADCRC) {
862 DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", 1333 DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n",
@@ -867,8 +1338,8 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
867 1338
868 /* Handle frames in monitor mode */ 1339 /* Handle frames in monitor mode */
869 if (priv->iw_mode == IW_MODE_MONITOR) { 1340 if (priv->iw_mode == IW_MODE_MONITOR) {
870 orinoco_rx_monitor(dev, rxfid, &desc); 1341 orinoco_rx_monitor(dev, rxfid, desc);
871 return; 1342 goto out;
872 } 1343 }
873 1344
874 if (status & HERMES_RXSTAT_UNDECRYPTABLE) { 1345 if (status & HERMES_RXSTAT_UNDECRYPTABLE) {
@@ -878,15 +1349,14 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
878 goto update_stats; 1349 goto update_stats;
879 } 1350 }
880 1351
881 length = le16_to_cpu(desc.data_len); 1352 length = le16_to_cpu(desc->data_len);
882 fc = le16_to_cpu(desc.frame_ctl);
883 1353
884 /* Sanity checks */ 1354 /* Sanity checks */
885 if (length < 3) { /* No for even an 802.2 LLC header */ 1355 if (length < 3) { /* No for even an 802.2 LLC header */
886 /* At least on Symbol firmware with PCF we get quite a 1356 /* At least on Symbol firmware with PCF we get quite a
887 lot of these legitimately - Poll frames with no 1357 lot of these legitimately - Poll frames with no
888 data. */ 1358 data. */
889 return; 1359 goto out;
890 } 1360 }
891 if (length > IEEE80211_DATA_LEN) { 1361 if (length > IEEE80211_DATA_LEN) {
892 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n", 1362 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
@@ -895,6 +1365,11 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
895 goto update_stats; 1365 goto update_stats;
896 } 1366 }
897 1367
1368 /* Payload size does not include Michael MIC. Increase payload
1369 * size to read it together with the data. */
1370 if (status & HERMES_RXSTAT_MIC)
1371 length += MICHAEL_MIC_LEN;
1372
898 /* We need space for the packet data itself, plus an ethernet 1373 /* We need space for the packet data itself, plus an ethernet
899 header, plus 2 bytes so we can align the IP header on a 1374 header, plus 2 bytes so we can align the IP header on a
900 32bit boundary, plus 1 byte so we can read in odd length 1375 32bit boundary, plus 1 byte so we can read in odd length
@@ -921,6 +1396,100 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
921 goto drop; 1396 goto drop;
922 } 1397 }
923 1398
1399 /* Add desc and skb to rx queue */
1400 rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC);
1401 if (!rx_data) {
1402 printk(KERN_WARNING "%s: Can't allocate RX packet\n",
1403 dev->name);
1404 goto drop;
1405 }
1406 rx_data->desc = desc;
1407 rx_data->skb = skb;
1408 list_add_tail(&rx_data->list, &priv->rx_list);
1409 tasklet_schedule(&priv->rx_tasklet);
1410
1411 return;
1412
1413drop:
1414 dev_kfree_skb_irq(skb);
1415update_stats:
1416 stats->rx_errors++;
1417 stats->rx_dropped++;
1418out:
1419 kfree(desc);
1420}
1421
1422static void orinoco_rx(struct net_device *dev,
1423 struct hermes_rx_descriptor *desc,
1424 struct sk_buff *skb)
1425{
1426 struct orinoco_private *priv = netdev_priv(dev);
1427 struct net_device_stats *stats = &priv->stats;
1428 u16 status, fc;
1429 int length;
1430 struct ethhdr *hdr;
1431
1432 status = le16_to_cpu(desc->status);
1433 length = le16_to_cpu(desc->data_len);
1434 fc = le16_to_cpu(desc->frame_ctl);
1435
1436 /* Calculate and check MIC */
1437 if (status & HERMES_RXSTAT_MIC) {
1438 int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >>
1439 HERMES_MIC_KEY_ID_SHIFT);
1440 u8 mic[MICHAEL_MIC_LEN];
1441 u8 *rxmic;
1442 u8 *src = (fc & IEEE80211_FCTL_FROMDS) ?
1443 desc->addr3 : desc->addr2;
1444
1445 /* Extract Michael MIC from payload */
1446 rxmic = skb->data + skb->len - MICHAEL_MIC_LEN;
1447
1448 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
1449 length -= MICHAEL_MIC_LEN;
1450
1451 michael_mic(priv->rx_tfm_mic,
1452 priv->tkip_key[key_id].rx_mic,
1453 desc->addr1,
1454 src,
1455 0, /* priority or QoS? */
1456 skb->data,
1457 skb->len,
1458 &mic[0]);
1459
1460 if (memcmp(mic, rxmic,
1461 MICHAEL_MIC_LEN)) {
1462 union iwreq_data wrqu;
1463 struct iw_michaelmicfailure wxmic;
1464 DECLARE_MAC_BUF(mac);
1465
1466 printk(KERN_WARNING "%s: "
1467 "Invalid Michael MIC in data frame from %s, "
1468 "using key %i\n",
1469 dev->name, print_mac(mac, src), key_id);
1470
1471 /* TODO: update stats */
1472
1473 /* Notify userspace */
1474 memset(&wxmic, 0, sizeof(wxmic));
1475 wxmic.flags = key_id & IW_MICFAILURE_KEY_ID;
1476 wxmic.flags |= (desc->addr1[0] & 1) ?
1477 IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE;
1478 wxmic.src_addr.sa_family = ARPHRD_ETHER;
1479 memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN);
1480
1481 (void) orinoco_hw_get_tkip_iv(priv, key_id,
1482 &wxmic.tsc[0]);
1483
1484 memset(&wrqu, 0, sizeof(wrqu));
1485 wrqu.data.length = sizeof(wxmic);
1486 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu,
1487 (char *) &wxmic);
1488
1489 goto drop;
1490 }
1491 }
1492
924 /* Handle decapsulation 1493 /* Handle decapsulation
925 * In most cases, the firmware tell us about SNAP frames. 1494 * In most cases, the firmware tell us about SNAP frames.
926 * For some reason, the SNAP frames sent by LinkSys APs 1495 * For some reason, the SNAP frames sent by LinkSys APs
@@ -939,11 +1508,11 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
939 hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN); 1508 hdr = (struct ethhdr *)skb_push(skb, ETH_HLEN);
940 hdr->h_proto = htons(length); 1509 hdr->h_proto = htons(length);
941 } 1510 }
942 memcpy(hdr->h_dest, desc.addr1, ETH_ALEN); 1511 memcpy(hdr->h_dest, desc->addr1, ETH_ALEN);
943 if (fc & IEEE80211_FCTL_FROMDS) 1512 if (fc & IEEE80211_FCTL_FROMDS)
944 memcpy(hdr->h_source, desc.addr3, ETH_ALEN); 1513 memcpy(hdr->h_source, desc->addr3, ETH_ALEN);
945 else 1514 else
946 memcpy(hdr->h_source, desc.addr2, ETH_ALEN); 1515 memcpy(hdr->h_source, desc->addr2, ETH_ALEN);
947 1516
948 dev->last_rx = jiffies; 1517 dev->last_rx = jiffies;
949 skb->protocol = eth_type_trans(skb, dev); 1518 skb->protocol = eth_type_trans(skb, dev);
@@ -952,7 +1521,7 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
952 skb->pkt_type = PACKET_OTHERHOST; 1521 skb->pkt_type = PACKET_OTHERHOST;
953 1522
954 /* Process the wireless stats if needed */ 1523 /* Process the wireless stats if needed */
955 orinoco_stat_gather(dev, skb, &desc); 1524 orinoco_stat_gather(dev, skb, desc);
956 1525
957 /* Pass the packet to the networking stack */ 1526 /* Pass the packet to the networking stack */
958 netif_rx(skb); 1527 netif_rx(skb);
@@ -961,13 +1530,33 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
961 1530
962 return; 1531 return;
963 1532
964 drop: 1533 drop:
965 dev_kfree_skb_irq(skb); 1534 dev_kfree_skb(skb);
966 update_stats:
967 stats->rx_errors++; 1535 stats->rx_errors++;
968 stats->rx_dropped++; 1536 stats->rx_dropped++;
969} 1537}
970 1538
1539static void orinoco_rx_isr_tasklet(unsigned long data)
1540{
1541 struct net_device *dev = (struct net_device *) data;
1542 struct orinoco_private *priv = netdev_priv(dev);
1543 struct orinoco_rx_data *rx_data, *temp;
1544 struct hermes_rx_descriptor *desc;
1545 struct sk_buff *skb;
1546
1547 /* extract desc and skb from queue */
1548 list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) {
1549 desc = rx_data->desc;
1550 skb = rx_data->skb;
1551 list_del(&rx_data->list);
1552 kfree(rx_data);
1553
1554 orinoco_rx(dev, desc, skb);
1555
1556 kfree(desc);
1557 }
1558}
1559
971/********************************************************************/ 1560/********************************************************************/
972/* Rx path (info frames) */ 1561/* Rx path (info frames) */
973/********************************************************************/ 1562/********************************************************************/
@@ -1087,52 +1676,172 @@ static void orinoco_join_ap(struct work_struct *work)
1087} 1676}
1088 1677
1089/* Send new BSSID to userspace */ 1678/* Send new BSSID to userspace */
1090static void orinoco_send_wevents(struct work_struct *work) 1679static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
1091{ 1680{
1092 struct orinoco_private *priv =
1093 container_of(work, struct orinoco_private, wevent_work);
1094 struct net_device *dev = priv->ndev; 1681 struct net_device *dev = priv->ndev;
1095 struct hermes *hw = &priv->hw; 1682 struct hermes *hw = &priv->hw;
1096 union iwreq_data wrqu; 1683 union iwreq_data wrqu;
1097 int err; 1684 int err;
1098 unsigned long flags;
1099
1100 if (orinoco_lock(priv, &flags) != 0)
1101 return;
1102 1685
1103 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID, 1686 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID,
1104 ETH_ALEN, NULL, wrqu.ap_addr.sa_data); 1687 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1105 if (err != 0) 1688 if (err != 0)
1106 goto out; 1689 return;
1107 1690
1108 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1691 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1109 1692
1110 /* Send event to user space */ 1693 /* Send event to user space */
1111 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 1694 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
1695}
1112 1696
1113 out: 1697static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
1114 orinoco_unlock(priv, &flags); 1698{
1699 struct net_device *dev = priv->ndev;
1700 struct hermes *hw = &priv->hw;
1701 union iwreq_data wrqu;
1702 int err;
1703 u8 buf[88];
1704 u8 *ie;
1705
1706 if (!priv->has_wpa)
1707 return;
1708
1709 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
1710 sizeof(buf), NULL, &buf);
1711 if (err != 0)
1712 return;
1713
1714 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1715 if (ie) {
1716 int rem = sizeof(buf) - (ie - &buf[0]);
1717 wrqu.data.length = ie[1] + 2;
1718 if (wrqu.data.length > rem)
1719 wrqu.data.length = rem;
1720
1721 if (wrqu.data.length)
1722 /* Send event to user space */
1723 wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie);
1724 }
1115} 1725}
1116 1726
1727static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
1728{
1729 struct net_device *dev = priv->ndev;
1730 struct hermes *hw = &priv->hw;
1731 union iwreq_data wrqu;
1732 int err;
1733 u8 buf[88]; /* TODO: verify max size or IW_GENERIC_IE_MAX */
1734 u8 *ie;
1735
1736 if (!priv->has_wpa)
1737 return;
1738
1739 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
1740 sizeof(buf), NULL, &buf);
1741 if (err != 0)
1742 return;
1743
1744 ie = orinoco_get_wpa_ie(buf, sizeof(buf));
1745 if (ie) {
1746 int rem = sizeof(buf) - (ie - &buf[0]);
1747 wrqu.data.length = ie[1] + 2;
1748 if (wrqu.data.length > rem)
1749 wrqu.data.length = rem;
1750
1751 if (wrqu.data.length)
1752 /* Send event to user space */
1753 wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie);
1754 }
1755}
1756
1757static void orinoco_send_wevents(struct work_struct *work)
1758{
1759 struct orinoco_private *priv =
1760 container_of(work, struct orinoco_private, wevent_work);
1761 unsigned long flags;
1762
1763 if (orinoco_lock(priv, &flags) != 0)
1764 return;
1765
1766 orinoco_send_assocreqie_wevent(priv);
1767 orinoco_send_assocrespie_wevent(priv);
1768 orinoco_send_bssid_wevent(priv);
1769
1770 orinoco_unlock(priv, &flags);
1771}
1117 1772
1118static inline void orinoco_clear_scan_results(struct orinoco_private *priv, 1773static inline void orinoco_clear_scan_results(struct orinoco_private *priv,
1119 unsigned long scan_age) 1774 unsigned long scan_age)
1120{ 1775{
1121 bss_element *bss; 1776 if (priv->has_ext_scan) {
1122 bss_element *tmp_bss; 1777 struct xbss_element *bss;
1123 1778 struct xbss_element *tmp_bss;
1124 /* Blow away current list of scan results */ 1779
1125 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) { 1780 /* Blow away current list of scan results */
1126 if (!scan_age || 1781 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
1127 time_after(jiffies, bss->last_scanned + scan_age)) { 1782 if (!scan_age ||
1128 list_move_tail(&bss->list, &priv->bss_free_list); 1783 time_after(jiffies, bss->last_scanned + scan_age)) {
1129 /* Don't blow away ->list, just BSS data */ 1784 list_move_tail(&bss->list,
1130 memset(bss, 0, sizeof(bss->bss)); 1785 &priv->bss_free_list);
1131 bss->last_scanned = 0; 1786 /* Don't blow away ->list, just BSS data */
1787 memset(&bss->bss, 0, sizeof(bss->bss));
1788 bss->last_scanned = 0;
1789 }
1790 }
1791 } else {
1792 struct bss_element *bss;
1793 struct bss_element *tmp_bss;
1794
1795 /* Blow away current list of scan results */
1796 list_for_each_entry_safe(bss, tmp_bss, &priv->bss_list, list) {
1797 if (!scan_age ||
1798 time_after(jiffies, bss->last_scanned + scan_age)) {
1799 list_move_tail(&bss->list,
1800 &priv->bss_free_list);
1801 /* Don't blow away ->list, just BSS data */
1802 memset(&bss->bss, 0, sizeof(bss->bss));
1803 bss->last_scanned = 0;
1804 }
1132 } 1805 }
1133 } 1806 }
1134} 1807}
1135 1808
1809static void orinoco_add_ext_scan_result(struct orinoco_private *priv,
1810 struct agere_ext_scan_info *atom)
1811{
1812 struct xbss_element *bss = NULL;
1813 int found = 0;
1814
1815 /* Try to update an existing bss first */
1816 list_for_each_entry(bss, &priv->bss_list, list) {
1817 if (compare_ether_addr(bss->bss.bssid, atom->bssid))
1818 continue;
1819 /* ESSID lengths */
1820 if (bss->bss.data[1] != atom->data[1])
1821 continue;
1822 if (memcmp(&bss->bss.data[2], &atom->data[2],
1823 atom->data[1]))
1824 continue;
1825 found = 1;
1826 break;
1827 }
1828
1829 /* Grab a bss off the free list */
1830 if (!found && !list_empty(&priv->bss_free_list)) {
1831 bss = list_entry(priv->bss_free_list.next,
1832 struct xbss_element, list);
1833 list_del(priv->bss_free_list.next);
1834
1835 list_add_tail(&bss->list, &priv->bss_list);
1836 }
1837
1838 if (bss) {
1839 /* Always update the BSS to get latest beacon info */
1840 memcpy(&bss->bss, atom, sizeof(bss->bss));
1841 bss->last_scanned = jiffies;
1842 }
1843}
1844
1136static int orinoco_process_scan_results(struct net_device *dev, 1845static int orinoco_process_scan_results(struct net_device *dev,
1137 unsigned char *buf, 1846 unsigned char *buf,
1138 int len) 1847 int len)
@@ -1194,7 +1903,7 @@ static int orinoco_process_scan_results(struct net_device *dev,
1194 /* Read the entries one by one */ 1903 /* Read the entries one by one */
1195 for (; offset + atom_len <= len; offset += atom_len) { 1904 for (; offset + atom_len <= len; offset += atom_len) {
1196 int found = 0; 1905 int found = 0;
1197 bss_element *bss = NULL; 1906 struct bss_element *bss = NULL;
1198 1907
1199 /* Get next atom */ 1908 /* Get next atom */
1200 atom = (union hermes_scan_info *) (buf + offset); 1909 atom = (union hermes_scan_info *) (buf + offset);
@@ -1216,7 +1925,7 @@ static int orinoco_process_scan_results(struct net_device *dev,
1216 /* Grab a bss off the free list */ 1925 /* Grab a bss off the free list */
1217 if (!found && !list_empty(&priv->bss_free_list)) { 1926 if (!found && !list_empty(&priv->bss_free_list)) {
1218 bss = list_entry(priv->bss_free_list.next, 1927 bss = list_entry(priv->bss_free_list.next,
1219 bss_element, list); 1928 struct bss_element, list);
1220 list_del(priv->bss_free_list.next); 1929 list_del(priv->bss_free_list.next);
1221 1930
1222 list_add_tail(&bss->list, &priv->bss_list); 1931 list_add_tail(&bss->list, &priv->bss_list);
@@ -1404,6 +2113,63 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1404 kfree(buf); 2113 kfree(buf);
1405 } 2114 }
1406 break; 2115 break;
2116 case HERMES_INQ_CHANNELINFO:
2117 {
2118 struct agere_ext_scan_info *bss;
2119
2120 if (!priv->scan_inprogress) {
2121 printk(KERN_DEBUG "%s: Got chaninfo without scan, "
2122 "len=%d\n", dev->name, len);
2123 break;
2124 }
2125
2126 /* An empty result indicates that the scan is complete */
2127 if (len == 0) {
2128 union iwreq_data wrqu;
2129
2130 /* Scan is no longer in progress */
2131 priv->scan_inprogress = 0;
2132
2133 wrqu.data.length = 0;
2134 wrqu.data.flags = 0;
2135 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
2136 break;
2137 }
2138
2139 /* Sanity check */
2140 else if (len > sizeof(*bss)) {
2141 printk(KERN_WARNING
2142 "%s: Ext scan results too large (%d bytes). "
2143 "Truncating results to %zd bytes.\n",
2144 dev->name, len, sizeof(*bss));
2145 len = sizeof(*bss);
2146 } else if (len < (offsetof(struct agere_ext_scan_info,
2147 data) + 2)) {
2148 /* Drop this result now so we don't have to
2149 * keep checking later */
2150 printk(KERN_WARNING
2151 "%s: Ext scan results too short (%d bytes)\n",
2152 dev->name, len);
2153 break;
2154 }
2155
2156 bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
2157 if (bss == NULL)
2158 break;
2159
2160 /* Read scan data */
2161 err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len,
2162 infofid, sizeof(info));
2163 if (err) {
2164 kfree(bss);
2165 break;
2166 }
2167
2168 orinoco_add_ext_scan_result(priv, bss);
2169
2170 kfree(bss);
2171 break;
2172 }
1407 case HERMES_INQ_SEC_STAT_AGERE: 2173 case HERMES_INQ_SEC_STAT_AGERE:
1408 /* Security status (Agere specific) */ 2174 /* Security status (Agere specific) */
1409 /* Ignore this frame for now */ 2175 /* Ignore this frame for now */
@@ -1586,7 +2352,7 @@ static int __orinoco_hw_set_wap(struct orinoco_private *priv)
1586} 2352}
1587 2353
1588/* Change the WEP keys and/or the current keys. Can be called 2354/* Change the WEP keys and/or the current keys. Can be called
1589 * either from __orinoco_hw_setup_wep() or directly from 2355 * either from __orinoco_hw_setup_enc() or directly from
1590 * orinoco_ioctl_setiwencode(). In the later case the association 2356 * orinoco_ioctl_setiwencode(). In the later case the association
1591 * with the AP is not broken (if the firmware can handle it), 2357 * with the AP is not broken (if the firmware can handle it),
1592 * which is needed for 802.1x implementations. */ 2358 * which is needed for 802.1x implementations. */
@@ -1646,14 +2412,16 @@ static int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
1646 return 0; 2412 return 0;
1647} 2413}
1648 2414
1649static int __orinoco_hw_setup_wep(struct orinoco_private *priv) 2415static int __orinoco_hw_setup_enc(struct orinoco_private *priv)
1650{ 2416{
1651 hermes_t *hw = &priv->hw; 2417 hermes_t *hw = &priv->hw;
1652 int err = 0; 2418 int err = 0;
1653 int master_wep_flag; 2419 int master_wep_flag;
1654 int auth_flag; 2420 int auth_flag;
2421 int enc_flag;
1655 2422
1656 if (priv->wep_on) 2423 /* Setup WEP keys for WEP and WPA */
2424 if (priv->encode_alg)
1657 __orinoco_hw_setup_wepkeys(priv); 2425 __orinoco_hw_setup_wepkeys(priv);
1658 2426
1659 if (priv->wep_restrict) 2427 if (priv->wep_restrict)
@@ -1661,9 +2429,16 @@ static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
1661 else 2429 else
1662 auth_flag = HERMES_AUTH_OPEN; 2430 auth_flag = HERMES_AUTH_OPEN;
1663 2431
2432 if (priv->wpa_enabled)
2433 enc_flag = 2;
2434 else if (priv->encode_alg == IW_ENCODE_ALG_WEP)
2435 enc_flag = 1;
2436 else
2437 enc_flag = 0;
2438
1664 switch (priv->firmware_type) { 2439 switch (priv->firmware_type) {
1665 case FIRMWARE_TYPE_AGERE: /* Agere style WEP */ 2440 case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
1666 if (priv->wep_on) { 2441 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
1667 /* Enable the shared-key authentication. */ 2442 /* Enable the shared-key authentication. */
1668 err = hermes_write_wordrec(hw, USER_BAP, 2443 err = hermes_write_wordrec(hw, USER_BAP,
1669 HERMES_RID_CNFAUTHENTICATION_AGERE, 2444 HERMES_RID_CNFAUTHENTICATION_AGERE,
@@ -1671,14 +2446,24 @@ static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
1671 } 2446 }
1672 err = hermes_write_wordrec(hw, USER_BAP, 2447 err = hermes_write_wordrec(hw, USER_BAP,
1673 HERMES_RID_CNFWEPENABLED_AGERE, 2448 HERMES_RID_CNFWEPENABLED_AGERE,
1674 priv->wep_on); 2449 enc_flag);
1675 if (err) 2450 if (err)
1676 return err; 2451 return err;
2452
2453 if (priv->has_wpa) {
2454 /* Set WPA key management */
2455 err = hermes_write_wordrec(hw, USER_BAP,
2456 HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE,
2457 priv->key_mgmt);
2458 if (err)
2459 return err;
2460 }
2461
1677 break; 2462 break;
1678 2463
1679 case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */ 2464 case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
1680 case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */ 2465 case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
1681 if (priv->wep_on) { 2466 if (priv->encode_alg == IW_ENCODE_ALG_WEP) {
1682 if (priv->wep_restrict || 2467 if (priv->wep_restrict ||
1683 (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)) 2468 (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
1684 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED | 2469 master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
@@ -1710,6 +2495,84 @@ static int __orinoco_hw_setup_wep(struct orinoco_private *priv)
1710 return 0; 2495 return 0;
1711} 2496}
1712 2497
2498/* key must be 32 bytes, including the tx and rx MIC keys.
2499 * rsc must be 8 bytes
2500 * tsc must be 8 bytes or NULL
2501 */
2502static int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx,
2503 u8 *key, u8 *rsc, u8 *tsc)
2504{
2505 struct {
2506 __le16 idx;
2507 u8 rsc[IW_ENCODE_SEQ_MAX_SIZE];
2508 u8 key[TKIP_KEYLEN];
2509 u8 tx_mic[MIC_KEYLEN];
2510 u8 rx_mic[MIC_KEYLEN];
2511 u8 tsc[IW_ENCODE_SEQ_MAX_SIZE];
2512 } __attribute__ ((packed)) buf;
2513 int ret;
2514 int err;
2515 int k;
2516 u16 xmitting;
2517
2518 key_idx &= 0x3;
2519
2520 if (set_tx)
2521 key_idx |= 0x8000;
2522
2523 buf.idx = cpu_to_le16(key_idx);
2524 memcpy(buf.key, key,
2525 sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));
2526
2527 if (rsc == NULL)
2528 memset(buf.rsc, 0, sizeof(buf.rsc));
2529 else
2530 memcpy(buf.rsc, rsc, sizeof(buf.rsc));
2531
2532 if (tsc == NULL) {
2533 memset(buf.tsc, 0, sizeof(buf.tsc));
2534 buf.tsc[4] = 0x10;
2535 } else {
2536 memcpy(buf.tsc, tsc, sizeof(buf.tsc));
2537 }
2538
2539 /* Wait upto 100ms for tx queue to empty */
2540 k = 100;
2541 do {
2542 k--;
2543 udelay(1000);
2544 ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
2545 &xmitting);
2546 if (ret)
2547 break;
2548 } while ((k > 0) && xmitting);
2549
2550 if (k == 0)
2551 ret = -ETIMEDOUT;
2552
2553 err = HERMES_WRITE_RECORD(hw, USER_BAP,
2554 HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE,
2555 &buf);
2556
2557 return ret ? ret : err;
2558}
2559
2560static int orinoco_clear_tkip_key(struct orinoco_private *priv,
2561 int key_idx)
2562{
2563 hermes_t *hw = &priv->hw;
2564 int err;
2565
2566 memset(&priv->tkip_key[key_idx], 0, sizeof(priv->tkip_key[key_idx]));
2567 err = hermes_write_wordrec(hw, USER_BAP,
2568 HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE,
2569 key_idx);
2570 if (err)
2571 printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n",
2572 priv->ndev->name, err, key_idx);
2573 return err;
2574}
2575
1713static int __orinoco_program_rids(struct net_device *dev) 2576static int __orinoco_program_rids(struct net_device *dev)
1714{ 2577{
1715 struct orinoco_private *priv = netdev_priv(dev); 2578 struct orinoco_private *priv = netdev_priv(dev);
@@ -1906,10 +2769,10 @@ static int __orinoco_program_rids(struct net_device *dev)
1906 } 2769 }
1907 2770
1908 /* Set up encryption */ 2771 /* Set up encryption */
1909 if (priv->has_wep) { 2772 if (priv->has_wep || priv->has_wpa) {
1910 err = __orinoco_hw_setup_wep(priv); 2773 err = __orinoco_hw_setup_enc(priv);
1911 if (err) { 2774 if (err) {
1912 printk(KERN_ERR "%s: Error %d activating WEP\n", 2775 printk(KERN_ERR "%s: Error %d activating encryption\n",
1913 dev->name, err); 2776 dev->name, err);
1914 return err; 2777 return err;
1915 } 2778 }
@@ -2047,6 +2910,12 @@ static void orinoco_reset(struct work_struct *work)
2047 } 2910 }
2048 } 2911 }
2049 2912
2913 if (priv->do_fw_download) {
2914 err = orinoco_download(priv);
2915 if (err)
2916 priv->do_fw_download = 0;
2917 }
2918
2050 err = orinoco_reinit_firmware(dev); 2919 err = orinoco_reinit_firmware(dev);
2051 if (err) { 2920 if (err) {
2052 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n", 2921 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
@@ -2258,6 +3127,10 @@ static int determine_firmware(struct net_device *dev)
2258 priv->has_ibss = 1; 3127 priv->has_ibss = 1;
2259 priv->has_wep = 0; 3128 priv->has_wep = 0;
2260 priv->has_big_wep = 0; 3129 priv->has_big_wep = 0;
3130 priv->has_alt_txcntl = 0;
3131 priv->has_ext_scan = 0;
3132 priv->has_wpa = 0;
3133 priv->do_fw_download = 0;
2261 3134
2262 /* Determine capabilities from the firmware version */ 3135 /* Determine capabilities from the firmware version */
2263 switch (priv->firmware_type) { 3136 switch (priv->firmware_type) {
@@ -2277,8 +3150,11 @@ static int determine_firmware(struct net_device *dev)
2277 priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */ 3150 priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */
2278 priv->ibss_port = 1; 3151 priv->ibss_port = 1;
2279 priv->has_hostscan = (firmver >= 0x8000a); 3152 priv->has_hostscan = (firmver >= 0x8000a);
3153 priv->do_fw_download = 1;
2280 priv->broken_monitor = (firmver >= 0x80000); 3154 priv->broken_monitor = (firmver >= 0x80000);
2281 3155 priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */
3156 priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */
3157 priv->has_wpa = (firmver >= 0x9002a);
2282 /* Tested with Agere firmware : 3158 /* Tested with Agere firmware :
2283 * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II 3159 * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
2284 * Tested CableTron firmware : 4.32 => Anton */ 3160 * Tested CableTron firmware : 4.32 => Anton */
@@ -2321,6 +3197,21 @@ static int determine_firmware(struct net_device *dev)
2321 firmver >= 0x31000; 3197 firmver >= 0x31000;
2322 priv->has_preamble = (firmver >= 0x20000); 3198 priv->has_preamble = (firmver >= 0x20000);
2323 priv->ibss_port = 4; 3199 priv->ibss_port = 4;
3200
3201 /* Symbol firmware is found on various cards, but
3202 * there has been no attempt to check firmware
3203 * download on non-spectrum_cs based cards.
3204 *
3205 * Given that the Agere firmware download works
3206 * differently, we should avoid doing a firmware
3207 * download with the Symbol algorithm on non-spectrum
3208 * cards.
3209 *
3210 * For now we can identify a spectrum_cs based card
3211 * because it has a firmware reset function.
3212 */
3213 priv->do_fw_download = (priv->stop_fw != NULL);
3214
2324 priv->broken_disableport = (firmver == 0x25013) || 3215 priv->broken_disableport = (firmver == 0x25013) ||
2325 (firmver >= 0x30000 && firmver <= 0x31000); 3216 (firmver >= 0x30000 && firmver <= 0x31000);
2326 priv->has_hostscan = (firmver >= 0x31001) || 3217 priv->has_hostscan = (firmver >= 0x31001) ||
@@ -2391,6 +3282,20 @@ static int orinoco_init(struct net_device *dev)
2391 goto out; 3282 goto out;
2392 } 3283 }
2393 3284
3285 if (priv->do_fw_download) {
3286 err = orinoco_download(priv);
3287 if (err)
3288 priv->do_fw_download = 0;
3289
3290 /* Check firmware version again */
3291 err = determine_firmware(dev);
3292 if (err != 0) {
3293 printk(KERN_ERR "%s: Incompatible firmware, aborting\n",
3294 dev->name);
3295 goto out;
3296 }
3297 }
3298
2394 if (priv->has_port3) 3299 if (priv->has_port3)
2395 printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name); 3300 printk(KERN_DEBUG "%s: Ad-hoc demo mode supported\n", dev->name);
2396 if (priv->has_ibss) 3301 if (priv->has_ibss)
@@ -2403,6 +3308,20 @@ static int orinoco_init(struct net_device *dev)
2403 else 3308 else
2404 printk("40-bit key\n"); 3309 printk("40-bit key\n");
2405 } 3310 }
3311 if (priv->has_wpa) {
3312 printk(KERN_DEBUG "%s: WPA-PSK supported\n", dev->name);
3313 if (orinoco_mic_init(priv)) {
3314 printk(KERN_ERR "%s: Failed to setup MIC crypto "
3315 "algorithm. Disabling WPA support\n", dev->name);
3316 priv->has_wpa = 0;
3317 }
3318 }
3319
3320 /* Now we have the firmware capabilities, allocate appropiate
3321 * sized scan buffers */
3322 if (orinoco_bss_data_allocate(priv))
3323 goto out;
3324 orinoco_bss_data_init(priv);
2406 3325
2407 /* Get the MAC address */ 3326 /* Get the MAC address */
2408 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, 3327 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
@@ -2518,8 +3437,13 @@ static int orinoco_init(struct net_device *dev)
2518 priv->channel = 0; /* use firmware default */ 3437 priv->channel = 0; /* use firmware default */
2519 3438
2520 priv->promiscuous = 0; 3439 priv->promiscuous = 0;
2521 priv->wep_on = 0; 3440 priv->encode_alg = IW_ENCODE_ALG_NONE;
2522 priv->tx_key = 0; 3441 priv->tx_key = 0;
3442 priv->wpa_enabled = 0;
3443 priv->tkip_cm_active = 0;
3444 priv->key_mgmt = 0;
3445 priv->wpa_ie_len = 0;
3446 priv->wpa_ie = NULL;
2523 3447
2524 /* Make the hardware available, as long as it hasn't been 3448 /* Make the hardware available, as long as it hasn't been
2525 * removed elsewhere (e.g. by PCMCIA hot unplug) */ 3449 * removed elsewhere (e.g. by PCMCIA hot unplug) */
@@ -2533,8 +3457,11 @@ static int orinoco_init(struct net_device *dev)
2533 return err; 3457 return err;
2534} 3458}
2535 3459
2536struct net_device *alloc_orinocodev(int sizeof_card, 3460struct net_device
2537 int (*hard_reset)(struct orinoco_private *)) 3461*alloc_orinocodev(int sizeof_card,
3462 struct device *device,
3463 int (*hard_reset)(struct orinoco_private *),
3464 int (*stop_fw)(struct orinoco_private *, int))
2538{ 3465{
2539 struct net_device *dev; 3466 struct net_device *dev;
2540 struct orinoco_private *priv; 3467 struct orinoco_private *priv;
@@ -2549,10 +3476,7 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2549 + sizeof(struct orinoco_private)); 3476 + sizeof(struct orinoco_private));
2550 else 3477 else
2551 priv->card = NULL; 3478 priv->card = NULL;
2552 3479 priv->dev = device;
2553 if (orinoco_bss_data_allocate(priv))
2554 goto err_out_free;
2555 orinoco_bss_data_init(priv);
2556 3480
2557 /* Setup / override net_device fields */ 3481 /* Setup / override net_device fields */
2558 dev->init = orinoco_init; 3482 dev->init = orinoco_init;
@@ -2570,10 +3494,14 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2570 dev->set_multicast_list = orinoco_set_multicast_list; 3494 dev->set_multicast_list = orinoco_set_multicast_list;
2571 /* we use the default eth_mac_addr for setting the MAC addr */ 3495 /* we use the default eth_mac_addr for setting the MAC addr */
2572 3496
3497 /* Reserve space in skb for the SNAP header */
3498 dev->hard_header_len += ENCAPS_OVERHEAD;
3499
2573 /* Set up default callbacks */ 3500 /* Set up default callbacks */
2574 dev->open = orinoco_open; 3501 dev->open = orinoco_open;
2575 dev->stop = orinoco_stop; 3502 dev->stop = orinoco_stop;
2576 priv->hard_reset = hard_reset; 3503 priv->hard_reset = hard_reset;
3504 priv->stop_fw = stop_fw;
2577 3505
2578 spin_lock_init(&priv->lock); 3506 spin_lock_init(&priv->lock);
2579 priv->open = 0; 3507 priv->open = 0;
@@ -2584,20 +3512,27 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2584 INIT_WORK(&priv->join_work, orinoco_join_ap); 3512 INIT_WORK(&priv->join_work, orinoco_join_ap);
2585 INIT_WORK(&priv->wevent_work, orinoco_send_wevents); 3513 INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
2586 3514
3515 INIT_LIST_HEAD(&priv->rx_list);
3516 tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
3517 (unsigned long) dev);
3518
2587 netif_carrier_off(dev); 3519 netif_carrier_off(dev);
2588 priv->last_linkstatus = 0xffff; 3520 priv->last_linkstatus = 0xffff;
2589 3521
2590 return dev; 3522 return dev;
2591
2592err_out_free:
2593 free_netdev(dev);
2594 return NULL;
2595} 3523}
2596 3524
2597void free_orinocodev(struct net_device *dev) 3525void free_orinocodev(struct net_device *dev)
2598{ 3526{
2599 struct orinoco_private *priv = netdev_priv(dev); 3527 struct orinoco_private *priv = netdev_priv(dev);
2600 3528
3529 /* No need to empty priv->rx_list: if the tasklet is scheduled
3530 * when we call tasklet_kill it will run one final time,
3531 * emptying the list */
3532 tasklet_kill(&priv->rx_tasklet);
3533 priv->wpa_ie_len = 0;
3534 kfree(priv->wpa_ie);
3535 orinoco_mic_free(priv);
2601 orinoco_bss_data_free(priv); 3536 orinoco_bss_data_free(priv);
2602 free_netdev(dev); 3537 free_netdev(dev);
2603} 3538}
@@ -2909,7 +3844,7 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2909 memset(range, 0, sizeof(struct iw_range)); 3844 memset(range, 0, sizeof(struct iw_range));
2910 3845
2911 range->we_version_compiled = WIRELESS_EXT; 3846 range->we_version_compiled = WIRELESS_EXT;
2912 range->we_version_source = 14; 3847 range->we_version_source = 22;
2913 3848
2914 /* Set available channels/frequencies */ 3849 /* Set available channels/frequencies */
2915 range->num_channels = NUM_CHANNELS; 3850 range->num_channels = NUM_CHANNELS;
@@ -2939,6 +3874,9 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2939 } 3874 }
2940 } 3875 }
2941 3876
3877 if (priv->has_wpa)
3878 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_CIPHER_TKIP;
3879
2942 if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){ 3880 if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){
2943 /* Quality stats meaningless in ad-hoc mode */ 3881 /* Quality stats meaningless in ad-hoc mode */
2944 } else { 3882 } else {
@@ -2986,6 +3924,11 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2986 range->min_r_time = 0; 3924 range->min_r_time = 0;
2987 range->max_r_time = 65535 * 1000; /* ??? */ 3925 range->max_r_time = 65535 * 1000; /* ??? */
2988 3926
3927 if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
3928 range->scan_capa = IW_SCAN_CAPA_ESSID;
3929 else
3930 range->scan_capa = IW_SCAN_CAPA_NONE;
3931
2989 /* Event capability (kernel) */ 3932 /* Event capability (kernel) */
2990 IW_EVENT_CAPA_SET_KERNEL(range->event_capa); 3933 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
2991 /* Event capability (driver) */ 3934 /* Event capability (driver) */
@@ -3005,7 +3948,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3005 struct orinoco_private *priv = netdev_priv(dev); 3948 struct orinoco_private *priv = netdev_priv(dev);
3006 int index = (erq->flags & IW_ENCODE_INDEX) - 1; 3949 int index = (erq->flags & IW_ENCODE_INDEX) - 1;
3007 int setindex = priv->tx_key; 3950 int setindex = priv->tx_key;
3008 int enable = priv->wep_on; 3951 int encode_alg = priv->encode_alg;
3009 int restricted = priv->wep_restrict; 3952 int restricted = priv->wep_restrict;
3010 u16 xlen = 0; 3953 u16 xlen = 0;
3011 int err = -EINPROGRESS; /* Call commit handler */ 3954 int err = -EINPROGRESS; /* Call commit handler */
@@ -3026,6 +3969,10 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3026 if (orinoco_lock(priv, &flags) != 0) 3969 if (orinoco_lock(priv, &flags) != 0)
3027 return -EBUSY; 3970 return -EBUSY;
3028 3971
3972 /* Clear any TKIP key we have */
3973 if ((priv->has_wpa) && (priv->encode_alg == IW_ENCODE_ALG_TKIP))
3974 (void) orinoco_clear_tkip_key(priv, setindex);
3975
3029 if (erq->length > 0) { 3976 if (erq->length > 0) {
3030 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) 3977 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
3031 index = priv->tx_key; 3978 index = priv->tx_key;
@@ -3039,9 +3986,9 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3039 xlen = 0; 3986 xlen = 0;
3040 3987
3041 /* Switch on WEP if off */ 3988 /* Switch on WEP if off */
3042 if ((!enable) && (xlen > 0)) { 3989 if ((encode_alg != IW_ENCODE_ALG_WEP) && (xlen > 0)) {
3043 setindex = index; 3990 setindex = index;
3044 enable = 1; 3991 encode_alg = IW_ENCODE_ALG_WEP;
3045 } 3992 }
3046 } else { 3993 } else {
3047 /* Important note : if the user do "iwconfig eth0 enc off", 3994 /* Important note : if the user do "iwconfig eth0 enc off",
@@ -3063,7 +4010,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3063 } 4010 }
3064 4011
3065 if (erq->flags & IW_ENCODE_DISABLED) 4012 if (erq->flags & IW_ENCODE_DISABLED)
3066 enable = 0; 4013 encode_alg = IW_ENCODE_ALG_NONE;
3067 if (erq->flags & IW_ENCODE_OPEN) 4014 if (erq->flags & IW_ENCODE_OPEN)
3068 restricted = 0; 4015 restricted = 0;
3069 if (erq->flags & IW_ENCODE_RESTRICTED) 4016 if (erq->flags & IW_ENCODE_RESTRICTED)
@@ -3078,14 +4025,15 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
3078 priv->tx_key = setindex; 4025 priv->tx_key = setindex;
3079 4026
3080 /* Try fast key change if connected and only keys are changed */ 4027 /* Try fast key change if connected and only keys are changed */
3081 if (priv->wep_on && enable && (priv->wep_restrict == restricted) && 4028 if ((priv->encode_alg == encode_alg) &&
4029 (priv->wep_restrict == restricted) &&
3082 netif_carrier_ok(dev)) { 4030 netif_carrier_ok(dev)) {
3083 err = __orinoco_hw_setup_wepkeys(priv); 4031 err = __orinoco_hw_setup_wepkeys(priv);
3084 /* No need to commit if successful */ 4032 /* No need to commit if successful */
3085 goto out; 4033 goto out;
3086 } 4034 }
3087 4035
3088 priv->wep_on = enable; 4036 priv->encode_alg = encode_alg;
3089 priv->wep_restrict = restricted; 4037 priv->wep_restrict = restricted;
3090 4038
3091 out: 4039 out:
@@ -3114,7 +4062,7 @@ static int orinoco_ioctl_getiwencode(struct net_device *dev,
3114 index = priv->tx_key; 4062 index = priv->tx_key;
3115 4063
3116 erq->flags = 0; 4064 erq->flags = 0;
3117 if (! priv->wep_on) 4065 if (!priv->encode_alg)
3118 erq->flags |= IW_ENCODE_DISABLED; 4066 erq->flags |= IW_ENCODE_DISABLED;
3119 erq->flags |= index + 1; 4067 erq->flags |= index + 1;
3120 4068
@@ -3689,6 +4637,399 @@ static int orinoco_ioctl_getpower(struct net_device *dev,
3689 return err; 4637 return err;
3690} 4638}
3691 4639
4640static int orinoco_ioctl_set_encodeext(struct net_device *dev,
4641 struct iw_request_info *info,
4642 union iwreq_data *wrqu,
4643 char *extra)
4644{
4645 struct orinoco_private *priv = netdev_priv(dev);
4646 struct iw_point *encoding = &wrqu->encoding;
4647 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
4648 int idx, alg = ext->alg, set_key = 1;
4649 unsigned long flags;
4650 int err = -EINVAL;
4651 u16 key_len;
4652
4653 if (orinoco_lock(priv, &flags) != 0)
4654 return -EBUSY;
4655
4656 /* Determine and validate the key index */
4657 idx = encoding->flags & IW_ENCODE_INDEX;
4658 if (idx) {
4659 if ((idx < 1) || (idx > WEP_KEYS))
4660 goto out;
4661 idx--;
4662 } else
4663 idx = priv->tx_key;
4664
4665 if (encoding->flags & IW_ENCODE_DISABLED)
4666 alg = IW_ENCODE_ALG_NONE;
4667
4668 if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) {
4669 /* Clear any TKIP TX key we had */
4670 (void) orinoco_clear_tkip_key(priv, priv->tx_key);
4671 }
4672
4673 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
4674 priv->tx_key = idx;
4675 set_key = ((alg == IW_ENCODE_ALG_TKIP) ||
4676 (ext->key_len > 0)) ? 1 : 0;
4677 }
4678
4679 if (set_key) {
4680 /* Set the requested key first */
4681 switch (alg) {
4682 case IW_ENCODE_ALG_NONE:
4683 priv->encode_alg = alg;
4684 priv->keys[idx].len = 0;
4685 break;
4686
4687 case IW_ENCODE_ALG_WEP:
4688 if (ext->key_len > SMALL_KEY_SIZE)
4689 key_len = LARGE_KEY_SIZE;
4690 else if (ext->key_len > 0)
4691 key_len = SMALL_KEY_SIZE;
4692 else
4693 goto out;
4694
4695 priv->encode_alg = alg;
4696 priv->keys[idx].len = cpu_to_le16(key_len);
4697
4698 key_len = min(ext->key_len, key_len);
4699
4700 memset(priv->keys[idx].data, 0, ORINOCO_MAX_KEY_SIZE);
4701 memcpy(priv->keys[idx].data, ext->key, key_len);
4702 break;
4703
4704 case IW_ENCODE_ALG_TKIP:
4705 {
4706 hermes_t *hw = &priv->hw;
4707 u8 *tkip_iv = NULL;
4708
4709 if (!priv->has_wpa ||
4710 (ext->key_len > sizeof(priv->tkip_key[0])))
4711 goto out;
4712
4713 priv->encode_alg = alg;
4714 memset(&priv->tkip_key[idx], 0,
4715 sizeof(priv->tkip_key[idx]));
4716 memcpy(&priv->tkip_key[idx], ext->key, ext->key_len);
4717
4718 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
4719 tkip_iv = &ext->rx_seq[0];
4720
4721 err = __orinoco_hw_set_tkip_key(hw, idx,
4722 ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
4723 (u8 *) &priv->tkip_key[idx],
4724 tkip_iv, NULL);
4725 if (err)
4726 printk(KERN_ERR "%s: Error %d setting TKIP key"
4727 "\n", dev->name, err);
4728
4729 goto out;
4730 }
4731 default:
4732 goto out;
4733 }
4734 }
4735 err = -EINPROGRESS;
4736 out:
4737 orinoco_unlock(priv, &flags);
4738
4739 return err;
4740}
4741
4742static int orinoco_ioctl_get_encodeext(struct net_device *dev,
4743 struct iw_request_info *info,
4744 union iwreq_data *wrqu,
4745 char *extra)
4746{
4747 struct orinoco_private *priv = netdev_priv(dev);
4748 struct iw_point *encoding = &wrqu->encoding;
4749 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
4750 int idx, max_key_len;
4751 unsigned long flags;
4752 int err;
4753
4754 if (orinoco_lock(priv, &flags) != 0)
4755 return -EBUSY;
4756
4757 err = -EINVAL;
4758 max_key_len = encoding->length - sizeof(*ext);
4759 if (max_key_len < 0)
4760 goto out;
4761
4762 idx = encoding->flags & IW_ENCODE_INDEX;
4763 if (idx) {
4764 if ((idx < 1) || (idx > WEP_KEYS))
4765 goto out;
4766 idx--;
4767 } else
4768 idx = priv->tx_key;
4769
4770 encoding->flags = idx + 1;
4771 memset(ext, 0, sizeof(*ext));
4772
4773 ext->alg = priv->encode_alg;
4774 switch (priv->encode_alg) {
4775 case IW_ENCODE_ALG_NONE:
4776 ext->key_len = 0;
4777 encoding->flags |= IW_ENCODE_DISABLED;
4778 break;
4779 case IW_ENCODE_ALG_WEP:
4780 ext->key_len = min_t(u16, le16_to_cpu(priv->keys[idx].len),
4781 max_key_len);
4782 memcpy(ext->key, priv->keys[idx].data, ext->key_len);
4783 encoding->flags |= IW_ENCODE_ENABLED;
4784 break;
4785 case IW_ENCODE_ALG_TKIP:
4786 ext->key_len = min_t(u16, sizeof(struct orinoco_tkip_key),
4787 max_key_len);
4788 memcpy(ext->key, &priv->tkip_key[idx], ext->key_len);
4789 encoding->flags |= IW_ENCODE_ENABLED;
4790 break;
4791 }
4792
4793 err = 0;
4794 out:
4795 orinoco_unlock(priv, &flags);
4796
4797 return err;
4798}
4799
4800static int orinoco_ioctl_set_auth(struct net_device *dev,
4801 struct iw_request_info *info,
4802 union iwreq_data *wrqu, char *extra)
4803{
4804 struct orinoco_private *priv = netdev_priv(dev);
4805 hermes_t *hw = &priv->hw;
4806 struct iw_param *param = &wrqu->param;
4807 unsigned long flags;
4808 int ret = -EINPROGRESS;
4809
4810 if (orinoco_lock(priv, &flags) != 0)
4811 return -EBUSY;
4812
4813 switch (param->flags & IW_AUTH_INDEX) {
4814 case IW_AUTH_WPA_VERSION:
4815 case IW_AUTH_CIPHER_PAIRWISE:
4816 case IW_AUTH_CIPHER_GROUP:
4817 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
4818 case IW_AUTH_PRIVACY_INVOKED:
4819 case IW_AUTH_DROP_UNENCRYPTED:
4820 /*
4821 * orinoco does not use these parameters
4822 */
4823 break;
4824
4825 case IW_AUTH_KEY_MGMT:
4826 /* wl_lkm implies value 2 == PSK for Hermes I
4827 * which ties in with WEXT
4828 * no other hints tho :(
4829 */
4830 priv->key_mgmt = param->value;
4831 break;
4832
4833 case IW_AUTH_TKIP_COUNTERMEASURES:
4834 /* When countermeasures are enabled, shut down the
4835 * card; when disabled, re-enable the card. This must
4836 * take effect immediately.
4837 *
4838 * TODO: Make sure that the EAPOL message is getting
4839 * out before card disabled
4840 */
4841 if (param->value) {
4842 priv->tkip_cm_active = 1;
4843 ret = hermes_enable_port(hw, 0);
4844 } else {
4845 priv->tkip_cm_active = 0;
4846 ret = hermes_disable_port(hw, 0);
4847 }
4848 break;
4849
4850 case IW_AUTH_80211_AUTH_ALG:
4851 if (param->value & IW_AUTH_ALG_SHARED_KEY)
4852 priv->wep_restrict = 1;
4853 else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
4854 priv->wep_restrict = 0;
4855 else
4856 ret = -EINVAL;
4857 break;
4858
4859 case IW_AUTH_WPA_ENABLED:
4860 if (priv->has_wpa) {
4861 priv->wpa_enabled = param->value ? 1 : 0;
4862 } else {
4863 if (param->value)
4864 ret = -EOPNOTSUPP;
4865 /* else silently accept disable of WPA */
4866 priv->wpa_enabled = 0;
4867 }
4868 break;
4869
4870 default:
4871 ret = -EOPNOTSUPP;
4872 }
4873
4874 orinoco_unlock(priv, &flags);
4875 return ret;
4876}
4877
4878static int orinoco_ioctl_get_auth(struct net_device *dev,
4879 struct iw_request_info *info,
4880 union iwreq_data *wrqu, char *extra)
4881{
4882 struct orinoco_private *priv = netdev_priv(dev);
4883 struct iw_param *param = &wrqu->param;
4884 unsigned long flags;
4885 int ret = 0;
4886
4887 if (orinoco_lock(priv, &flags) != 0)
4888 return -EBUSY;
4889
4890 switch (param->flags & IW_AUTH_INDEX) {
4891 case IW_AUTH_KEY_MGMT:
4892 param->value = priv->key_mgmt;
4893 break;
4894
4895 case IW_AUTH_TKIP_COUNTERMEASURES:
4896 param->value = priv->tkip_cm_active;
4897 break;
4898
4899 case IW_AUTH_80211_AUTH_ALG:
4900 if (priv->wep_restrict)
4901 param->value = IW_AUTH_ALG_SHARED_KEY;
4902 else
4903 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
4904 break;
4905
4906 case IW_AUTH_WPA_ENABLED:
4907 param->value = priv->wpa_enabled;
4908 break;
4909
4910 default:
4911 ret = -EOPNOTSUPP;
4912 }
4913
4914 orinoco_unlock(priv, &flags);
4915 return ret;
4916}
4917
4918static int orinoco_ioctl_set_genie(struct net_device *dev,
4919 struct iw_request_info *info,
4920 union iwreq_data *wrqu, char *extra)
4921{
4922 struct orinoco_private *priv = netdev_priv(dev);
4923 u8 *buf;
4924 unsigned long flags;
4925 int err = 0;
4926
4927 if ((wrqu->data.length > MAX_WPA_IE_LEN) ||
4928 (wrqu->data.length && (extra == NULL)))
4929 return -EINVAL;
4930
4931 if (orinoco_lock(priv, &flags) != 0)
4932 return -EBUSY;
4933
4934 if (wrqu->data.length) {
4935 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
4936 if (buf == NULL) {
4937 err = -ENOMEM;
4938 goto out;
4939 }
4940
4941 memcpy(buf, extra, wrqu->data.length);
4942 kfree(priv->wpa_ie);
4943 priv->wpa_ie = buf;
4944 priv->wpa_ie_len = wrqu->data.length;
4945 } else {
4946 kfree(priv->wpa_ie);
4947 priv->wpa_ie = NULL;
4948 priv->wpa_ie_len = 0;
4949 }
4950
4951 if (priv->wpa_ie) {
4952 /* Looks like wl_lkm wants to check the auth alg, and
4953 * somehow pass it to the firmware.
4954 * Instead it just calls the key mgmt rid
4955 * - we do this in set auth.
4956 */
4957 }
4958
4959out:
4960 orinoco_unlock(priv, &flags);
4961 return err;
4962}
4963
4964static int orinoco_ioctl_get_genie(struct net_device *dev,
4965 struct iw_request_info *info,
4966 union iwreq_data *wrqu, char *extra)
4967{
4968 struct orinoco_private *priv = netdev_priv(dev);
4969 unsigned long flags;
4970 int err = 0;
4971
4972 if (orinoco_lock(priv, &flags) != 0)
4973 return -EBUSY;
4974
4975 if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) {
4976 wrqu->data.length = 0;
4977 goto out;
4978 }
4979
4980 if (wrqu->data.length < priv->wpa_ie_len) {
4981 err = -E2BIG;
4982 goto out;
4983 }
4984
4985 wrqu->data.length = priv->wpa_ie_len;
4986 memcpy(extra, priv->wpa_ie, priv->wpa_ie_len);
4987
4988out:
4989 orinoco_unlock(priv, &flags);
4990 return err;
4991}
4992
4993static int orinoco_ioctl_set_mlme(struct net_device *dev,
4994 struct iw_request_info *info,
4995 union iwreq_data *wrqu, char *extra)
4996{
4997 struct orinoco_private *priv = netdev_priv(dev);
4998 hermes_t *hw = &priv->hw;
4999 struct iw_mlme *mlme = (struct iw_mlme *)extra;
5000 unsigned long flags;
5001 int ret = 0;
5002
5003 if (orinoco_lock(priv, &flags) != 0)
5004 return -EBUSY;
5005
5006 switch (mlme->cmd) {
5007 case IW_MLME_DEAUTH:
5008 /* silently ignore */
5009 break;
5010
5011 case IW_MLME_DISASSOC:
5012 {
5013 struct {
5014 u8 addr[ETH_ALEN];
5015 __le16 reason_code;
5016 } __attribute__ ((packed)) buf;
5017
5018 memcpy(buf.addr, mlme->addr.sa_data, ETH_ALEN);
5019 buf.reason_code = cpu_to_le16(mlme->reason_code);
5020 ret = HERMES_WRITE_RECORD(hw, USER_BAP,
5021 HERMES_RID_CNFDISASSOCIATE,
5022 &buf);
5023 break;
5024 }
5025 default:
5026 ret = -EOPNOTSUPP;
5027 }
5028
5029 orinoco_unlock(priv, &flags);
5030 return ret;
5031}
5032
3692static int orinoco_ioctl_getretry(struct net_device *dev, 5033static int orinoco_ioctl_getretry(struct net_device *dev,
3693 struct iw_request_info *info, 5034 struct iw_request_info *info,
3694 struct iw_param *rrq, 5035 struct iw_param *rrq,
@@ -3947,14 +5288,15 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
3947 return err; 5288 return err;
3948} 5289}
3949 5290
3950/* Trigger a scan (look for other cells in the vicinity */ 5291/* Trigger a scan (look for other cells in the vicinity) */
3951static int orinoco_ioctl_setscan(struct net_device *dev, 5292static int orinoco_ioctl_setscan(struct net_device *dev,
3952 struct iw_request_info *info, 5293 struct iw_request_info *info,
3953 struct iw_param *srq, 5294 struct iw_point *srq,
3954 char *extra) 5295 char *extra)
3955{ 5296{
3956 struct orinoco_private *priv = netdev_priv(dev); 5297 struct orinoco_private *priv = netdev_priv(dev);
3957 hermes_t *hw = &priv->hw; 5298 hermes_t *hw = &priv->hw;
5299 struct iw_scan_req *si = (struct iw_scan_req *) extra;
3958 int err = 0; 5300 int err = 0;
3959 unsigned long flags; 5301 unsigned long flags;
3960 5302
@@ -3986,7 +5328,6 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
3986 * we access scan variables in priv is critical. 5328 * we access scan variables in priv is critical.
3987 * o scan_inprogress : not touched by irq handler 5329 * o scan_inprogress : not touched by irq handler
3988 * o scan_mode : not touched by irq handler 5330 * o scan_mode : not touched by irq handler
3989 * o scan_len : synchronised with scan_result
3990 * Before modifying anything on those variables, please think hard ! 5331 * Before modifying anything on those variables, please think hard !
3991 * Jean II */ 5332 * Jean II */
3992 5333
@@ -4016,13 +5357,43 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
4016 } 5357 }
4017 break; 5358 break;
4018 case FIRMWARE_TYPE_AGERE: 5359 case FIRMWARE_TYPE_AGERE:
4019 err = hermes_write_wordrec(hw, USER_BAP, 5360 if (priv->scan_mode & IW_SCAN_THIS_ESSID) {
5361 struct hermes_idstring idbuf;
5362 size_t len = min(sizeof(idbuf.val),
5363 (size_t) si->essid_len);
5364 idbuf.len = cpu_to_le16(len);
5365 memcpy(idbuf.val, si->essid, len);
5366
5367 err = hermes_write_ltv(hw, USER_BAP,
5368 HERMES_RID_CNFSCANSSID_AGERE,
5369 HERMES_BYTES_TO_RECLEN(len + 2),
5370 &idbuf);
5371 } else
5372 err = hermes_write_wordrec(hw, USER_BAP,
4020 HERMES_RID_CNFSCANSSID_AGERE, 5373 HERMES_RID_CNFSCANSSID_AGERE,
4021 0); /* Any ESSID */ 5374 0); /* Any ESSID */
4022 if (err) 5375 if (err)
4023 break; 5376 break;
4024 5377
4025 err = hermes_inquire(hw, HERMES_INQ_SCAN); 5378 if (priv->has_ext_scan) {
5379 /* Clear scan results at the start of
5380 * an extended scan */
5381 orinoco_clear_scan_results(priv,
5382 msecs_to_jiffies(15000));
5383
5384 /* TODO: Is this available on older firmware?
5385 * Can we use it to scan specific channels
5386 * for IW_SCAN_THIS_FREQ? */
5387 err = hermes_write_wordrec(hw, USER_BAP,
5388 HERMES_RID_CNFSCANCHANNELS2GHZ,
5389 0x7FFF);
5390 if (err)
5391 goto out;
5392
5393 err = hermes_inquire(hw,
5394 HERMES_INQ_CHANNELINFO);
5395 } else
5396 err = hermes_inquire(hw, HERMES_INQ_SCAN);
4026 break; 5397 break;
4027 } 5398 }
4028 } else 5399 } else
@@ -4040,8 +5411,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
4040#define MAX_CUSTOM_LEN 64 5411#define MAX_CUSTOM_LEN 64
4041 5412
4042/* Translate scan data returned from the card to a card independant 5413/* Translate scan data returned from the card to a card independant
4043 * format that the Wireless Tools will understand - Jean II 5414 * format that the Wireless Tools will understand - Jean II */
4044 * Return message length or -errno for fatal errors */
4045static inline char *orinoco_translate_scan(struct net_device *dev, 5415static inline char *orinoco_translate_scan(struct net_device *dev,
4046 struct iw_request_info *info, 5416 struct iw_request_info *info,
4047 char *current_ev, 5417 char *current_ev,
@@ -4053,9 +5423,10 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4053 u16 capabilities; 5423 u16 capabilities;
4054 u16 channel; 5424 u16 channel;
4055 struct iw_event iwe; /* Temporary buffer */ 5425 struct iw_event iwe; /* Temporary buffer */
4056 char *p;
4057 char custom[MAX_CUSTOM_LEN]; 5426 char custom[MAX_CUSTOM_LEN];
4058 5427
5428 memset(&iwe, 0, sizeof(iwe));
5429
4059 /* First entry *MUST* be the AP MAC address */ 5430 /* First entry *MUST* be the AP MAC address */
4060 iwe.cmd = SIOCGIWAP; 5431 iwe.cmd = SIOCGIWAP;
4061 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 5432 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
@@ -4077,8 +5448,8 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4077 /* Add mode */ 5448 /* Add mode */
4078 iwe.cmd = SIOCGIWMODE; 5449 iwe.cmd = SIOCGIWMODE;
4079 capabilities = le16_to_cpu(bss->a.capabilities); 5450 capabilities = le16_to_cpu(bss->a.capabilities);
4080 if (capabilities & 0x3) { 5451 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
4081 if (capabilities & 0x1) 5452 if (capabilities & WLAN_CAPABILITY_ESS)
4082 iwe.u.mode = IW_MODE_MASTER; 5453 iwe.u.mode = IW_MODE_MASTER;
4083 else 5454 else
4084 iwe.u.mode = IW_MODE_ADHOC; 5455 iwe.u.mode = IW_MODE_ADHOC;
@@ -4088,17 +5459,22 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4088 5459
4089 channel = bss->s.channel; 5460 channel = bss->s.channel;
4090 if ((channel >= 1) && (channel <= NUM_CHANNELS)) { 5461 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
4091 /* Add frequency */ 5462 /* Add channel and frequency */
4092 iwe.cmd = SIOCGIWFREQ; 5463 iwe.cmd = SIOCGIWFREQ;
5464 iwe.u.freq.m = channel;
5465 iwe.u.freq.e = 0;
5466 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5467 &iwe, IW_EV_FREQ_LEN);
5468
4093 iwe.u.freq.m = channel_frequency[channel-1] * 100000; 5469 iwe.u.freq.m = channel_frequency[channel-1] * 100000;
4094 iwe.u.freq.e = 1; 5470 iwe.u.freq.e = 1;
4095 current_ev = iwe_stream_add_event(info, current_ev, end_buf, 5471 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
4096 &iwe, IW_EV_FREQ_LEN); 5472 &iwe, IW_EV_FREQ_LEN);
4097 } 5473 }
4098 5474
4099 /* Add quality statistics */ 5475 /* Add quality statistics. level and noise in dB. No link quality */
4100 iwe.cmd = IWEVQUAL; 5476 iwe.cmd = IWEVQUAL;
4101 iwe.u.qual.updated = 0x10; /* no link quality */ 5477 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
4102 iwe.u.qual.level = (__u8) le16_to_cpu(bss->a.level) - 0x95; 5478 iwe.u.qual.level = (__u8) le16_to_cpu(bss->a.level) - 0x95;
4103 iwe.u.qual.noise = (__u8) le16_to_cpu(bss->a.noise) - 0x95; 5479 iwe.u.qual.noise = (__u8) le16_to_cpu(bss->a.noise) - 0x95;
4104 /* Wireless tools prior to 27.pre22 will show link quality 5480 /* Wireless tools prior to 27.pre22 will show link quality
@@ -4112,25 +5488,13 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4112 5488
4113 /* Add encryption capability */ 5489 /* Add encryption capability */
4114 iwe.cmd = SIOCGIWENCODE; 5490 iwe.cmd = SIOCGIWENCODE;
4115 if (capabilities & 0x10) 5491 if (capabilities & WLAN_CAPABILITY_PRIVACY)
4116 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; 5492 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
4117 else 5493 else
4118 iwe.u.data.flags = IW_ENCODE_DISABLED; 5494 iwe.u.data.flags = IW_ENCODE_DISABLED;
4119 iwe.u.data.length = 0; 5495 iwe.u.data.length = 0;
4120 current_ev = iwe_stream_add_point(info, current_ev, end_buf, 5496 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4121 &iwe, bss->a.essid); 5497 &iwe, NULL);
4122
4123 /* Add EXTRA: Age to display seconds since last beacon/probe response
4124 * for given network. */
4125 iwe.cmd = IWEVCUSTOM;
4126 p = custom;
4127 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
4128 " Last beacon: %dms ago",
4129 jiffies_to_msecs(jiffies - last_scanned));
4130 iwe.u.data.length = p - custom;
4131 if (iwe.u.data.length)
4132 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4133 &iwe, custom);
4134 5498
4135 /* Bit rate is not available in Lucent/Agere firmwares */ 5499 /* Bit rate is not available in Lucent/Agere firmwares */
4136 if (priv->firmware_type != FIRMWARE_TYPE_AGERE) { 5500 if (priv->firmware_type != FIRMWARE_TYPE_AGERE) {
@@ -4152,7 +5516,8 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4152 if (bss->p.rates[i] == 0x0) 5516 if (bss->p.rates[i] == 0x0)
4153 break; 5517 break;
4154 /* Bit rate given in 500 kb/s units (+ 0x80) */ 5518 /* Bit rate given in 500 kb/s units (+ 0x80) */
4155 iwe.u.bitrate.value = ((bss->p.rates[i] & 0x7f) * 500000); 5519 iwe.u.bitrate.value =
5520 ((bss->p.rates[i] & 0x7f) * 500000);
4156 current_val = iwe_stream_add_value(info, current_ev, 5521 current_val = iwe_stream_add_value(info, current_ev,
4157 current_val, 5522 current_val,
4158 end_buf, &iwe, 5523 end_buf, &iwe,
@@ -4163,6 +5528,199 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
4163 current_ev = current_val; 5528 current_ev = current_val;
4164 } 5529 }
4165 5530
5531 /* Beacon interval */
5532 iwe.cmd = IWEVCUSTOM;
5533 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5534 "bcn_int=%d",
5535 le16_to_cpu(bss->a.beacon_interv));
5536 if (iwe.u.data.length)
5537 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5538 &iwe, custom);
5539
5540 /* Capabilites */
5541 iwe.cmd = IWEVCUSTOM;
5542 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5543 "capab=0x%04x",
5544 capabilities);
5545 if (iwe.u.data.length)
5546 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5547 &iwe, custom);
5548
5549 /* Add EXTRA: Age to display seconds since last beacon/probe response
5550 * for given network. */
5551 iwe.cmd = IWEVCUSTOM;
5552 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5553 " Last beacon: %dms ago",
5554 jiffies_to_msecs(jiffies - last_scanned));
5555 if (iwe.u.data.length)
5556 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5557 &iwe, custom);
5558
5559 return current_ev;
5560}
5561
5562static inline char *orinoco_translate_ext_scan(struct net_device *dev,
5563 struct iw_request_info *info,
5564 char *current_ev,
5565 char *end_buf,
5566 struct agere_ext_scan_info *bss,
5567 unsigned int last_scanned)
5568{
5569 u16 capabilities;
5570 u16 channel;
5571 struct iw_event iwe; /* Temporary buffer */
5572 char custom[MAX_CUSTOM_LEN];
5573 u8 *ie;
5574
5575 memset(&iwe, 0, sizeof(iwe));
5576
5577 /* First entry *MUST* be the AP MAC address */
5578 iwe.cmd = SIOCGIWAP;
5579 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
5580 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
5581 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5582 &iwe, IW_EV_ADDR_LEN);
5583
5584 /* Other entries will be displayed in the order we give them */
5585
5586 /* Add the ESSID */
5587 ie = bss->data;
5588 iwe.u.data.length = ie[1];
5589 if (iwe.u.data.length) {
5590 if (iwe.u.data.length > 32)
5591 iwe.u.data.length = 32;
5592 iwe.cmd = SIOCGIWESSID;
5593 iwe.u.data.flags = 1;
5594 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5595 &iwe, &ie[2]);
5596 }
5597
5598 /* Add mode */
5599 capabilities = le16_to_cpu(bss->capabilities);
5600 if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
5601 iwe.cmd = SIOCGIWMODE;
5602 if (capabilities & WLAN_CAPABILITY_ESS)
5603 iwe.u.mode = IW_MODE_MASTER;
5604 else
5605 iwe.u.mode = IW_MODE_ADHOC;
5606 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5607 &iwe, IW_EV_UINT_LEN);
5608 }
5609
5610 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_DS_SET);
5611 channel = ie ? ie[2] : 0;
5612 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
5613 /* Add channel and frequency */
5614 iwe.cmd = SIOCGIWFREQ;
5615 iwe.u.freq.m = channel;
5616 iwe.u.freq.e = 0;
5617 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5618 &iwe, IW_EV_FREQ_LEN);
5619
5620 iwe.u.freq.m = channel_frequency[channel-1] * 100000;
5621 iwe.u.freq.e = 1;
5622 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5623 &iwe, IW_EV_FREQ_LEN);
5624 }
5625
5626 /* Add quality statistics. level and noise in dB. No link quality */
5627 iwe.cmd = IWEVQUAL;
5628 iwe.u.qual.updated = IW_QUAL_DBM | IW_QUAL_QUAL_INVALID;
5629 iwe.u.qual.level = bss->level - 0x95;
5630 iwe.u.qual.noise = bss->noise - 0x95;
5631 /* Wireless tools prior to 27.pre22 will show link quality
5632 * anyway, so we provide a reasonable value. */
5633 if (iwe.u.qual.level > iwe.u.qual.noise)
5634 iwe.u.qual.qual = iwe.u.qual.level - iwe.u.qual.noise;
5635 else
5636 iwe.u.qual.qual = 0;
5637 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
5638 &iwe, IW_EV_QUAL_LEN);
5639
5640 /* Add encryption capability */
5641 iwe.cmd = SIOCGIWENCODE;
5642 if (capabilities & WLAN_CAPABILITY_PRIVACY)
5643 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
5644 else
5645 iwe.u.data.flags = IW_ENCODE_DISABLED;
5646 iwe.u.data.length = 0;
5647 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5648 &iwe, NULL);
5649
5650 /* WPA IE */
5651 ie = orinoco_get_wpa_ie(bss->data, sizeof(bss->data));
5652 if (ie) {
5653 iwe.cmd = IWEVGENIE;
5654 iwe.u.data.length = ie[1] + 2;
5655 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5656 &iwe, ie);
5657 }
5658
5659 /* RSN IE */
5660 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RSN);
5661 if (ie) {
5662 iwe.cmd = IWEVGENIE;
5663 iwe.u.data.length = ie[1] + 2;
5664 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5665 &iwe, ie);
5666 }
5667
5668 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RATES);
5669 if (ie) {
5670 char *p = current_ev + iwe_stream_lcp_len(info);
5671 int i;
5672
5673 iwe.cmd = SIOCGIWRATE;
5674 /* Those two flags are ignored... */
5675 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
5676
5677 for (i = 2; i < (ie[1] + 2); i++) {
5678 iwe.u.bitrate.value = ((ie[i] & 0x7F) * 500000);
5679 p = iwe_stream_add_value(info, current_ev, p, end_buf,
5680 &iwe, IW_EV_PARAM_LEN);
5681 }
5682 /* Check if we added any event */
5683 if (p > (current_ev + iwe_stream_lcp_len(info)))
5684 current_ev = p;
5685 }
5686
5687 /* Timestamp */
5688 iwe.cmd = IWEVCUSTOM;
5689 iwe.u.data.length =
5690 snprintf(custom, MAX_CUSTOM_LEN, "tsf=%016llx",
5691 (unsigned long long) le64_to_cpu(bss->timestamp));
5692 if (iwe.u.data.length)
5693 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5694 &iwe, custom);
5695
5696 /* Beacon interval */
5697 iwe.cmd = IWEVCUSTOM;
5698 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5699 "bcn_int=%d",
5700 le16_to_cpu(bss->beacon_interval));
5701 if (iwe.u.data.length)
5702 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5703 &iwe, custom);
5704
5705 /* Capabilites */
5706 iwe.cmd = IWEVCUSTOM;
5707 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5708 "capab=0x%04x",
5709 capabilities);
5710 if (iwe.u.data.length)
5711 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5712 &iwe, custom);
5713
5714 /* Add EXTRA: Age to display seconds since last beacon/probe response
5715 * for given network. */
5716 iwe.cmd = IWEVCUSTOM;
5717 iwe.u.data.length = snprintf(custom, MAX_CUSTOM_LEN,
5718 " Last beacon: %dms ago",
5719 jiffies_to_msecs(jiffies - last_scanned));
5720 if (iwe.u.data.length)
5721 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
5722 &iwe, custom);
5723
4166 return current_ev; 5724 return current_ev;
4167} 5725}
4168 5726
@@ -4173,7 +5731,6 @@ static int orinoco_ioctl_getscan(struct net_device *dev,
4173 char *extra) 5731 char *extra)
4174{ 5732{
4175 struct orinoco_private *priv = netdev_priv(dev); 5733 struct orinoco_private *priv = netdev_priv(dev);
4176 bss_element *bss;
4177 int err = 0; 5734 int err = 0;
4178 unsigned long flags; 5735 unsigned long flags;
4179 char *current_ev = extra; 5736 char *current_ev = extra;
@@ -4193,18 +5750,47 @@ static int orinoco_ioctl_getscan(struct net_device *dev,
4193 goto out; 5750 goto out;
4194 } 5751 }
4195 5752
4196 list_for_each_entry(bss, &priv->bss_list, list) { 5753 if (priv->has_ext_scan) {
4197 /* Translate to WE format this entry */ 5754 struct xbss_element *bss;
4198 current_ev = orinoco_translate_scan(dev, info, current_ev, 5755
4199 extra + srq->length, 5756 list_for_each_entry(bss, &priv->bss_list, list) {
4200 &bss->bss, 5757 /* Translate this entry to WE format */
4201 bss->last_scanned); 5758 current_ev =
4202 5759 orinoco_translate_ext_scan(dev, info,
4203 /* Check if there is space for one more entry */ 5760 current_ev,
4204 if ((extra + srq->length - current_ev) <= IW_EV_ADDR_LEN) { 5761 extra + srq->length,
4205 /* Ask user space to try again with a bigger buffer */ 5762 &bss->bss,
4206 err = -E2BIG; 5763 bss->last_scanned);
4207 goto out; 5764
5765 /* Check if there is space for one more entry */
5766 if ((extra + srq->length - current_ev)
5767 <= IW_EV_ADDR_LEN) {
5768 /* Ask user space to try again with a
5769 * bigger buffer */
5770 err = -E2BIG;
5771 goto out;
5772 }
5773 }
5774
5775 } else {
5776 struct bss_element *bss;
5777
5778 list_for_each_entry(bss, &priv->bss_list, list) {
5779 /* Translate this entry to WE format */
5780 current_ev = orinoco_translate_scan(dev, info,
5781 current_ev,
5782 extra + srq->length,
5783 &bss->bss,
5784 bss->last_scanned);
5785
5786 /* Check if there is space for one more entry */
5787 if ((extra + srq->length - current_ev)
5788 <= IW_EV_ADDR_LEN) {
5789 /* Ask user space to try again with a
5790 * bigger buffer */
5791 err = -E2BIG;
5792 goto out;
5793 }
4208 } 5794 }
4209 } 5795 }
4210 5796
@@ -4295,39 +5881,48 @@ static const struct iw_priv_args orinoco_privtab[] = {
4295 * Structures to export the Wireless Handlers 5881 * Structures to export the Wireless Handlers
4296 */ 5882 */
4297 5883
5884#define STD_IW_HANDLER(id, func) \
5885 [IW_IOCTL_IDX(id)] = (iw_handler) func
4298static const iw_handler orinoco_handler[] = { 5886static const iw_handler orinoco_handler[] = {
4299 [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit, 5887 STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit),
4300 [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname, 5888 STD_IW_HANDLER(SIOCGIWNAME, orinoco_ioctl_getname),
4301 [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq, 5889 STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq),
4302 [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq, 5890 STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq),
4303 [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode, 5891 STD_IW_HANDLER(SIOCSIWMODE, orinoco_ioctl_setmode),
4304 [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode, 5892 STD_IW_HANDLER(SIOCGIWMODE, orinoco_ioctl_getmode),
4305 [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens, 5893 STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens),
4306 [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens, 5894 STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens),
4307 [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange, 5895 STD_IW_HANDLER(SIOCGIWRANGE, orinoco_ioctl_getiwrange),
4308 [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy, 5896 STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
4309 [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy, 5897 STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
4310 [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy, 5898 STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
4311 [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy, 5899 STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
4312 [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap, 5900 STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap),
4313 [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap, 5901 STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap),
4314 [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan, 5902 STD_IW_HANDLER(SIOCSIWSCAN, orinoco_ioctl_setscan),
4315 [SIOCGIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan, 5903 STD_IW_HANDLER(SIOCGIWSCAN, orinoco_ioctl_getscan),
4316 [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid, 5904 STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid),
4317 [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid, 5905 STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid),
4318 [SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick, 5906 STD_IW_HANDLER(SIOCSIWNICKN, orinoco_ioctl_setnick),
4319 [SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick, 5907 STD_IW_HANDLER(SIOCGIWNICKN, orinoco_ioctl_getnick),
4320 [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate, 5908 STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate),
4321 [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate, 5909 STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate),
4322 [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts, 5910 STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts),
4323 [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts, 5911 STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts),
4324 [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag, 5912 STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag),
4325 [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag, 5913 STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag),
4326 [SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry, 5914 STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry),
4327 [SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode, 5915 STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode),
4328 [SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode, 5916 STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode),
4329 [SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower, 5917 STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower),
4330 [SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower, 5918 STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower),
5919 STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
5920 STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
5921 STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
5922 STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
5923 STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
5924 STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
5925 STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
4331}; 5926};
4332 5927
4333 5928
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index c6b1858abde8..981570bd3b9d 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -9,6 +9,7 @@
9 9
10#define DRIVER_VERSION "0.15" 10#define DRIVER_VERSION "0.15"
11 11
12#include <linux/interrupt.h>
12#include <linux/netdevice.h> 13#include <linux/netdevice.h>
13#include <linux/wireless.h> 14#include <linux/wireless.h>
14#include <net/iw_handler.h> 15#include <net/iw_handler.h>
@@ -30,27 +31,57 @@ struct orinoco_key {
30 char data[ORINOCO_MAX_KEY_SIZE]; 31 char data[ORINOCO_MAX_KEY_SIZE];
31} __attribute__ ((packed)); 32} __attribute__ ((packed));
32 33
34#define TKIP_KEYLEN 16
35#define MIC_KEYLEN 8
36
37struct orinoco_tkip_key {
38 u8 tkip[TKIP_KEYLEN];
39 u8 tx_mic[MIC_KEYLEN];
40 u8 rx_mic[MIC_KEYLEN];
41};
42
33typedef enum { 43typedef enum {
34 FIRMWARE_TYPE_AGERE, 44 FIRMWARE_TYPE_AGERE,
35 FIRMWARE_TYPE_INTERSIL, 45 FIRMWARE_TYPE_INTERSIL,
36 FIRMWARE_TYPE_SYMBOL 46 FIRMWARE_TYPE_SYMBOL
37} fwtype_t; 47} fwtype_t;
38 48
39typedef struct { 49struct bss_element {
40 union hermes_scan_info bss; 50 union hermes_scan_info bss;
41 unsigned long last_scanned; 51 unsigned long last_scanned;
42 struct list_head list; 52 struct list_head list;
43} bss_element; 53};
54
55struct xbss_element {
56 struct agere_ext_scan_info bss;
57 unsigned long last_scanned;
58 struct list_head list;
59};
60
61struct hermes_rx_descriptor;
62
63struct orinoco_rx_data {
64 struct hermes_rx_descriptor *desc;
65 struct sk_buff *skb;
66 struct list_head list;
67};
44 68
45struct orinoco_private { 69struct orinoco_private {
46 void *card; /* Pointer to card dependent structure */ 70 void *card; /* Pointer to card dependent structure */
71 struct device *dev;
47 int (*hard_reset)(struct orinoco_private *); 72 int (*hard_reset)(struct orinoco_private *);
73 int (*stop_fw)(struct orinoco_private *, int);
48 74
49 /* Synchronisation stuff */ 75 /* Synchronisation stuff */
50 spinlock_t lock; 76 spinlock_t lock;
51 int hw_unavailable; 77 int hw_unavailable;
52 struct work_struct reset_work; 78 struct work_struct reset_work;
53 79
80 /* Interrupt tasklets */
81 struct tasklet_struct rx_tasklet;
82 struct list_head rx_list;
83 struct orinoco_rx_data *rx_data;
84
54 /* driver state */ 85 /* driver state */
55 int open; 86 int open;
56 u16 last_linkstatus; 87 u16 last_linkstatus;
@@ -83,13 +114,17 @@ struct orinoco_private {
83 unsigned int has_preamble:1; 114 unsigned int has_preamble:1;
84 unsigned int has_sensitivity:1; 115 unsigned int has_sensitivity:1;
85 unsigned int has_hostscan:1; 116 unsigned int has_hostscan:1;
117 unsigned int has_alt_txcntl:1;
118 unsigned int has_ext_scan:1;
119 unsigned int has_wpa:1;
120 unsigned int do_fw_download:1;
86 unsigned int broken_disableport:1; 121 unsigned int broken_disableport:1;
87 unsigned int broken_monitor:1; 122 unsigned int broken_monitor:1;
88 123
89 /* Configuration paramaters */ 124 /* Configuration paramaters */
90 u32 iw_mode; 125 u32 iw_mode;
91 int prefer_port3; 126 int prefer_port3;
92 u16 wep_on, wep_restrict, tx_key; 127 u16 encode_alg, wep_restrict, tx_key;
93 struct orinoco_key keys[ORINOCO_MAX_KEYS]; 128 struct orinoco_key keys[ORINOCO_MAX_KEYS];
94 int bitratemode; 129 int bitratemode;
95 char nick[IW_ESSID_MAX_SIZE+1]; 130 char nick[IW_ESSID_MAX_SIZE+1];
@@ -113,10 +148,22 @@ struct orinoco_private {
113 /* Scanning support */ 148 /* Scanning support */
114 struct list_head bss_list; 149 struct list_head bss_list;
115 struct list_head bss_free_list; 150 struct list_head bss_free_list;
116 bss_element *bss_data; 151 void *bss_xbss_data;
117 152
118 int scan_inprogress; /* Scan pending... */ 153 int scan_inprogress; /* Scan pending... */
119 u32 scan_mode; /* Type of scan done */ 154 u32 scan_mode; /* Type of scan done */
155
156 /* WPA support */
157 u8 *wpa_ie;
158 int wpa_ie_len;
159
160 struct orinoco_tkip_key tkip_key[ORINOCO_MAX_KEYS];
161 struct crypto_hash *rx_tfm_mic;
162 struct crypto_hash *tx_tfm_mic;
163
164 unsigned int wpa_enabled:1;
165 unsigned int tkip_cm_active:1;
166 unsigned int key_mgmt:3;
120}; 167};
121 168
122#ifdef ORINOCO_DEBUG 169#ifdef ORINOCO_DEBUG
@@ -130,8 +177,10 @@ extern int orinoco_debug;
130/* Exported prototypes */ 177/* Exported prototypes */
131/********************************************************************/ 178/********************************************************************/
132 179
133extern struct net_device *alloc_orinocodev(int sizeof_card, 180extern struct net_device *alloc_orinocodev(
134 int (*hard_reset)(struct orinoco_private *)); 181 int sizeof_card, struct device *device,
182 int (*hard_reset)(struct orinoco_private *),
183 int (*stop_fw)(struct orinoco_private *, int));
135extern void free_orinocodev(struct net_device *dev); 184extern void free_orinocodev(struct net_device *dev);
136extern int __orinoco_up(struct net_device *dev); 185extern int __orinoco_up(struct net_device *dev);
137extern int __orinoco_down(struct net_device *dev); 186extern int __orinoco_down(struct net_device *dev);
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index 1c216e015f64..9eaa252c2430 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -109,7 +109,8 @@ orinoco_cs_probe(struct pcmcia_device *link)
109 struct orinoco_private *priv; 109 struct orinoco_private *priv;
110 struct orinoco_pccard *card; 110 struct orinoco_pccard *card;
111 111
112 dev = alloc_orinocodev(sizeof(*card), orinoco_cs_hard_reset); 112 dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
113 orinoco_cs_hard_reset, NULL);
113 if (! dev) 114 if (! dev)
114 return -ENOMEM; 115 return -ENOMEM;
115 priv = netdev_priv(dev); 116 priv = netdev_priv(dev);
@@ -120,7 +121,7 @@ orinoco_cs_probe(struct pcmcia_device *link)
120 link->priv = dev; 121 link->priv = dev;
121 122
122 /* Interrupt setup */ 123 /* Interrupt setup */
123 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 124 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
124 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 125 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
125 link->irq.Handler = orinoco_interrupt; 126 link->irq.Handler = orinoco_interrupt;
126 link->irq.Instance = dev; 127 link->irq.Instance = dev;
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index 35ec5fcf81a6..2fc86596302e 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -182,7 +182,8 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
182 } 182 }
183 183
184 /* Allocate network device */ 184 /* Allocate network device */
185 dev = alloc_orinocodev(sizeof(*card), orinoco_nortel_cor_reset); 185 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
186 orinoco_nortel_cor_reset, NULL);
186 if (!dev) { 187 if (!dev) {
187 printk(KERN_ERR PFX "Cannot allocate network device\n"); 188 printk(KERN_ERR PFX "Cannot allocate network device\n");
188 err = -ENOMEM; 189 err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 2547d5dac0d3..4ebd638a073e 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -139,7 +139,8 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
139 } 139 }
140 140
141 /* Allocate network device */ 141 /* Allocate network device */
142 dev = alloc_orinocodev(sizeof(*card), orinoco_pci_cor_reset); 142 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
143 orinoco_pci_cor_reset, NULL);
143 if (!dev) { 144 if (!dev) {
144 printk(KERN_ERR PFX "Cannot allocate network device\n"); 145 printk(KERN_ERR PFX "Cannot allocate network device\n");
145 err = -ENOMEM; 146 err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 98fe165337d1..ef761857bb38 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -221,7 +221,8 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
221 } 221 }
222 222
223 /* Allocate network device */ 223 /* Allocate network device */
224 dev = alloc_orinocodev(sizeof(*card), orinoco_plx_cor_reset); 224 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
225 orinoco_plx_cor_reset, NULL);
225 if (!dev) { 226 if (!dev) {
226 printk(KERN_ERR PFX "Cannot allocate network device\n"); 227 printk(KERN_ERR PFX "Cannot allocate network device\n");
227 err = -ENOMEM; 228 err = -ENOMEM;
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index df493185a4af..ede24ec309c0 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -124,7 +124,8 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
124 } 124 }
125 125
126 /* Allocate network device */ 126 /* Allocate network device */
127 dev = alloc_orinocodev(sizeof(*card), orinoco_tmd_cor_reset); 127 dev = alloc_orinocodev(sizeof(*card), &pdev->dev,
128 orinoco_tmd_cor_reset, NULL);
128 if (!dev) { 129 if (!dev) {
129 printk(KERN_ERR PFX "Cannot allocate network device\n"); 130 printk(KERN_ERR PFX "Cannot allocate network device\n");
130 err = -ENOMEM; 131 err = -ENOMEM;
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 4801a363507b..1d0704fe146f 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54_H 1#ifndef P54_H
2#define PRISM54_H 2#define P54_H
3 3
4/* 4/*
5 * Shared defines for all mac80211 Prism54 code 5 * Shared defines for all mac80211 Prism54 code
@@ -19,13 +19,24 @@ enum control_frame_types {
19 P54_CONTROL_TYPE_CHANNEL_CHANGE, 19 P54_CONTROL_TYPE_CHANNEL_CHANGE,
20 P54_CONTROL_TYPE_FREQDONE, 20 P54_CONTROL_TYPE_FREQDONE,
21 P54_CONTROL_TYPE_DCFINIT, 21 P54_CONTROL_TYPE_DCFINIT,
22 P54_CONTROL_TYPE_FREEQUEUE = 7, 22 P54_CONTROL_TYPE_ENCRYPTION,
23 P54_CONTROL_TYPE_TIM,
24 P54_CONTROL_TYPE_POWERMGT,
25 P54_CONTROL_TYPE_FREEQUEUE,
23 P54_CONTROL_TYPE_TXDONE, 26 P54_CONTROL_TYPE_TXDONE,
24 P54_CONTROL_TYPE_PING, 27 P54_CONTROL_TYPE_PING,
25 P54_CONTROL_TYPE_STAT_READBACK, 28 P54_CONTROL_TYPE_STAT_READBACK,
26 P54_CONTROL_TYPE_BBP, 29 P54_CONTROL_TYPE_BBP,
27 P54_CONTROL_TYPE_EEPROM_READBACK, 30 P54_CONTROL_TYPE_EEPROM_READBACK,
28 P54_CONTROL_TYPE_LED 31 P54_CONTROL_TYPE_LED,
32 P54_CONTROL_TYPE_GPIO,
33 P54_CONTROL_TYPE_TIMER,
34 P54_CONTROL_TYPE_MODULATION,
35 P54_CONTROL_TYPE_SYNTH_CONFIG,
36 P54_CONTROL_TYPE_DETECTOR_VALUE,
37 P54_CONTROL_TYPE_XBOW_SYNTH_CFG,
38 P54_CONTROL_TYPE_CCE_QUIET,
39 P54_CONTROL_TYPE_PSM_STA_UNLOCK,
29}; 40};
30 41
31struct p54_control_hdr { 42struct p54_control_hdr {
@@ -38,11 +49,15 @@ struct p54_control_hdr {
38 u8 data[0]; 49 u8 data[0];
39} __attribute__ ((packed)); 50} __attribute__ ((packed));
40 51
41#define EEPROM_READBACK_LEN (sizeof(struct p54_control_hdr) + 4 /* p54_eeprom_lm86 */) 52#define EEPROM_READBACK_LEN 0x3fc
42#define MAX_RX_SIZE (IEEE80211_MAX_RTS_THRESHOLD + sizeof(struct p54_control_hdr) + 20 /* length of struct p54_rx_hdr */ + 16 )
43 53
44#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000 54#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000
45 55
56#define FW_FMAC 0x464d4143
57#define FW_LM86 0x4c4d3836
58#define FW_LM87 0x4c4d3837
59#define FW_LM20 0x4c4d3230
60
46struct p54_common { 61struct p54_common {
47 u32 rx_start; 62 u32 rx_start;
48 u32 rx_end; 63 u32 rx_end;
@@ -53,27 +68,43 @@ struct p54_common {
53 void (*stop)(struct ieee80211_hw *dev); 68 void (*stop)(struct ieee80211_hw *dev);
54 int mode; 69 int mode;
55 u16 seqno; 70 u16 seqno;
71 u16 rx_mtu;
72 u8 headroom;
73 u8 tailroom;
56 struct mutex conf_mutex; 74 struct mutex conf_mutex;
57 u8 mac_addr[ETH_ALEN]; 75 u8 mac_addr[ETH_ALEN];
58 u8 bssid[ETH_ALEN]; 76 u8 bssid[ETH_ALEN];
77 __le16 filter_type;
59 struct pda_iq_autocal_entry *iq_autocal; 78 struct pda_iq_autocal_entry *iq_autocal;
60 unsigned int iq_autocal_len; 79 unsigned int iq_autocal_len;
61 struct pda_channel_output_limit *output_limit; 80 struct pda_channel_output_limit *output_limit;
62 unsigned int output_limit_len; 81 unsigned int output_limit_len;
63 struct pda_pa_curve_data *curve_data; 82 struct pda_pa_curve_data *curve_data;
64 __le16 rxhw; 83 unsigned int filter_flags;
84 u16 rxhw;
65 u8 version; 85 u8 version;
86 u8 rx_antenna;
66 unsigned int tx_hdr_len; 87 unsigned int tx_hdr_len;
67 void *cached_vdcf; 88 void *cached_vdcf;
68 unsigned int fw_var; 89 unsigned int fw_var;
69 struct ieee80211_tx_queue_stats tx_stats[4]; 90 unsigned int fw_interface;
91 unsigned int output_power;
92 u32 tsf_low32;
93 u32 tsf_high32;
94 struct ieee80211_tx_queue_stats tx_stats[8];
95 struct ieee80211_low_level_stats stats;
96 struct timer_list stats_timer;
97 struct completion stats_comp;
98 void *cached_stats;
99 int noise;
100 void *eeprom;
101 struct completion eeprom_comp;
70}; 102};
71 103
72int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); 104int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
73void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw); 105int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw);
74int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len); 106int p54_read_eeprom(struct ieee80211_hw *dev);
75void p54_fill_eeprom_readback(struct p54_control_hdr *hdr);
76struct ieee80211_hw *p54_init_common(size_t priv_data_len); 107struct ieee80211_hw *p54_init_common(size_t priv_data_len);
77void p54_free_common(struct ieee80211_hw *dev); 108void p54_free_common(struct ieee80211_hw *dev);
78 109
79#endif /* PRISM54_H */ 110#endif /* P54_H */
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 29be3dc8ee09..de5e8f44b202 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("Softmac Prism54 common code");
27MODULE_LICENSE("GPL"); 27MODULE_LICENSE("GPL");
28MODULE_ALIAS("prism54common"); 28MODULE_ALIAS("prism54common");
29 29
30static struct ieee80211_rate p54_rates[] = { 30static struct ieee80211_rate p54_bgrates[] = {
31 { .bitrate = 10, .hw_value = 0, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 31 { .bitrate = 10, .hw_value = 0, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
32 { .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 32 { .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
33 { .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 33 { .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
@@ -42,7 +42,7 @@ static struct ieee80211_rate p54_rates[] = {
42 { .bitrate = 540, .hw_value = 11, }, 42 { .bitrate = 540, .hw_value = 11, },
43}; 43};
44 44
45static struct ieee80211_channel p54_channels[] = { 45static struct ieee80211_channel p54_bgchannels[] = {
46 { .center_freq = 2412, .hw_value = 1, }, 46 { .center_freq = 2412, .hw_value = 1, },
47 { .center_freq = 2417, .hw_value = 2, }, 47 { .center_freq = 2417, .hw_value = 2, },
48 { .center_freq = 2422, .hw_value = 3, }, 48 { .center_freq = 2422, .hw_value = 3, },
@@ -60,14 +60,69 @@ static struct ieee80211_channel p54_channels[] = {
60}; 60};
61 61
62static struct ieee80211_supported_band band_2GHz = { 62static struct ieee80211_supported_band band_2GHz = {
63 .channels = p54_channels, 63 .channels = p54_bgchannels,
64 .n_channels = ARRAY_SIZE(p54_channels), 64 .n_channels = ARRAY_SIZE(p54_bgchannels),
65 .bitrates = p54_rates, 65 .bitrates = p54_bgrates,
66 .n_bitrates = ARRAY_SIZE(p54_rates), 66 .n_bitrates = ARRAY_SIZE(p54_bgrates),
67}; 67};
68 68
69static struct ieee80211_rate p54_arates[] = {
70 { .bitrate = 60, .hw_value = 4, },
71 { .bitrate = 90, .hw_value = 5, },
72 { .bitrate = 120, .hw_value = 6, },
73 { .bitrate = 180, .hw_value = 7, },
74 { .bitrate = 240, .hw_value = 8, },
75 { .bitrate = 360, .hw_value = 9, },
76 { .bitrate = 480, .hw_value = 10, },
77 { .bitrate = 540, .hw_value = 11, },
78};
79
80static struct ieee80211_channel p54_achannels[] = {
81 { .center_freq = 4920 },
82 { .center_freq = 4940 },
83 { .center_freq = 4960 },
84 { .center_freq = 4980 },
85 { .center_freq = 5040 },
86 { .center_freq = 5060 },
87 { .center_freq = 5080 },
88 { .center_freq = 5170 },
89 { .center_freq = 5180 },
90 { .center_freq = 5190 },
91 { .center_freq = 5200 },
92 { .center_freq = 5210 },
93 { .center_freq = 5220 },
94 { .center_freq = 5230 },
95 { .center_freq = 5240 },
96 { .center_freq = 5260 },
97 { .center_freq = 5280 },
98 { .center_freq = 5300 },
99 { .center_freq = 5320 },
100 { .center_freq = 5500 },
101 { .center_freq = 5520 },
102 { .center_freq = 5540 },
103 { .center_freq = 5560 },
104 { .center_freq = 5580 },
105 { .center_freq = 5600 },
106 { .center_freq = 5620 },
107 { .center_freq = 5640 },
108 { .center_freq = 5660 },
109 { .center_freq = 5680 },
110 { .center_freq = 5700 },
111 { .center_freq = 5745 },
112 { .center_freq = 5765 },
113 { .center_freq = 5785 },
114 { .center_freq = 5805 },
115 { .center_freq = 5825 },
116};
117
118static struct ieee80211_supported_band band_5GHz = {
119 .channels = p54_achannels,
120 .n_channels = ARRAY_SIZE(p54_achannels),
121 .bitrates = p54_arates,
122 .n_bitrates = ARRAY_SIZE(p54_arates),
123};
69 124
70void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) 125int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
71{ 126{
72 struct p54_common *priv = dev->priv; 127 struct p54_common *priv = dev->priv;
73 struct bootrec_exp_if *exp_if; 128 struct bootrec_exp_if *exp_if;
@@ -79,7 +134,7 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
79 int i; 134 int i;
80 135
81 if (priv->rx_start) 136 if (priv->rx_start)
82 return; 137 return 0;
83 138
84 while (data < end_data && *data) 139 while (data < end_data && *data)
85 data++; 140 data++;
@@ -94,7 +149,9 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
94 u32 code = le32_to_cpu(bootrec->code); 149 u32 code = le32_to_cpu(bootrec->code);
95 switch (code) { 150 switch (code) {
96 case BR_CODE_COMPONENT_ID: 151 case BR_CODE_COMPONENT_ID:
97 switch (be32_to_cpu(*(__be32 *)bootrec->data)) { 152 priv->fw_interface = be32_to_cpup((__be32 *)
153 bootrec->data);
154 switch (priv->fw_interface) {
98 case FW_FMAC: 155 case FW_FMAC:
99 printk(KERN_INFO "p54: FreeMAC firmware\n"); 156 printk(KERN_INFO "p54: FreeMAC firmware\n");
100 break; 157 break;
@@ -105,7 +162,7 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
105 printk(KERN_INFO "p54: LM86 firmware\n"); 162 printk(KERN_INFO "p54: LM86 firmware\n");
106 break; 163 break;
107 case FW_LM87: 164 case FW_LM87:
108 printk(KERN_INFO "p54: LM87 firmware - not supported yet!\n"); 165 printk(KERN_INFO "p54: LM87 firmware\n");
109 break; 166 break;
110 default: 167 default:
111 printk(KERN_INFO "p54: unknown firmware\n"); 168 printk(KERN_INFO "p54: unknown firmware\n");
@@ -117,11 +174,21 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
117 if (strnlen((unsigned char*)bootrec->data, 24) < 24) 174 if (strnlen((unsigned char*)bootrec->data, 24) < 24)
118 fw_version = (unsigned char*)bootrec->data; 175 fw_version = (unsigned char*)bootrec->data;
119 break; 176 break;
120 case BR_CODE_DESCR: 177 case BR_CODE_DESCR: {
121 priv->rx_start = le32_to_cpu(((__le32 *)bootrec->data)[1]); 178 struct bootrec_desc *desc =
179 (struct bootrec_desc *)bootrec->data;
180 priv->rx_start = le32_to_cpu(desc->rx_start);
122 /* FIXME add sanity checking */ 181 /* FIXME add sanity checking */
123 priv->rx_end = le32_to_cpu(((__le32 *)bootrec->data)[2]) - 0x3500; 182 priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500;
183 priv->headroom = desc->headroom;
184 priv->tailroom = desc->tailroom;
185 if (le32_to_cpu(bootrec->len) == 11)
186 priv->rx_mtu = le16_to_cpu(bootrec->rx_mtu);
187 else
188 priv->rx_mtu = (size_t)
189 0x620 - priv->tx_hdr_len;
124 break; 190 break;
191 }
125 case BR_CODE_EXPOSED_IF: 192 case BR_CODE_EXPOSED_IF:
126 exp_if = (struct bootrec_exp_if *) bootrec->data; 193 exp_if = (struct bootrec_exp_if *) bootrec->data;
127 for (i = 0; i < (len * sizeof(*exp_if) / 4); i++) 194 for (i = 0; i < (len * sizeof(*exp_if) / 4); i++)
@@ -146,23 +213,25 @@ void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
146 213
147 if (priv->fw_var >= 0x300) { 214 if (priv->fw_var >= 0x300) {
148 /* Firmware supports QoS, use it! */ 215 /* Firmware supports QoS, use it! */
149 priv->tx_stats[0].limit = 3; 216 priv->tx_stats[4].limit = 3;
150 priv->tx_stats[1].limit = 4; 217 priv->tx_stats[5].limit = 4;
151 priv->tx_stats[2].limit = 3; 218 priv->tx_stats[6].limit = 3;
152 priv->tx_stats[3].limit = 1; 219 priv->tx_stats[7].limit = 1;
153 dev->queues = 4; 220 dev->queues = 4;
154 } 221 }
222
223 return 0;
155} 224}
156EXPORT_SYMBOL_GPL(p54_parse_firmware); 225EXPORT_SYMBOL_GPL(p54_parse_firmware);
157 226
158static int p54_convert_rev0_to_rev1(struct ieee80211_hw *dev, 227static int p54_convert_rev0(struct ieee80211_hw *dev,
159 struct pda_pa_curve_data *curve_data) 228 struct pda_pa_curve_data *curve_data)
160{ 229{
161 struct p54_common *priv = dev->priv; 230 struct p54_common *priv = dev->priv;
162 struct pda_pa_curve_data_sample_rev1 *rev1; 231 struct p54_pa_curve_data_sample *dst;
163 struct pda_pa_curve_data_sample_rev0 *rev0; 232 struct pda_pa_curve_data_sample_rev0 *src;
164 size_t cd_len = sizeof(*curve_data) + 233 size_t cd_len = sizeof(*curve_data) +
165 (curve_data->points_per_channel*sizeof(*rev1) + 2) * 234 (curve_data->points_per_channel*sizeof(*dst) + 2) *
166 curve_data->channels; 235 curve_data->channels;
167 unsigned int i, j; 236 unsigned int i, j;
168 void *source, *target; 237 void *source, *target;
@@ -180,28 +249,68 @@ static int p54_convert_rev0_to_rev1(struct ieee80211_hw *dev,
180 *((__le16 *)target) = *freq; 249 *((__le16 *)target) = *freq;
181 target += sizeof(__le16); 250 target += sizeof(__le16);
182 for (j = 0; j < curve_data->points_per_channel; j++) { 251 for (j = 0; j < curve_data->points_per_channel; j++) {
183 rev1 = target; 252 dst = target;
184 rev0 = source; 253 src = source;
185 254
186 rev1->rf_power = rev0->rf_power; 255 dst->rf_power = src->rf_power;
187 rev1->pa_detector = rev0->pa_detector; 256 dst->pa_detector = src->pa_detector;
188 rev1->data_64qam = rev0->pcv; 257 dst->data_64qam = src->pcv;
189 /* "invent" the points for the other modulations */ 258 /* "invent" the points for the other modulations */
190#define SUB(x,y) (u8)((x) - (y)) > (x) ? 0 : (x) - (y) 259#define SUB(x,y) (u8)((x) - (y)) > (x) ? 0 : (x) - (y)
191 rev1->data_16qam = SUB(rev0->pcv, 12); 260 dst->data_16qam = SUB(src->pcv, 12);
192 rev1->data_qpsk = SUB(rev1->data_16qam, 12); 261 dst->data_qpsk = SUB(dst->data_16qam, 12);
193 rev1->data_bpsk = SUB(rev1->data_qpsk, 12); 262 dst->data_bpsk = SUB(dst->data_qpsk, 12);
194 rev1->data_barker= SUB(rev1->data_bpsk, 14); 263 dst->data_barker = SUB(dst->data_bpsk, 14);
195#undef SUB 264#undef SUB
196 target += sizeof(*rev1); 265 target += sizeof(*dst);
197 source += sizeof(*rev0); 266 source += sizeof(*src);
198 } 267 }
199 } 268 }
200 269
201 return 0; 270 return 0;
202} 271}
203 272
204int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) 273static int p54_convert_rev1(struct ieee80211_hw *dev,
274 struct pda_pa_curve_data *curve_data)
275{
276 struct p54_common *priv = dev->priv;
277 struct p54_pa_curve_data_sample *dst;
278 struct pda_pa_curve_data_sample_rev1 *src;
279 size_t cd_len = sizeof(*curve_data) +
280 (curve_data->points_per_channel*sizeof(*dst) + 2) *
281 curve_data->channels;
282 unsigned int i, j;
283 void *source, *target;
284
285 priv->curve_data = kmalloc(cd_len, GFP_KERNEL);
286 if (!priv->curve_data)
287 return -ENOMEM;
288
289 memcpy(priv->curve_data, curve_data, sizeof(*curve_data));
290 source = curve_data->data;
291 target = priv->curve_data->data;
292 for (i = 0; i < curve_data->channels; i++) {
293 __le16 *freq = source;
294 source += sizeof(__le16);
295 *((__le16 *)target) = *freq;
296 target += sizeof(__le16);
297 for (j = 0; j < curve_data->points_per_channel; j++) {
298 memcpy(target, source, sizeof(*src));
299
300 target += sizeof(*dst);
301 source += sizeof(*src);
302 }
303 source++;
304 }
305
306 return 0;
307}
308
309static const char *p54_rf_chips[] = { "NULL", "Indigo?", "Duette",
310 "Frisbee", "Xbow", "Longbow" };
311static int p54_init_xbow_synth(struct ieee80211_hw *dev);
312
313static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
205{ 314{
206 struct p54_common *priv = dev->priv; 315 struct p54_common *priv = dev->priv;
207 struct eeprom_pda_wrap *wrap = NULL; 316 struct eeprom_pda_wrap *wrap = NULL;
@@ -210,6 +319,7 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
210 void *tmp; 319 void *tmp;
211 int err; 320 int err;
212 u8 *end = (u8 *)eeprom + len; 321 u8 *end = (u8 *)eeprom + len;
322 DECLARE_MAC_BUF(mac);
213 323
214 wrap = (struct eeprom_pda_wrap *) eeprom; 324 wrap = (struct eeprom_pda_wrap *) eeprom;
215 entry = (void *)wrap->data + le16_to_cpu(wrap->len); 325 entry = (void *)wrap->data + le16_to_cpu(wrap->len);
@@ -250,27 +360,32 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
250 entry->data[1]*sizeof(*priv->output_limit)); 360 entry->data[1]*sizeof(*priv->output_limit));
251 priv->output_limit_len = entry->data[1]; 361 priv->output_limit_len = entry->data[1];
252 break; 362 break;
253 case PDR_PRISM_PA_CAL_CURVE_DATA: 363 case PDR_PRISM_PA_CAL_CURVE_DATA: {
254 if (data_len < sizeof(struct pda_pa_curve_data)) { 364 struct pda_pa_curve_data *curve_data =
365 (struct pda_pa_curve_data *)entry->data;
366 if (data_len < sizeof(*curve_data)) {
255 err = -EINVAL; 367 err = -EINVAL;
256 goto err; 368 goto err;
257 } 369 }
258 370
259 if (((struct pda_pa_curve_data *)entry->data)->cal_method_rev) { 371 switch (curve_data->cal_method_rev) {
260 priv->curve_data = kmalloc(data_len, GFP_KERNEL); 372 case 0:
261 if (!priv->curve_data) { 373 err = p54_convert_rev0(dev, curve_data);
262 err = -ENOMEM; 374 break;
263 goto err; 375 case 1:
264 } 376 err = p54_convert_rev1(dev, curve_data);
265 377 break;
266 memcpy(priv->curve_data, entry->data, data_len); 378 default:
267 } else { 379 printk(KERN_ERR "p54: unknown curve data "
268 err = p54_convert_rev0_to_rev1(dev, (struct pda_pa_curve_data *)entry->data); 380 "revision %d\n",
269 if (err) 381 curve_data->cal_method_rev);
270 goto err; 382 err = -ENODEV;
383 break;
271 } 384 }
385 if (err)
386 goto err;
272 387
273 break; 388 }
274 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: 389 case PDR_PRISM_ZIF_TX_IQ_CALIBRATION:
275 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL); 390 priv->iq_autocal = kmalloc(data_len, GFP_KERNEL);
276 if (!priv->iq_autocal) { 391 if (!priv->iq_autocal) {
@@ -286,7 +401,7 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
286 while ((u8 *)tmp < entry->data + data_len) { 401 while ((u8 *)tmp < entry->data + data_len) {
287 struct bootrec_exp_if *exp_if = tmp; 402 struct bootrec_exp_if *exp_if = tmp;
288 if (le16_to_cpu(exp_if->if_id) == 0xF) 403 if (le16_to_cpu(exp_if->if_id) == 0xF)
289 priv->rxhw = exp_if->variant & cpu_to_le16(0x07); 404 priv->rxhw = le16_to_cpu(exp_if->variant) & 0x07;
290 tmp += sizeof(struct bootrec_exp_if); 405 tmp += sizeof(struct bootrec_exp_if);
291 } 406 }
292 break; 407 break;
@@ -312,6 +427,37 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
312 goto err; 427 goto err;
313 } 428 }
314 429
430 switch (priv->rxhw) {
431 case 4: /* XBow */
432 p54_init_xbow_synth(dev);
433 case 1: /* Indigo? */
434 case 2: /* Duette */
435 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
436 case 3: /* Frisbee */
437 case 5: /* Longbow */
438 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
439 break;
440 default:
441 printk(KERN_ERR "%s: unsupported RF-Chip\n",
442 wiphy_name(dev->wiphy));
443 err = -EINVAL;
444 goto err;
445 }
446
447 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
448 u8 perm_addr[ETH_ALEN];
449
450 printk(KERN_WARNING "%s: Invalid hwaddr! Using randomly generated MAC addr\n",
451 wiphy_name(dev->wiphy));
452 random_ether_addr(perm_addr);
453 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
454 }
455
456 printk(KERN_INFO "%s: hwaddr %s, MAC:isl38%02x RF:%s\n",
457 wiphy_name(dev->wiphy),
458 print_mac(mac, dev->wiphy->perm_addr),
459 priv->version, p54_rf_chips[priv->rxhw]);
460
315 return 0; 461 return 0;
316 462
317 err: 463 err:
@@ -335,40 +481,54 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
335} 481}
336EXPORT_SYMBOL_GPL(p54_parse_eeprom); 482EXPORT_SYMBOL_GPL(p54_parse_eeprom);
337 483
338void p54_fill_eeprom_readback(struct p54_control_hdr *hdr) 484static int p54_rssi_to_dbm(struct ieee80211_hw *dev, int rssi)
339{ 485{
340 struct p54_eeprom_lm86 *eeprom_hdr; 486 /* TODO: get the rssi_add & rssi_mul data from the eeprom */
341 487 return ((rssi * 0x83) / 64 - 400) / 4;
342 hdr->magic1 = cpu_to_le16(0x8000);
343 hdr->len = cpu_to_le16(sizeof(*eeprom_hdr) + 0x2000);
344 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK);
345 hdr->retry1 = hdr->retry2 = 0;
346 eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data;
347 eeprom_hdr->offset = 0x0;
348 eeprom_hdr->len = cpu_to_le16(0x2000);
349} 488}
350EXPORT_SYMBOL_GPL(p54_fill_eeprom_readback);
351 489
352static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb) 490static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
353{ 491{
492 struct p54_common *priv = dev->priv;
354 struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data; 493 struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data;
355 struct ieee80211_rx_status rx_status = {0}; 494 struct ieee80211_rx_status rx_status = {0};
356 u16 freq = le16_to_cpu(hdr->freq); 495 u16 freq = le16_to_cpu(hdr->freq);
496 size_t header_len = sizeof(*hdr);
497 u32 tsf32;
498
499 if (!(hdr->magic & cpu_to_le16(0x0001))) {
500 if (priv->filter_flags & FIF_FCSFAIL)
501 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
502 else
503 return 0;
504 }
357 505
358 rx_status.signal = hdr->rssi; 506 rx_status.signal = p54_rssi_to_dbm(dev, hdr->rssi);
507 rx_status.noise = priv->noise;
359 /* XX correct? */ 508 /* XX correct? */
360 rx_status.qual = (100 * hdr->rssi) / 127; 509 rx_status.qual = (100 * hdr->rssi) / 127;
361 rx_status.rate_idx = hdr->rate & 0xf; 510 rx_status.rate_idx = hdr->rate & 0xf;
362 rx_status.freq = freq; 511 rx_status.freq = freq;
363 rx_status.band = IEEE80211_BAND_2GHZ; 512 rx_status.band = IEEE80211_BAND_2GHZ;
364 rx_status.antenna = hdr->antenna; 513 rx_status.antenna = hdr->antenna;
365 rx_status.mactime = le64_to_cpu(hdr->timestamp); 514
515 tsf32 = le32_to_cpu(hdr->tsf32);
516 if (tsf32 < priv->tsf_low32)
517 priv->tsf_high32++;
518 rx_status.mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
519 priv->tsf_low32 = tsf32;
520
366 rx_status.flag |= RX_FLAG_TSFT; 521 rx_status.flag |= RX_FLAG_TSFT;
367 522
368 skb_pull(skb, sizeof(*hdr)); 523 if (hdr->magic & cpu_to_le16(0x4000))
524 header_len += hdr->align[0];
525
526 skb_pull(skb, header_len);
369 skb_trim(skb, le16_to_cpu(hdr->len)); 527 skb_trim(skb, le16_to_cpu(hdr->len));
370 528
371 ieee80211_rx_irqsafe(dev, skb, &rx_status); 529 ieee80211_rx_irqsafe(dev, skb, &rx_status);
530
531 return -1;
372} 532}
373 533
374static void inline p54_wake_free_queues(struct ieee80211_hw *dev) 534static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
@@ -377,7 +537,7 @@ static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
377 int i; 537 int i;
378 538
379 for (i = 0; i < dev->queues; i++) 539 for (i = 0; i < dev->queues; i++)
380 if (priv->tx_stats[i].len < priv->tx_stats[i].limit) 540 if (priv->tx_stats[i + 4].len < priv->tx_stats[i + 4].limit)
381 ieee80211_wake_queue(dev, i); 541 ieee80211_wake_queue(dev, i);
382} 542}
383 543
@@ -387,11 +547,13 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
387 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 547 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
388 struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data; 548 struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data;
389 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next; 549 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next;
390 u32 addr = le32_to_cpu(hdr->req_id) - 0x70; 550 u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
391 struct memrecord *range = NULL; 551 struct memrecord *range = NULL;
392 u32 freed = 0; 552 u32 freed = 0;
393 u32 last_addr = priv->rx_start; 553 u32 last_addr = priv->rx_start;
554 unsigned long flags;
394 555
556 spin_lock_irqsave(&priv->tx_queue.lock, flags);
395 while (entry != (struct sk_buff *)&priv->tx_queue) { 557 while (entry != (struct sk_buff *)&priv->tx_queue) {
396 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 558 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
397 range = (void *)info->driver_data; 559 range = (void *)info->driver_data;
@@ -412,13 +574,15 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
412 574
413 last_addr = range->end_addr; 575 last_addr = range->end_addr;
414 __skb_unlink(entry, &priv->tx_queue); 576 __skb_unlink(entry, &priv->tx_queue);
577 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
578
415 memset(&info->status, 0, sizeof(info->status)); 579 memset(&info->status, 0, sizeof(info->status));
416 entry_hdr = (struct p54_control_hdr *) entry->data; 580 entry_hdr = (struct p54_control_hdr *) entry->data;
417 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; 581 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data;
418 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) 582 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0)
419 pad = entry_data->align[0]; 583 pad = entry_data->align[0];
420 584
421 priv->tx_stats[entry_data->hw_queue - 4].len--; 585 priv->tx_stats[entry_data->hw_queue].len--;
422 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 586 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
423 if (!(payload->status & 0x01)) 587 if (!(payload->status & 0x01))
424 info->flags |= IEEE80211_TX_STAT_ACK; 588 info->flags |= IEEE80211_TX_STAT_ACK;
@@ -426,21 +590,60 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
426 info->status.excessive_retries = 1; 590 info->status.excessive_retries = 1;
427 } 591 }
428 info->status.retry_count = payload->retries - 1; 592 info->status.retry_count = payload->retries - 1;
429 info->status.ack_signal = le16_to_cpu(payload->ack_rssi); 593 info->status.ack_signal = p54_rssi_to_dbm(dev,
594 le16_to_cpu(payload->ack_rssi));
430 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); 595 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
431 ieee80211_tx_status_irqsafe(dev, entry); 596 ieee80211_tx_status_irqsafe(dev, entry);
432 break; 597 goto out;
433 } else 598 } else
434 last_addr = range->end_addr; 599 last_addr = range->end_addr;
435 entry = entry->next; 600 entry = entry->next;
436 } 601 }
602 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
437 603
604out:
438 if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 + 605 if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 +
439 sizeof(struct p54_control_hdr)) 606 sizeof(struct p54_control_hdr))
440 p54_wake_free_queues(dev); 607 p54_wake_free_queues(dev);
441} 608}
442 609
443static void p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb) 610static void p54_rx_eeprom_readback(struct ieee80211_hw *dev,
611 struct sk_buff *skb)
612{
613 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
614 struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
615 struct p54_common *priv = dev->priv;
616
617 if (!priv->eeprom)
618 return ;
619
620 memcpy(priv->eeprom, eeprom->data, le16_to_cpu(eeprom->len));
621
622 complete(&priv->eeprom_comp);
623}
624
625static void p54_rx_stats(struct ieee80211_hw *dev, struct sk_buff *skb)
626{
627 struct p54_common *priv = dev->priv;
628 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
629 struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
630 u32 tsf32 = le32_to_cpu(stats->tsf32);
631
632 if (tsf32 < priv->tsf_low32)
633 priv->tsf_high32++;
634 priv->tsf_low32 = tsf32;
635
636 priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail);
637 priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success);
638 priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);
639
640 priv->noise = p54_rssi_to_dbm(dev, le32_to_cpu(stats->noise));
641 complete(&priv->stats_comp);
642
643 mod_timer(&priv->stats_timer, jiffies + 5 * HZ);
644}
645
646static int p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
444{ 647{
445 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 648 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data;
446 649
@@ -450,36 +653,30 @@ static void p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
450 break; 653 break;
451 case P54_CONTROL_TYPE_BBP: 654 case P54_CONTROL_TYPE_BBP:
452 break; 655 break;
656 case P54_CONTROL_TYPE_STAT_READBACK:
657 p54_rx_stats(dev, skb);
658 break;
659 case P54_CONTROL_TYPE_EEPROM_READBACK:
660 p54_rx_eeprom_readback(dev, skb);
661 break;
453 default: 662 default:
454 printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n", 663 printk(KERN_DEBUG "%s: not handling 0x%02x type control frame\n",
455 wiphy_name(dev->wiphy), le16_to_cpu(hdr->type)); 664 wiphy_name(dev->wiphy), le16_to_cpu(hdr->type));
456 break; 665 break;
457 } 666 }
667
668 return 0;
458} 669}
459 670
460/* returns zero if skb can be reused */ 671/* returns zero if skb can be reused */
461int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) 672int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
462{ 673{
463 u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8; 674 u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8;
464 switch (type) { 675
465 case 0x00: 676 if (type == 0x80)
466 case 0x01: 677 return p54_rx_control(dev, skb);
467 p54_rx_data(dev, skb); 678 else
468 return -1; 679 return p54_rx_data(dev, skb);
469 case 0x4d:
470 /* TODO: do something better... but then again, I've never seen this happen */
471 printk(KERN_ERR "%s: Received fault. Probably need to restart hardware now..\n",
472 wiphy_name(dev->wiphy));
473 break;
474 case 0x80:
475 p54_rx_control(dev, skb);
476 break;
477 default:
478 printk(KERN_ERR "%s: unknown frame RXed (0x%02x)\n",
479 wiphy_name(dev->wiphy), type);
480 break;
481 }
482 return 0;
483} 680}
484EXPORT_SYMBOL_GPL(p54_rx); 681EXPORT_SYMBOL_GPL(p54_rx);
485 682
@@ -503,7 +700,7 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
503 u32 target_addr = priv->rx_start; 700 u32 target_addr = priv->rx_start;
504 unsigned long flags; 701 unsigned long flags;
505 unsigned int left; 702 unsigned int left;
506 len = (len + 0x170 + 3) & ~0x3; /* 0x70 headroom, 0x100 tailroom */ 703 len = (len + priv->headroom + priv->tailroom + 3) & ~0x3;
507 704
508 spin_lock_irqsave(&priv->tx_queue.lock, flags); 705 spin_lock_irqsave(&priv->tx_queue.lock, flags);
509 left = skb_queue_len(&priv->tx_queue); 706 left = skb_queue_len(&priv->tx_queue);
@@ -538,14 +735,74 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
538 range->start_addr = target_addr; 735 range->start_addr = target_addr;
539 range->end_addr = target_addr + len; 736 range->end_addr = target_addr + len;
540 __skb_queue_after(&priv->tx_queue, target_skb, skb); 737 __skb_queue_after(&priv->tx_queue, target_skb, skb);
541 if (largest_hole < IEEE80211_MAX_RTS_THRESHOLD + 0x170 + 738 if (largest_hole < priv->rx_mtu + priv->headroom +
739 priv->tailroom +
542 sizeof(struct p54_control_hdr)) 740 sizeof(struct p54_control_hdr))
543 ieee80211_stop_queues(dev); 741 ieee80211_stop_queues(dev);
544 } 742 }
545 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 743 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
546 744
547 data->req_id = cpu_to_le32(target_addr + 0x70); 745 data->req_id = cpu_to_le32(target_addr + priv->headroom);
746}
747
748int p54_read_eeprom(struct ieee80211_hw *dev)
749{
750 struct p54_common *priv = dev->priv;
751 struct p54_control_hdr *hdr = NULL;
752 struct p54_eeprom_lm86 *eeprom_hdr;
753 size_t eeprom_size = 0x2020, offset = 0, blocksize;
754 int ret = -ENOMEM;
755 void *eeprom = NULL;
756
757 hdr = (struct p54_control_hdr *)kzalloc(sizeof(*hdr) +
758 sizeof(*eeprom_hdr) + EEPROM_READBACK_LEN, GFP_KERNEL);
759 if (!hdr)
760 goto free;
761
762 priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL);
763 if (!priv->eeprom)
764 goto free;
765
766 eeprom = kzalloc(eeprom_size, GFP_KERNEL);
767 if (!eeprom)
768 goto free;
769
770 hdr->magic1 = cpu_to_le16(0x8000);
771 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK);
772 hdr->retry1 = hdr->retry2 = 0;
773 eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data;
774
775 while (eeprom_size) {
776 blocksize = min(eeprom_size, (size_t)EEPROM_READBACK_LEN);
777 hdr->len = cpu_to_le16(blocksize + sizeof(*eeprom_hdr));
778 eeprom_hdr->offset = cpu_to_le16(offset);
779 eeprom_hdr->len = cpu_to_le16(blocksize);
780 p54_assign_address(dev, NULL, hdr, le16_to_cpu(hdr->len) +
781 sizeof(*hdr));
782 priv->tx(dev, hdr, le16_to_cpu(hdr->len) + sizeof(*hdr), 0);
783
784 if (!wait_for_completion_interruptible_timeout(&priv->eeprom_comp, HZ)) {
785 printk(KERN_ERR "%s: device does not respond!\n",
786 wiphy_name(dev->wiphy));
787 ret = -EBUSY;
788 goto free;
789 }
790
791 memcpy(eeprom + offset, priv->eeprom, blocksize);
792 offset += blocksize;
793 eeprom_size -= blocksize;
794 }
795
796 ret = p54_parse_eeprom(dev, eeprom, offset);
797free:
798 kfree(priv->eeprom);
799 priv->eeprom = NULL;
800 kfree(hdr);
801 kfree(eeprom);
802
803 return ret;
548} 804}
805EXPORT_SYMBOL_GPL(p54_read_eeprom);
549 806
550static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 807static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
551{ 808{
@@ -559,7 +816,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
559 u8 rate; 816 u8 rate;
560 u8 cts_rate = 0x20; 817 u8 cts_rate = 0x20;
561 818
562 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb)]; 819 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb) + 4];
563 if (unlikely(current_queue->len > current_queue->limit)) 820 if (unlikely(current_queue->len > current_queue->limit))
564 return NETDEV_TX_BUSY; 821 return NETDEV_TX_BUSY;
565 current_queue->len++; 822 current_queue->len++;
@@ -601,7 +858,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
601 txhdr->hw_queue = skb_get_queue_mapping(skb) + 4; 858 txhdr->hw_queue = skb_get_queue_mapping(skb) + 4;
602 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ? 859 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ?
603 2 : info->antenna_sel_tx - 1; 860 2 : info->antenna_sel_tx - 1;
604 txhdr->output_power = 0x7f; // HW Maximum 861 txhdr->output_power = priv->output_power;
605 txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 862 txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
606 0 : cts_rate; 863 0 : cts_rate;
607 if (padding) 864 if (padding)
@@ -628,12 +885,12 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
628} 885}
629 886
630static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type, 887static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type,
631 const u8 *dst, const u8 *src, u8 antenna, 888 const u8 *bssid)
632 u32 magic3, u32 magic8, u32 magic9)
633{ 889{
634 struct p54_common *priv = dev->priv; 890 struct p54_common *priv = dev->priv;
635 struct p54_control_hdr *hdr; 891 struct p54_control_hdr *hdr;
636 struct p54_tx_control_filter *filter; 892 struct p54_tx_control_filter *filter;
893 size_t data_len;
637 894
638 hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) + 895 hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) +
639 priv->tx_hdr_len, GFP_ATOMIC); 896 priv->tx_hdr_len, GFP_ATOMIC);
@@ -644,25 +901,35 @@ static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type,
644 901
645 filter = (struct p54_tx_control_filter *) hdr->data; 902 filter = (struct p54_tx_control_filter *) hdr->data;
646 hdr->magic1 = cpu_to_le16(0x8001); 903 hdr->magic1 = cpu_to_le16(0x8001);
647 hdr->len = cpu_to_le16(sizeof(*filter));
648 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*filter));
649 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET); 904 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET);
650 905
651 filter->filter_type = cpu_to_le16(filter_type); 906 priv->filter_type = filter->filter_type = cpu_to_le16(filter_type);
652 memcpy(filter->dst, dst, ETH_ALEN); 907 memcpy(filter->mac_addr, priv->mac_addr, ETH_ALEN);
653 if (!src) 908 if (!bssid)
654 memset(filter->src, ~0, ETH_ALEN); 909 memset(filter->bssid, ~0, ETH_ALEN);
655 else 910 else
656 memcpy(filter->src, src, ETH_ALEN); 911 memcpy(filter->bssid, bssid, ETH_ALEN);
657 filter->antenna = antenna; 912
658 filter->magic3 = cpu_to_le32(magic3); 913 filter->rx_antenna = priv->rx_antenna;
659 filter->rx_addr = cpu_to_le32(priv->rx_end); 914
660 filter->max_rx = cpu_to_le16(0x0620); /* FIXME: for usb ver 1.. maybe */ 915 if (priv->fw_var < 0x500) {
661 filter->rxhw = priv->rxhw; 916 data_len = P54_TX_CONTROL_FILTER_V1_LEN;
662 filter->magic8 = cpu_to_le16(magic8); 917 filter->v1.basic_rate_mask = cpu_to_le32(0x15F);
663 filter->magic9 = cpu_to_le16(magic9); 918 filter->v1.rx_addr = cpu_to_le32(priv->rx_end);
664 919 filter->v1.max_rx = cpu_to_le16(priv->rx_mtu);
665 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*filter), 1); 920 filter->v1.rxhw = cpu_to_le16(priv->rxhw);
921 filter->v1.wakeup_timer = cpu_to_le16(500);
922 } else {
923 data_len = P54_TX_CONTROL_FILTER_V2_LEN;
924 filter->v2.rx_addr = cpu_to_le32(priv->rx_end);
925 filter->v2.max_rx = cpu_to_le16(priv->rx_mtu);
926 filter->v2.rxhw = cpu_to_le16(priv->rxhw);
927 filter->v2.timer = cpu_to_le16(1000);
928 }
929
930 hdr->len = cpu_to_le16(data_len);
931 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
932 priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
666 return 0; 933 return 0;
667} 934}
668 935
@@ -672,12 +939,10 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
672 struct p54_control_hdr *hdr; 939 struct p54_control_hdr *hdr;
673 struct p54_tx_control_channel *chan; 940 struct p54_tx_control_channel *chan;
674 unsigned int i; 941 unsigned int i;
675 size_t payload_len = sizeof(*chan) + sizeof(u32)*2 + 942 size_t data_len;
676 sizeof(*chan->curve_data) *
677 priv->curve_data->points_per_channel;
678 void *entry; 943 void *entry;
679 944
680 hdr = kzalloc(sizeof(*hdr) + payload_len + 945 hdr = kzalloc(sizeof(*hdr) + sizeof(*chan) +
681 priv->tx_hdr_len, GFP_KERNEL); 946 priv->tx_hdr_len, GFP_KERNEL);
682 if (!hdr) 947 if (!hdr)
683 return -ENOMEM; 948 return -ENOMEM;
@@ -687,12 +952,11 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
687 chan = (struct p54_tx_control_channel *) hdr->data; 952 chan = (struct p54_tx_control_channel *) hdr->data;
688 953
689 hdr->magic1 = cpu_to_le16(0x8001); 954 hdr->magic1 = cpu_to_le16(0x8001);
690 hdr->len = cpu_to_le16(sizeof(*chan)); 955
691 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE); 956 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE);
692 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + payload_len);
693 957
694 chan->magic1 = cpu_to_le16(0x1); 958 chan->flags = cpu_to_le16(0x1);
695 chan->magic2 = cpu_to_le16(0x0); 959 chan->dwell = cpu_to_le16(0x0);
696 960
697 for (i = 0; i < priv->iq_autocal_len; i++) { 961 for (i = 0; i < priv->iq_autocal_len; i++) {
698 if (priv->iq_autocal[i].freq != freq) 962 if (priv->iq_autocal[i].freq != freq)
@@ -710,35 +974,51 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
710 continue; 974 continue;
711 975
712 chan->val_barker = 0x38; 976 chan->val_barker = 0x38;
713 chan->val_bpsk = priv->output_limit[i].val_bpsk; 977 chan->val_bpsk = chan->dup_bpsk =
714 chan->val_qpsk = priv->output_limit[i].val_qpsk; 978 priv->output_limit[i].val_bpsk;
715 chan->val_16qam = priv->output_limit[i].val_16qam; 979 chan->val_qpsk = chan->dup_qpsk =
716 chan->val_64qam = priv->output_limit[i].val_64qam; 980 priv->output_limit[i].val_qpsk;
981 chan->val_16qam = chan->dup_16qam =
982 priv->output_limit[i].val_16qam;
983 chan->val_64qam = chan->dup_64qam =
984 priv->output_limit[i].val_64qam;
717 break; 985 break;
718 } 986 }
719 if (i == priv->output_limit_len) 987 if (i == priv->output_limit_len)
720 goto err; 988 goto err;
721 989
722 chan->pa_points_per_curve = priv->curve_data->points_per_channel;
723
724 entry = priv->curve_data->data; 990 entry = priv->curve_data->data;
725 for (i = 0; i < priv->curve_data->channels; i++) { 991 for (i = 0; i < priv->curve_data->channels; i++) {
726 if (*((__le16 *)entry) != freq) { 992 if (*((__le16 *)entry) != freq) {
727 entry += sizeof(__le16); 993 entry += sizeof(__le16);
728 entry += sizeof(struct pda_pa_curve_data_sample_rev1) * 994 entry += sizeof(struct p54_pa_curve_data_sample) *
729 chan->pa_points_per_curve; 995 priv->curve_data->points_per_channel;
730 continue; 996 continue;
731 } 997 }
732 998
733 entry += sizeof(__le16); 999 entry += sizeof(__le16);
1000 chan->pa_points_per_curve =
1001 min(priv->curve_data->points_per_channel, (u8) 8);
1002
734 memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) * 1003 memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) *
735 chan->pa_points_per_curve); 1004 chan->pa_points_per_curve);
736 break; 1005 break;
737 } 1006 }
738 1007
739 memcpy(hdr->data + payload_len - 4, &chan->val_bpsk, 4); 1008 if (priv->fw_var < 0x500) {
1009 data_len = P54_TX_CONTROL_CHANNEL_V1_LEN;
1010 chan->v1.rssical_mul = cpu_to_le16(130);
1011 chan->v1.rssical_add = cpu_to_le16(0xfe70);
1012 } else {
1013 data_len = P54_TX_CONTROL_CHANNEL_V2_LEN;
1014 chan->v2.rssical_mul = cpu_to_le16(130);
1015 chan->v2.rssical_add = cpu_to_le16(0xfe70);
1016 chan->v2.basic_rate_mask = cpu_to_le32(0x15f);
1017 }
740 1018
741 priv->tx(dev, hdr, sizeof(*hdr) + payload_len, 1); 1019 hdr->len = cpu_to_le16(data_len);
1020 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
1021 priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
742 return 0; 1022 return 0;
743 1023
744 err: 1024 err:
@@ -846,12 +1126,25 @@ static int p54_start(struct ieee80211_hw *dev)
846 return -ENOMEM; 1126 return -ENOMEM;
847 } 1127 }
848 1128
1129 if (!priv->cached_stats) {
1130 priv->cached_stats = kzalloc(sizeof(struct p54_statistics) +
1131 priv->tx_hdr_len + sizeof(struct p54_control_hdr),
1132 GFP_KERNEL);
1133
1134 if (!priv->cached_stats) {
1135 kfree(priv->cached_vdcf);
1136 priv->cached_vdcf = NULL;
1137 return -ENOMEM;
1138 }
1139 }
1140
849 err = priv->open(dev); 1141 err = priv->open(dev);
850 if (!err) 1142 if (!err)
851 priv->mode = IEEE80211_IF_TYPE_MNTR; 1143 priv->mode = NL80211_IFTYPE_MONITOR;
852 1144
853 p54_init_vdcf(dev); 1145 p54_init_vdcf(dev);
854 1146
1147 mod_timer(&priv->stats_timer, jiffies + HZ);
855 return err; 1148 return err;
856} 1149}
857 1150
@@ -859,10 +1152,13 @@ static void p54_stop(struct ieee80211_hw *dev)
859{ 1152{
860 struct p54_common *priv = dev->priv; 1153 struct p54_common *priv = dev->priv;
861 struct sk_buff *skb; 1154 struct sk_buff *skb;
1155
1156 del_timer(&priv->stats_timer);
862 while ((skb = skb_dequeue(&priv->tx_queue))) 1157 while ((skb = skb_dequeue(&priv->tx_queue)))
863 kfree_skb(skb); 1158 kfree_skb(skb);
864 priv->stop(dev); 1159 priv->stop(dev);
865 priv->mode = IEEE80211_IF_TYPE_INVALID; 1160 priv->tsf_high32 = priv->tsf_low32 = 0;
1161 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
866} 1162}
867 1163
868static int p54_add_interface(struct ieee80211_hw *dev, 1164static int p54_add_interface(struct ieee80211_hw *dev,
@@ -870,11 +1166,11 @@ static int p54_add_interface(struct ieee80211_hw *dev,
870{ 1166{
871 struct p54_common *priv = dev->priv; 1167 struct p54_common *priv = dev->priv;
872 1168
873 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 1169 if (priv->mode != NL80211_IFTYPE_MONITOR)
874 return -EOPNOTSUPP; 1170 return -EOPNOTSUPP;
875 1171
876 switch (conf->type) { 1172 switch (conf->type) {
877 case IEEE80211_IF_TYPE_STA: 1173 case NL80211_IFTYPE_STATION:
878 priv->mode = conf->type; 1174 priv->mode = conf->type;
879 break; 1175 break;
880 default: 1176 default:
@@ -883,12 +1179,11 @@ static int p54_add_interface(struct ieee80211_hw *dev,
883 1179
884 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 1180 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
885 1181
886 p54_set_filter(dev, 0, priv->mac_addr, NULL, 0, 1, 0, 0xF642); 1182 p54_set_filter(dev, 0, NULL);
887 p54_set_filter(dev, 0, priv->mac_addr, NULL, 1, 0, 0, 0xF642);
888 1183
889 switch (conf->type) { 1184 switch (conf->type) {
890 case IEEE80211_IF_TYPE_STA: 1185 case NL80211_IFTYPE_STATION:
891 p54_set_filter(dev, 1, priv->mac_addr, NULL, 0, 0x15F, 0x1F4, 0); 1186 p54_set_filter(dev, 1, NULL);
892 break; 1187 break;
893 default: 1188 default:
894 BUG(); /* impossible */ 1189 BUG(); /* impossible */
@@ -904,9 +1199,9 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
904 struct ieee80211_if_init_conf *conf) 1199 struct ieee80211_if_init_conf *conf)
905{ 1200{
906 struct p54_common *priv = dev->priv; 1201 struct p54_common *priv = dev->priv;
907 priv->mode = IEEE80211_IF_TYPE_MNTR; 1202 priv->mode = NL80211_IFTYPE_MONITOR;
908 memset(priv->mac_addr, 0, ETH_ALEN); 1203 memset(priv->mac_addr, 0, ETH_ALEN);
909 p54_set_filter(dev, 0, priv->mac_addr, NULL, 2, 0, 0, 0); 1204 p54_set_filter(dev, 0, NULL);
910} 1205}
911 1206
912static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 1207static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
@@ -915,6 +1210,9 @@ static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
915 struct p54_common *priv = dev->priv; 1210 struct p54_common *priv = dev->priv;
916 1211
917 mutex_lock(&priv->conf_mutex); 1212 mutex_lock(&priv->conf_mutex);
1213 priv->rx_antenna = (conf->antenna_sel_rx == 0) ?
1214 2 : conf->antenna_sel_tx - 1;
1215 priv->output_power = conf->power_level << 2;
918 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq)); 1216 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq));
919 p54_set_vdcf(dev); 1217 p54_set_vdcf(dev);
920 mutex_unlock(&priv->conf_mutex); 1218 mutex_unlock(&priv->conf_mutex);
@@ -928,8 +1226,7 @@ static int p54_config_interface(struct ieee80211_hw *dev,
928 struct p54_common *priv = dev->priv; 1226 struct p54_common *priv = dev->priv;
929 1227
930 mutex_lock(&priv->conf_mutex); 1228 mutex_lock(&priv->conf_mutex);
931 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 0, 1, 0, 0xF642); 1229 p54_set_filter(dev, 0, conf->bssid);
932 p54_set_filter(dev, 0, priv->mac_addr, conf->bssid, 2, 0, 0, 0);
933 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0); 1230 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0);
934 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 1231 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
935 mutex_unlock(&priv->conf_mutex); 1232 mutex_unlock(&priv->conf_mutex);
@@ -943,15 +1240,28 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
943{ 1240{
944 struct p54_common *priv = dev->priv; 1241 struct p54_common *priv = dev->priv;
945 1242
946 *total_flags &= FIF_BCN_PRBRESP_PROMISC; 1243 *total_flags &= FIF_BCN_PRBRESP_PROMISC |
1244 FIF_PROMISC_IN_BSS |
1245 FIF_FCSFAIL;
1246
1247 priv->filter_flags = *total_flags;
947 1248
948 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 1249 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
949 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 1250 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
950 p54_set_filter(dev, 0, priv->mac_addr, 1251 p54_set_filter(dev, le16_to_cpu(priv->filter_type),
951 NULL, 2, 0, 0, 0); 1252 NULL);
1253 else
1254 p54_set_filter(dev, le16_to_cpu(priv->filter_type),
1255 priv->bssid);
1256 }
1257
1258 if (changed_flags & FIF_PROMISC_IN_BSS) {
1259 if (*total_flags & FIF_PROMISC_IN_BSS)
1260 p54_set_filter(dev, le16_to_cpu(priv->filter_type) |
1261 0x8, NULL);
952 else 1262 else
953 p54_set_filter(dev, 0, priv->mac_addr, 1263 p54_set_filter(dev, le16_to_cpu(priv->filter_type) &
954 priv->bssid, 2, 0, 0, 0); 1264 ~0x8, priv->bssid);
955 } 1265 }
956} 1266}
957 1267
@@ -975,10 +1285,67 @@ static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
975 return 0; 1285 return 0;
976} 1286}
977 1287
1288static int p54_init_xbow_synth(struct ieee80211_hw *dev)
1289{
1290 struct p54_common *priv = dev->priv;
1291 struct p54_control_hdr *hdr;
1292 struct p54_tx_control_xbow_synth *xbow;
1293
1294 hdr = kzalloc(sizeof(*hdr) + sizeof(*xbow) +
1295 priv->tx_hdr_len, GFP_KERNEL);
1296 if (!hdr)
1297 return -ENOMEM;
1298
1299 hdr = (void *)hdr + priv->tx_hdr_len;
1300 hdr->magic1 = cpu_to_le16(0x8001);
1301 hdr->len = cpu_to_le16(sizeof(*xbow));
1302 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_XBOW_SYNTH_CFG);
1303 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*xbow));
1304
1305 xbow = (struct p54_tx_control_xbow_synth *) hdr->data;
1306 xbow->magic1 = cpu_to_le16(0x1);
1307 xbow->magic2 = cpu_to_le16(0x2);
1308 xbow->freq = cpu_to_le16(5390);
1309
1310 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*xbow), 1);
1311
1312 return 0;
1313}
1314
1315static void p54_statistics_timer(unsigned long data)
1316{
1317 struct ieee80211_hw *dev = (struct ieee80211_hw *) data;
1318 struct p54_common *priv = dev->priv;
1319 struct p54_control_hdr *hdr;
1320 struct p54_statistics *stats;
1321
1322 BUG_ON(!priv->cached_stats);
1323
1324 hdr = (void *)priv->cached_stats + priv->tx_hdr_len;
1325 hdr->magic1 = cpu_to_le16(0x8000);
1326 hdr->len = cpu_to_le16(sizeof(*stats));
1327 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_STAT_READBACK);
1328 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*stats));
1329
1330 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*stats), 0);
1331}
1332
978static int p54_get_stats(struct ieee80211_hw *dev, 1333static int p54_get_stats(struct ieee80211_hw *dev,
979 struct ieee80211_low_level_stats *stats) 1334 struct ieee80211_low_level_stats *stats)
980{ 1335{
981 /* TODO */ 1336 struct p54_common *priv = dev->priv;
1337
1338 del_timer(&priv->stats_timer);
1339 p54_statistics_timer((unsigned long)dev);
1340
1341 if (!wait_for_completion_interruptible_timeout(&priv->stats_comp, HZ)) {
1342 printk(KERN_ERR "%s: device does not respond!\n",
1343 wiphy_name(dev->wiphy));
1344 return -EBUSY;
1345 }
1346
1347 memcpy(stats, &priv->stats, sizeof(*stats));
1348
982 return 0; 1349 return 0;
983} 1350}
984 1351
@@ -987,7 +1354,7 @@ static int p54_get_tx_stats(struct ieee80211_hw *dev,
987{ 1354{
988 struct p54_common *priv = dev->priv; 1355 struct p54_common *priv = dev->priv;
989 1356
990 memcpy(stats, &priv->tx_stats, sizeof(stats[0]) * dev->queues); 1357 memcpy(stats, &priv->tx_stats[4], sizeof(stats[0]) * dev->queues);
991 1358
992 return 0; 1359 return 0;
993} 1360}
@@ -1016,22 +1383,32 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
1016 return NULL; 1383 return NULL;
1017 1384
1018 priv = dev->priv; 1385 priv = dev->priv;
1019 priv->mode = IEEE80211_IF_TYPE_INVALID; 1386 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1020 skb_queue_head_init(&priv->tx_queue); 1387 skb_queue_head_init(&priv->tx_queue);
1021 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
1022 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ 1388 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */
1023 IEEE80211_HW_RX_INCLUDES_FCS | 1389 IEEE80211_HW_RX_INCLUDES_FCS |
1024 IEEE80211_HW_SIGNAL_UNSPEC; 1390 IEEE80211_HW_SIGNAL_DBM |
1391 IEEE80211_HW_NOISE_DBM;
1392
1393 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1394
1025 dev->channel_change_time = 1000; /* TODO: find actual value */ 1395 dev->channel_change_time = 1000; /* TODO: find actual value */
1026 dev->max_signal = 127;
1027 1396
1028 priv->tx_stats[0].limit = 5; 1397 priv->tx_stats[0].limit = 1;
1398 priv->tx_stats[1].limit = 1;
1399 priv->tx_stats[2].limit = 1;
1400 priv->tx_stats[3].limit = 1;
1401 priv->tx_stats[4].limit = 5;
1029 dev->queues = 1; 1402 dev->queues = 1;
1030 1403 priv->noise = -94;
1031 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + 1404 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 +
1032 sizeof(struct p54_tx_control_allocdata); 1405 sizeof(struct p54_tx_control_allocdata);
1033 1406
1034 mutex_init(&priv->conf_mutex); 1407 mutex_init(&priv->conf_mutex);
1408 init_completion(&priv->eeprom_comp);
1409 init_completion(&priv->stats_comp);
1410 setup_timer(&priv->stats_timer, p54_statistics_timer,
1411 (unsigned long)dev);
1035 1412
1036 return dev; 1413 return dev;
1037} 1414}
@@ -1040,6 +1417,7 @@ EXPORT_SYMBOL_GPL(p54_init_common);
1040void p54_free_common(struct ieee80211_hw *dev) 1417void p54_free_common(struct ieee80211_hw *dev)
1041{ 1418{
1042 struct p54_common *priv = dev->priv; 1419 struct p54_common *priv = dev->priv;
1420 kfree(priv->cached_stats);
1043 kfree(priv->iq_autocal); 1421 kfree(priv->iq_autocal);
1044 kfree(priv->output_limit); 1422 kfree(priv->output_limit);
1045 kfree(priv->curve_data); 1423 kfree(priv->curve_data);
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index 8db6c0e8e540..2fa994cfcfed 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54COMMON_H 1#ifndef P54COMMON_H
2#define PRISM54COMMON_H 2#define P54COMMON_H
3 3
4/* 4/*
5 * Common code specific definitions for mac80211 Prism54 drivers 5 * Common code specific definitions for mac80211 Prism54 drivers
@@ -18,7 +18,8 @@
18struct bootrec { 18struct bootrec {
19 __le32 code; 19 __le32 code;
20 __le32 len; 20 __le32 len;
21 u32 data[0]; 21 u32 data[10];
22 __le16 rx_mtu;
22} __attribute__((packed)); 23} __attribute__((packed));
23 24
24struct bootrec_exp_if { 25struct bootrec_exp_if {
@@ -29,6 +30,17 @@ struct bootrec_exp_if {
29 __le16 top_compat; 30 __le16 top_compat;
30} __attribute__((packed)); 31} __attribute__((packed));
31 32
33struct bootrec_desc {
34 __le16 modes;
35 __le16 flags;
36 __le32 rx_start;
37 __le32 rx_end;
38 u8 headroom;
39 u8 tailroom;
40 u8 unimportant[6];
41 u8 rates[16];
42} __attribute__((packed));
43
32#define BR_CODE_MIN 0x80000000 44#define BR_CODE_MIN 0x80000000
33#define BR_CODE_COMPONENT_ID 0x80000001 45#define BR_CODE_COMPONENT_ID 0x80000001
34#define BR_CODE_COMPONENT_VERSION 0x80000002 46#define BR_CODE_COMPONENT_VERSION 0x80000002
@@ -39,11 +51,6 @@ struct bootrec_exp_if {
39#define BR_CODE_END_OF_BRA 0xFF0000FF 51#define BR_CODE_END_OF_BRA 0xFF0000FF
40#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF 52#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF
41 53
42#define FW_FMAC 0x464d4143
43#define FW_LM86 0x4c4d3836
44#define FW_LM87 0x4c4d3837
45#define FW_LM20 0x4c4d3230
46
47/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */ 54/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */
48 55
49struct pda_entry { 56struct pda_entry {
@@ -89,6 +96,16 @@ struct pda_pa_curve_data_sample_rev1 {
89 u8 data_qpsk; 96 u8 data_qpsk;
90 u8 data_16qam; 97 u8 data_16qam;
91 u8 data_64qam; 98 u8 data_64qam;
99} __attribute__ ((packed));
100
101struct p54_pa_curve_data_sample {
102 u8 rf_power;
103 u8 pa_detector;
104 u8 data_barker;
105 u8 data_bpsk;
106 u8 data_qpsk;
107 u8 data_16qam;
108 u8 data_64qam;
92 u8 padding; 109 u8 padding;
93} __attribute__ ((packed)); 110} __attribute__ ((packed));
94 111
@@ -169,8 +186,9 @@ struct p54_rx_hdr {
169 u8 rssi; 186 u8 rssi;
170 u8 quality; 187 u8 quality;
171 u16 unknown2; 188 u16 unknown2;
172 __le64 timestamp; 189 __le32 tsf32;
173 u8 data[0]; 190 __le32 unalloc0;
191 u8 align[0];
174} __attribute__ ((packed)); 192} __attribute__ ((packed));
175 193
176struct p54_frame_sent_hdr { 194struct p54_frame_sent_hdr {
@@ -198,22 +216,37 @@ struct p54_tx_control_allocdata {
198 216
199struct p54_tx_control_filter { 217struct p54_tx_control_filter {
200 __le16 filter_type; 218 __le16 filter_type;
201 u8 dst[ETH_ALEN]; 219 u8 mac_addr[ETH_ALEN];
202 u8 src[ETH_ALEN]; 220 u8 bssid[ETH_ALEN];
203 u8 antenna; 221 u8 rx_antenna;
204 u8 debug; 222 u8 rx_align;
205 __le32 magic3; 223 union {
206 u8 rates[8]; // FIXME: what's this for? 224 struct {
207 __le32 rx_addr; 225 __le32 basic_rate_mask;
208 __le16 max_rx; 226 u8 rts_rates[8];
209 __le16 rxhw; 227 __le32 rx_addr;
210 __le16 magic8; 228 __le16 max_rx;
211 __le16 magic9; 229 __le16 rxhw;
230 __le16 wakeup_timer;
231 __le16 unalloc0;
232 } v1 __attribute__ ((packed));
233 struct {
234 __le32 rx_addr;
235 __le16 max_rx;
236 __le16 rxhw;
237 __le16 timer;
238 __le16 unalloc0;
239 __le32 unalloc1;
240 } v2 __attribute__ ((packed));
241 } __attribute__ ((packed));
212} __attribute__ ((packed)); 242} __attribute__ ((packed));
213 243
244#define P54_TX_CONTROL_FILTER_V1_LEN (sizeof(struct p54_tx_control_filter))
245#define P54_TX_CONTROL_FILTER_V2_LEN (sizeof(struct p54_tx_control_filter)-8)
246
214struct p54_tx_control_channel { 247struct p54_tx_control_channel {
215 __le16 magic1; 248 __le16 flags;
216 __le16 magic2; 249 __le16 dwell;
217 u8 padding1[20]; 250 u8 padding1[20];
218 struct pda_iq_autocal_entry iq_autocal; 251 struct pda_iq_autocal_entry iq_autocal;
219 u8 pa_points_per_curve; 252 u8 pa_points_per_curve;
@@ -222,10 +255,29 @@ struct p54_tx_control_channel {
222 u8 val_qpsk; 255 u8 val_qpsk;
223 u8 val_16qam; 256 u8 val_16qam;
224 u8 val_64qam; 257 u8 val_64qam;
225 struct pda_pa_curve_data_sample_rev1 curve_data[0]; 258 struct p54_pa_curve_data_sample curve_data[8];
226 /* additional padding/data after curve_data */ 259 u8 dup_bpsk;
260 u8 dup_qpsk;
261 u8 dup_16qam;
262 u8 dup_64qam;
263 union {
264 struct {
265 __le16 rssical_mul;
266 __le16 rssical_add;
267 } v1 __attribute__ ((packed));
268
269 struct {
270 __le32 basic_rate_mask;
271 u8 rts_rates[8];
272 __le16 rssical_mul;
273 __le16 rssical_add;
274 } v2 __attribute__ ((packed));
275 } __attribute__ ((packed));
227} __attribute__ ((packed)); 276} __attribute__ ((packed));
228 277
278#define P54_TX_CONTROL_CHANNEL_V1_LEN (sizeof(struct p54_tx_control_channel)-12)
279#define P54_TX_CONTROL_CHANNEL_V2_LEN (sizeof(struct p54_tx_control_channel))
280
229struct p54_tx_control_led { 281struct p54_tx_control_led {
230 __le16 mode; 282 __le16 mode;
231 __le16 led_temporary; 283 __le16 led_temporary;
@@ -250,4 +302,24 @@ struct p54_tx_control_vdcf {
250 __le16 frameburst; 302 __le16 frameburst;
251} __attribute__ ((packed)); 303} __attribute__ ((packed));
252 304
253#endif /* PRISM54COMMON_H */ 305struct p54_statistics {
306 __le32 rx_success;
307 __le32 rx_bad_fcs;
308 __le32 rx_abort;
309 __le32 rx_abort_phy;
310 __le32 rts_success;
311 __le32 rts_fail;
312 __le32 tsf32;
313 __le32 airtime;
314 __le32 noise;
315 __le32 unkn[10]; /* CCE / CCA / RADAR */
316} __attribute__ ((packed));
317
318struct p54_tx_control_xbow_synth {
319 __le16 magic1;
320 __le16 magic2;
321 __le16 freq;
322 u32 padding[5];
323} __attribute__ ((packed));
324
325#endif /* P54COMMON_H */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 7dd4add4bf4e..1c2a02a741af 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -3,6 +3,7 @@
3 * Linux device driver for PCI based Prism54 3 * Linux device driver for PCI based Prism54
4 * 4 *
5 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> 5 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
6 * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
6 * 7 *
7 * Based on the islsm (softmac prism54) driver, which is: 8 * Based on the islsm (softmac prism54) driver, which is:
8 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al. 9 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
@@ -71,16 +72,18 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
71 P54P_WRITE(ctrl_stat, reg); 72 P54P_WRITE(ctrl_stat, reg);
72 wmb(); 73 wmb();
73 74
74 mdelay(50);
75
76 err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev); 75 err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev);
77 if (err) { 76 if (err) {
78 printk(KERN_ERR "%s (prism54pci): cannot find firmware " 77 printk(KERN_ERR "%s (p54pci): cannot find firmware "
79 "(isl3886)\n", pci_name(priv->pdev)); 78 "(isl3886)\n", pci_name(priv->pdev));
80 return err; 79 return err;
81 } 80 }
82 81
83 p54_parse_firmware(dev, fw_entry); 82 err = p54_parse_firmware(dev, fw_entry);
83 if (err) {
84 release_firmware(fw_entry);
85 return err;
86 }
84 87
85 data = (__le32 *) fw_entry->data; 88 data = (__le32 *) fw_entry->data;
86 remains = fw_entry->size; 89 remains = fw_entry->size;
@@ -121,162 +124,147 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
121 wmb(); 124 wmb();
122 udelay(10); 125 udelay(10);
123 126
127 /* wait for the firmware to boot properly */
128 mdelay(100);
129
124 return 0; 130 return 0;
125} 131}
126 132
127static irqreturn_t p54p_simple_interrupt(int irq, void *dev_id) 133static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
134 int ring_index, struct p54p_desc *ring, u32 ring_limit,
135 struct sk_buff **rx_buf)
128{ 136{
129 struct p54p_priv *priv = (struct p54p_priv *) dev_id; 137 struct p54p_priv *priv = dev->priv;
130 __le32 reg; 138 struct p54p_ring_control *ring_control = priv->ring_control;
131 139 u32 limit, idx, i;
132 reg = P54P_READ(int_ident);
133 P54P_WRITE(int_ack, reg);
134 140
135 if (reg & P54P_READ(int_enable)) 141 idx = le32_to_cpu(ring_control->host_idx[ring_index]);
136 complete(&priv->boot_comp); 142 limit = idx;
143 limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
144 limit = ring_limit - limit;
137 145
138 return IRQ_HANDLED; 146 i = idx % ring_limit;
139} 147 while (limit-- > 1) {
148 struct p54p_desc *desc = &ring[i];
140 149
141static int p54p_read_eeprom(struct ieee80211_hw *dev) 150 if (!desc->host_addr) {
142{ 151 struct sk_buff *skb;
143 struct p54p_priv *priv = dev->priv; 152 dma_addr_t mapping;
144 struct p54p_ring_control *ring_control = priv->ring_control; 153 skb = dev_alloc_skb(priv->common.rx_mtu + 32);
145 int err; 154 if (!skb)
146 struct p54_control_hdr *hdr; 155 break;
147 void *eeprom;
148 dma_addr_t rx_mapping, tx_mapping;
149 u16 alen;
150 156
151 init_completion(&priv->boot_comp); 157 mapping = pci_map_single(priv->pdev,
152 err = request_irq(priv->pdev->irq, &p54p_simple_interrupt, 158 skb_tail_pointer(skb),
153 IRQF_SHARED, "prism54pci", priv); 159 priv->common.rx_mtu + 32,
154 if (err) { 160 PCI_DMA_FROMDEVICE);
155 printk(KERN_ERR "%s (prism54pci): failed to register IRQ handler\n", 161 desc->host_addr = cpu_to_le32(mapping);
156 pci_name(priv->pdev)); 162 desc->device_addr = 0; // FIXME: necessary?
157 return err; 163 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
158 } 164 desc->flags = 0;
165 rx_buf[i] = skb;
166 }
159 167
160 eeprom = kmalloc(0x2010 + EEPROM_READBACK_LEN, GFP_KERNEL); 168 i++;
161 if (!eeprom) { 169 idx++;
162 printk(KERN_ERR "%s (prism54pci): no memory for eeprom!\n", 170 i %= ring_limit;
163 pci_name(priv->pdev));
164 err = -ENOMEM;
165 goto out;
166 } 171 }
167 172
168 memset(ring_control, 0, sizeof(*ring_control)); 173 wmb();
169 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); 174 ring_control->host_idx[ring_index] = cpu_to_le32(idx);
170 P54P_READ(ring_control_base); 175}
171 udelay(10);
172
173 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
174 P54P_READ(int_enable);
175 udelay(10);
176 176
177 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); 177static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
178 int ring_index, struct p54p_desc *ring, u32 ring_limit,
179 struct sk_buff **rx_buf)
180{
181 struct p54p_priv *priv = dev->priv;
182 struct p54p_ring_control *ring_control = priv->ring_control;
183 struct p54p_desc *desc;
184 u32 idx, i;
185
186 i = (*index) % ring_limit;
187 (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
188 idx %= ring_limit;
189 while (i != idx) {
190 u16 len;
191 struct sk_buff *skb;
192 desc = &ring[i];
193 len = le16_to_cpu(desc->len);
194 skb = rx_buf[i];
195
196 if (!skb) {
197 i++;
198 i %= ring_limit;
199 continue;
200 }
201 skb_put(skb, len);
202
203 if (p54_rx(dev, skb)) {
204 pci_unmap_single(priv->pdev,
205 le32_to_cpu(desc->host_addr),
206 priv->common.rx_mtu + 32,
207 PCI_DMA_FROMDEVICE);
208 rx_buf[i] = NULL;
209 desc->host_addr = 0;
210 } else {
211 skb_trim(skb, 0);
212 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
213 }
178 214
179 if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) { 215 i++;
180 printk(KERN_ERR "%s (prism54pci): Cannot boot firmware!\n", 216 i %= ring_limit;
181 pci_name(priv->pdev));
182 err = -EINVAL;
183 goto out;
184 } 217 }
185 218
186 P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); 219 p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
187 P54P_READ(int_enable); 220}
188 221
189 hdr = eeprom + 0x2010; 222/* caller must hold priv->lock */
190 p54_fill_eeprom_readback(hdr); 223static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
191 hdr->req_id = cpu_to_le32(priv->common.rx_start); 224 int ring_index, struct p54p_desc *ring, u32 ring_limit,
225 void **tx_buf)
226{
227 struct p54p_priv *priv = dev->priv;
228 struct p54p_ring_control *ring_control = priv->ring_control;
229 struct p54p_desc *desc;
230 u32 idx, i;
192 231
193 rx_mapping = pci_map_single(priv->pdev, eeprom, 232 i = (*index) % ring_limit;
194 0x2010, PCI_DMA_FROMDEVICE); 233 (*index) = idx = le32_to_cpu(ring_control->device_idx[1]);
195 tx_mapping = pci_map_single(priv->pdev, (void *)hdr, 234 idx %= ring_limit;
196 EEPROM_READBACK_LEN, PCI_DMA_TODEVICE);
197 235
198 ring_control->rx_mgmt[0].host_addr = cpu_to_le32(rx_mapping); 236 while (i != idx) {
199 ring_control->rx_mgmt[0].len = cpu_to_le16(0x2010); 237 desc = &ring[i];
200 ring_control->tx_data[0].host_addr = cpu_to_le32(tx_mapping); 238 kfree(tx_buf[i]);
201 ring_control->tx_data[0].device_addr = hdr->req_id; 239 tx_buf[i] = NULL;
202 ring_control->tx_data[0].len = cpu_to_le16(EEPROM_READBACK_LEN);
203 240
204 ring_control->host_idx[2] = cpu_to_le32(1); 241 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
205 ring_control->host_idx[1] = cpu_to_le32(1); 242 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
206 243
207 wmb(); 244 desc->host_addr = 0;
208 mdelay(100); 245 desc->device_addr = 0;
209 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); 246 desc->len = 0;
247 desc->flags = 0;
210 248
211 wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); 249 i++;
212 wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ); 250 i %= ring_limit;
213
214 pci_unmap_single(priv->pdev, tx_mapping,
215 EEPROM_READBACK_LEN, PCI_DMA_TODEVICE);
216 pci_unmap_single(priv->pdev, rx_mapping,
217 0x2010, PCI_DMA_FROMDEVICE);
218
219 alen = le16_to_cpu(ring_control->rx_mgmt[0].len);
220 if (le32_to_cpu(ring_control->device_idx[2]) != 1 ||
221 alen < 0x10) {
222 printk(KERN_ERR "%s (prism54pci): Cannot read eeprom!\n",
223 pci_name(priv->pdev));
224 err = -EINVAL;
225 goto out;
226 } 251 }
227
228 p54_parse_eeprom(dev, (u8 *)eeprom + 0x10, alen - 0x10);
229
230 out:
231 kfree(eeprom);
232 P54P_WRITE(int_enable, cpu_to_le32(0));
233 P54P_READ(int_enable);
234 udelay(10);
235 free_irq(priv->pdev->irq, priv);
236 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
237 return err;
238} 252}
239 253
240static void p54p_refill_rx_ring(struct ieee80211_hw *dev) 254static void p54p_rx_tasklet(unsigned long dev_id)
241{ 255{
256 struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
242 struct p54p_priv *priv = dev->priv; 257 struct p54p_priv *priv = dev->priv;
243 struct p54p_ring_control *ring_control = priv->ring_control; 258 struct p54p_ring_control *ring_control = priv->ring_control;
244 u32 limit, host_idx, idx;
245 259
246 host_idx = le32_to_cpu(ring_control->host_idx[0]); 260 p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
247 limit = host_idx; 261 ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
248 limit -= le32_to_cpu(ring_control->device_idx[0]);
249 limit = ARRAY_SIZE(ring_control->rx_data) - limit;
250
251 idx = host_idx % ARRAY_SIZE(ring_control->rx_data);
252 while (limit-- > 1) {
253 struct p54p_desc *desc = &ring_control->rx_data[idx];
254
255 if (!desc->host_addr) {
256 struct sk_buff *skb;
257 dma_addr_t mapping;
258 skb = dev_alloc_skb(MAX_RX_SIZE);
259 if (!skb)
260 break;
261
262 mapping = pci_map_single(priv->pdev,
263 skb_tail_pointer(skb),
264 MAX_RX_SIZE,
265 PCI_DMA_FROMDEVICE);
266 desc->host_addr = cpu_to_le32(mapping);
267 desc->device_addr = 0; // FIXME: necessary?
268 desc->len = cpu_to_le16(MAX_RX_SIZE);
269 desc->flags = 0;
270 priv->rx_buf[idx] = skb;
271 }
272 262
273 idx++; 263 p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
274 host_idx++; 264 ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);
275 idx %= ARRAY_SIZE(ring_control->rx_data);
276 }
277 265
278 wmb(); 266 wmb();
279 ring_control->host_idx[0] = cpu_to_le32(host_idx); 267 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
280} 268}
281 269
282static irqreturn_t p54p_interrupt(int irq, void *dev_id) 270static irqreturn_t p54p_interrupt(int irq, void *dev_id)
@@ -298,65 +286,18 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
298 reg &= P54P_READ(int_enable); 286 reg &= P54P_READ(int_enable);
299 287
300 if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) { 288 if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
301 struct p54p_desc *desc; 289 p54p_check_tx_ring(dev, &priv->tx_idx_mgmt,
302 u32 idx, i; 290 3, ring_control->tx_mgmt,
303 i = priv->tx_idx; 291 ARRAY_SIZE(ring_control->tx_mgmt),
304 i %= ARRAY_SIZE(ring_control->tx_data); 292 priv->tx_buf_mgmt);
305 priv->tx_idx = idx = le32_to_cpu(ring_control->device_idx[1]);
306 idx %= ARRAY_SIZE(ring_control->tx_data);
307
308 while (i != idx) {
309 desc = &ring_control->tx_data[i];
310 if (priv->tx_buf[i]) {
311 kfree(priv->tx_buf[i]);
312 priv->tx_buf[i] = NULL;
313 }
314
315 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
316 le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
317
318 desc->host_addr = 0;
319 desc->device_addr = 0;
320 desc->len = 0;
321 desc->flags = 0;
322
323 i++;
324 i %= ARRAY_SIZE(ring_control->tx_data);
325 }
326
327 i = priv->rx_idx;
328 i %= ARRAY_SIZE(ring_control->rx_data);
329 priv->rx_idx = idx = le32_to_cpu(ring_control->device_idx[0]);
330 idx %= ARRAY_SIZE(ring_control->rx_data);
331 while (i != idx) {
332 u16 len;
333 struct sk_buff *skb;
334 desc = &ring_control->rx_data[i];
335 len = le16_to_cpu(desc->len);
336 skb = priv->rx_buf[i];
337 293
338 skb_put(skb, len); 294 p54p_check_tx_ring(dev, &priv->tx_idx_data,
295 1, ring_control->tx_data,
296 ARRAY_SIZE(ring_control->tx_data),
297 priv->tx_buf_data);
339 298
340 if (p54_rx(dev, skb)) { 299 tasklet_schedule(&priv->rx_tasklet);
341 pci_unmap_single(priv->pdev,
342 le32_to_cpu(desc->host_addr),
343 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
344 300
345 priv->rx_buf[i] = NULL;
346 desc->host_addr = 0;
347 } else {
348 skb_trim(skb, 0);
349 desc->len = cpu_to_le16(MAX_RX_SIZE);
350 }
351
352 i++;
353 i %= ARRAY_SIZE(ring_control->rx_data);
354 }
355
356 p54p_refill_rx_ring(dev);
357
358 wmb();
359 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
360 } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) 301 } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
361 complete(&priv->boot_comp); 302 complete(&priv->boot_comp);
362 303
@@ -392,7 +333,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data,
392 ring_control->host_idx[1] = cpu_to_le32(idx + 1); 333 ring_control->host_idx[1] = cpu_to_le32(idx + 1);
393 334
394 if (free_on_tx) 335 if (free_on_tx)
395 priv->tx_buf[i] = data; 336 priv->tx_buf_data[i] = data;
396 337
397 spin_unlock_irqrestore(&priv->lock, flags); 338 spin_unlock_irqrestore(&priv->lock, flags);
398 339
@@ -412,7 +353,7 @@ static int p54p_open(struct ieee80211_hw *dev)
412 353
413 init_completion(&priv->boot_comp); 354 init_completion(&priv->boot_comp);
414 err = request_irq(priv->pdev->irq, &p54p_interrupt, 355 err = request_irq(priv->pdev->irq, &p54p_interrupt,
415 IRQF_SHARED, "prism54pci", dev); 356 IRQF_SHARED, "p54pci", dev);
416 if (err) { 357 if (err) {
417 printk(KERN_ERR "%s: failed to register IRQ handler\n", 358 printk(KERN_ERR "%s: failed to register IRQ handler\n",
418 wiphy_name(dev->wiphy)); 359 wiphy_name(dev->wiphy));
@@ -420,10 +361,19 @@ static int p54p_open(struct ieee80211_hw *dev)
420 } 361 }
421 362
422 memset(priv->ring_control, 0, sizeof(*priv->ring_control)); 363 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
423 priv->rx_idx = priv->tx_idx = 0; 364 err = p54p_upload_firmware(dev);
424 p54p_refill_rx_ring(dev); 365 if (err) {
366 free_irq(priv->pdev->irq, dev);
367 return err;
368 }
369 priv->rx_idx_data = priv->tx_idx_data = 0;
370 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
371
372 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
373 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
425 374
426 p54p_upload_firmware(dev); 375 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
376 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
427 377
428 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); 378 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
429 P54P_READ(ring_control_base); 379 P54P_READ(ring_control_base);
@@ -465,6 +415,8 @@ static void p54p_stop(struct ieee80211_hw *dev)
465 unsigned int i; 415 unsigned int i;
466 struct p54p_desc *desc; 416 struct p54p_desc *desc;
467 417
418 tasklet_kill(&priv->rx_tasklet);
419
468 P54P_WRITE(int_enable, cpu_to_le32(0)); 420 P54P_WRITE(int_enable, cpu_to_le32(0));
469 P54P_READ(int_enable); 421 P54P_READ(int_enable);
470 udelay(10); 422 udelay(10);
@@ -473,26 +425,53 @@ static void p54p_stop(struct ieee80211_hw *dev)
473 425
474 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); 426 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
475 427
476 for (i = 0; i < ARRAY_SIZE(priv->rx_buf); i++) { 428 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
477 desc = &ring_control->rx_data[i]; 429 desc = &ring_control->rx_data[i];
478 if (desc->host_addr) 430 if (desc->host_addr)
479 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), 431 pci_unmap_single(priv->pdev,
480 MAX_RX_SIZE, PCI_DMA_FROMDEVICE); 432 le32_to_cpu(desc->host_addr),
481 kfree_skb(priv->rx_buf[i]); 433 priv->common.rx_mtu + 32,
482 priv->rx_buf[i] = NULL; 434 PCI_DMA_FROMDEVICE);
435 kfree_skb(priv->rx_buf_data[i]);
436 priv->rx_buf_data[i] = NULL;
437 }
438
439 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
440 desc = &ring_control->rx_mgmt[i];
441 if (desc->host_addr)
442 pci_unmap_single(priv->pdev,
443 le32_to_cpu(desc->host_addr),
444 priv->common.rx_mtu + 32,
445 PCI_DMA_FROMDEVICE);
446 kfree_skb(priv->rx_buf_mgmt[i]);
447 priv->rx_buf_mgmt[i] = NULL;
483 } 448 }
484 449
485 for (i = 0; i < ARRAY_SIZE(priv->tx_buf); i++) { 450 for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
486 desc = &ring_control->tx_data[i]; 451 desc = &ring_control->tx_data[i];
487 if (desc->host_addr) 452 if (desc->host_addr)
488 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), 453 pci_unmap_single(priv->pdev,
489 le16_to_cpu(desc->len), PCI_DMA_TODEVICE); 454 le32_to_cpu(desc->host_addr),
455 le16_to_cpu(desc->len),
456 PCI_DMA_TODEVICE);
490 457
491 kfree(priv->tx_buf[i]); 458 kfree(priv->tx_buf_data[i]);
492 priv->tx_buf[i] = NULL; 459 priv->tx_buf_data[i] = NULL;
493 } 460 }
494 461
495 memset(ring_control, 0, sizeof(ring_control)); 462 for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
463 desc = &ring_control->tx_mgmt[i];
464 if (desc->host_addr)
465 pci_unmap_single(priv->pdev,
466 le32_to_cpu(desc->host_addr),
467 le16_to_cpu(desc->len),
468 PCI_DMA_TODEVICE);
469
470 kfree(priv->tx_buf_mgmt[i]);
471 priv->tx_buf_mgmt[i] = NULL;
472 }
473
474 memset(ring_control, 0, sizeof(*ring_control));
496} 475}
497 476
498static int __devinit p54p_probe(struct pci_dev *pdev, 477static int __devinit p54p_probe(struct pci_dev *pdev,
@@ -506,7 +485,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
506 485
507 err = pci_enable_device(pdev); 486 err = pci_enable_device(pdev);
508 if (err) { 487 if (err) {
509 printk(KERN_ERR "%s (prism54pci): Cannot enable new PCI device\n", 488 printk(KERN_ERR "%s (p54pci): Cannot enable new PCI device\n",
510 pci_name(pdev)); 489 pci_name(pdev));
511 return err; 490 return err;
512 } 491 }
@@ -514,22 +493,22 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
514 mem_addr = pci_resource_start(pdev, 0); 493 mem_addr = pci_resource_start(pdev, 0);
515 mem_len = pci_resource_len(pdev, 0); 494 mem_len = pci_resource_len(pdev, 0);
516 if (mem_len < sizeof(struct p54p_csr)) { 495 if (mem_len < sizeof(struct p54p_csr)) {
517 printk(KERN_ERR "%s (prism54pci): Too short PCI resources\n", 496 printk(KERN_ERR "%s (p54pci): Too short PCI resources\n",
518 pci_name(pdev)); 497 pci_name(pdev));
519 pci_disable_device(pdev); 498 pci_disable_device(pdev);
520 return err; 499 return err;
521 } 500 }
522 501
523 err = pci_request_regions(pdev, "prism54pci"); 502 err = pci_request_regions(pdev, "p54pci");
524 if (err) { 503 if (err) {
525 printk(KERN_ERR "%s (prism54pci): Cannot obtain PCI resources\n", 504 printk(KERN_ERR "%s (p54pci): Cannot obtain PCI resources\n",
526 pci_name(pdev)); 505 pci_name(pdev));
527 return err; 506 return err;
528 } 507 }
529 508
530 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || 509 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
531 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { 510 pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
532 printk(KERN_ERR "%s (prism54pci): No suitable DMA available\n", 511 printk(KERN_ERR "%s (p54pci): No suitable DMA available\n",
533 pci_name(pdev)); 512 pci_name(pdev));
534 goto err_free_reg; 513 goto err_free_reg;
535 } 514 }
@@ -542,7 +521,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
542 521
543 dev = p54_init_common(sizeof(*priv)); 522 dev = p54_init_common(sizeof(*priv));
544 if (!dev) { 523 if (!dev) {
545 printk(KERN_ERR "%s (prism54pci): ieee80211 alloc failed\n", 524 printk(KERN_ERR "%s (p54pci): ieee80211 alloc failed\n",
546 pci_name(pdev)); 525 pci_name(pdev));
547 err = -ENOMEM; 526 err = -ENOMEM;
548 goto err_free_reg; 527 goto err_free_reg;
@@ -556,7 +535,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
556 535
557 priv->map = ioremap(mem_addr, mem_len); 536 priv->map = ioremap(mem_addr, mem_len);
558 if (!priv->map) { 537 if (!priv->map) {
559 printk(KERN_ERR "%s (prism54pci): Cannot map device memory\n", 538 printk(KERN_ERR "%s (p54pci): Cannot map device memory\n",
560 pci_name(pdev)); 539 pci_name(pdev));
561 err = -EINVAL; // TODO: use a better error code? 540 err = -EINVAL; // TODO: use a better error code?
562 goto err_free_dev; 541 goto err_free_dev;
@@ -565,39 +544,31 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
565 priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), 544 priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
566 &priv->ring_control_dma); 545 &priv->ring_control_dma);
567 if (!priv->ring_control) { 546 if (!priv->ring_control) {
568 printk(KERN_ERR "%s (prism54pci): Cannot allocate rings\n", 547 printk(KERN_ERR "%s (p54pci): Cannot allocate rings\n",
569 pci_name(pdev)); 548 pci_name(pdev));
570 err = -ENOMEM; 549 err = -ENOMEM;
571 goto err_iounmap; 550 goto err_iounmap;
572 } 551 }
573 memset(priv->ring_control, 0, sizeof(*priv->ring_control));
574
575 err = p54p_upload_firmware(dev);
576 if (err)
577 goto err_free_desc;
578
579 err = p54p_read_eeprom(dev);
580 if (err)
581 goto err_free_desc;
582
583 priv->common.open = p54p_open; 552 priv->common.open = p54p_open;
584 priv->common.stop = p54p_stop; 553 priv->common.stop = p54p_stop;
585 priv->common.tx = p54p_tx; 554 priv->common.tx = p54p_tx;
586 555
587 spin_lock_init(&priv->lock); 556 spin_lock_init(&priv->lock);
557 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
558
559 p54p_open(dev);
560 err = p54_read_eeprom(dev);
561 p54p_stop(dev);
562 if (err)
563 goto err_free_desc;
588 564
589 err = ieee80211_register_hw(dev); 565 err = ieee80211_register_hw(dev);
590 if (err) { 566 if (err) {
591 printk(KERN_ERR "%s (prism54pci): Cannot register netdevice\n", 567 printk(KERN_ERR "%s (p54pci): Cannot register netdevice\n",
592 pci_name(pdev)); 568 pci_name(pdev));
593 goto err_free_common; 569 goto err_free_common;
594 } 570 }
595 571
596 printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n",
597 wiphy_name(dev->wiphy),
598 print_mac(mac, dev->wiphy->perm_addr),
599 priv->common.version);
600
601 return 0; 572 return 0;
602 573
603 err_free_common: 574 err_free_common:
@@ -645,7 +616,7 @@ static int p54p_suspend(struct pci_dev *pdev, pm_message_t state)
645 struct ieee80211_hw *dev = pci_get_drvdata(pdev); 616 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
646 struct p54p_priv *priv = dev->priv; 617 struct p54p_priv *priv = dev->priv;
647 618
648 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { 619 if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
649 ieee80211_stop_queues(dev); 620 ieee80211_stop_queues(dev);
650 p54p_stop(dev); 621 p54p_stop(dev);
651 } 622 }
@@ -663,7 +634,7 @@ static int p54p_resume(struct pci_dev *pdev)
663 pci_set_power_state(pdev, PCI_D0); 634 pci_set_power_state(pdev, PCI_D0);
664 pci_restore_state(pdev); 635 pci_restore_state(pdev);
665 636
666 if (priv->common.mode != IEEE80211_IF_TYPE_INVALID) { 637 if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) {
667 p54p_open(dev); 638 p54p_open(dev);
668 ieee80211_wake_queues(dev); 639 ieee80211_wake_queues(dev);
669 } 640 }
@@ -673,7 +644,7 @@ static int p54p_resume(struct pci_dev *pdev)
673#endif /* CONFIG_PM */ 644#endif /* CONFIG_PM */
674 645
675static struct pci_driver p54p_driver = { 646static struct pci_driver p54p_driver = {
676 .name = "prism54pci", 647 .name = "p54pci",
677 .id_table = p54p_table, 648 .id_table = p54p_table,
678 .probe = p54p_probe, 649 .probe = p54p_probe,
679 .remove = __devexit_p(p54p_remove), 650 .remove = __devexit_p(p54p_remove),
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 5bedd7af385d..4a6778070afc 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54PCI_H 1#ifndef P54PCI_H
2#define PRISM54PCI_H 2#define P54PCI_H
3 3
4/* 4/*
5 * Defines for PCI based mac80211 Prism54 driver 5 * Defines for PCI based mac80211 Prism54 driver
@@ -68,7 +68,7 @@ struct p54p_csr {
68} __attribute__ ((packed)); 68} __attribute__ ((packed));
69 69
70/* usb backend only needs the register defines above */ 70/* usb backend only needs the register defines above */
71#ifndef PRISM54USB_H 71#ifndef P54USB_H
72struct p54p_desc { 72struct p54p_desc {
73 __le32 host_addr; 73 __le32 host_addr;
74 __le32 device_addr; 74 __le32 device_addr;
@@ -92,15 +92,19 @@ struct p54p_priv {
92 struct p54_common common; 92 struct p54_common common;
93 struct pci_dev *pdev; 93 struct pci_dev *pdev;
94 struct p54p_csr __iomem *map; 94 struct p54p_csr __iomem *map;
95 struct tasklet_struct rx_tasklet;
95 96
96 spinlock_t lock; 97 spinlock_t lock;
97 struct p54p_ring_control *ring_control; 98 struct p54p_ring_control *ring_control;
98 dma_addr_t ring_control_dma; 99 dma_addr_t ring_control_dma;
99 u32 rx_idx, tx_idx; 100 u32 rx_idx_data, tx_idx_data;
100 struct sk_buff *rx_buf[8]; 101 u32 rx_idx_mgmt, tx_idx_mgmt;
101 void *tx_buf[32]; 102 struct sk_buff *rx_buf_data[8];
103 struct sk_buff *rx_buf_mgmt[4];
104 void *tx_buf_data[32];
105 void *tx_buf_mgmt[4];
102 struct completion boot_comp; 106 struct completion boot_comp;
103}; 107};
104 108
105#endif /* PRISM54USB_H */ 109#endif /* P54USB_H */
106#endif /* PRISM54PCI_H */ 110#endif /* P54PCI_H */
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index cbaca23a9453..1912f5e9a0a9 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -91,11 +91,16 @@ static void p54u_rx_cb(struct urb *urb)
91 91
92 skb_unlink(skb, &priv->rx_queue); 92 skb_unlink(skb, &priv->rx_queue);
93 skb_put(skb, urb->actual_length); 93 skb_put(skb, urb->actual_length);
94 if (!priv->hw_type) 94
95 skb_pull(skb, sizeof(struct net2280_tx_hdr)); 95 if (priv->hw_type == P54U_NET2280)
96 skb_pull(skb, priv->common.tx_hdr_len);
97 if (priv->common.fw_interface == FW_LM87) {
98 skb_pull(skb, 4);
99 skb_put(skb, 4);
100 }
96 101
97 if (p54_rx(dev, skb)) { 102 if (p54_rx(dev, skb)) {
98 skb = dev_alloc_skb(MAX_RX_SIZE); 103 skb = dev_alloc_skb(priv->common.rx_mtu + 32);
99 if (unlikely(!skb)) { 104 if (unlikely(!skb)) {
100 usb_free_urb(urb); 105 usb_free_urb(urb);
101 /* TODO check rx queue length and refill *somewhere* */ 106 /* TODO check rx queue length and refill *somewhere* */
@@ -109,9 +114,12 @@ static void p54u_rx_cb(struct urb *urb)
109 urb->context = skb; 114 urb->context = skb;
110 skb_queue_tail(&priv->rx_queue, skb); 115 skb_queue_tail(&priv->rx_queue, skb);
111 } else { 116 } else {
112 if (!priv->hw_type) 117 if (priv->hw_type == P54U_NET2280)
113 skb_push(skb, sizeof(struct net2280_tx_hdr)); 118 skb_push(skb, priv->common.tx_hdr_len);
114 119 if (priv->common.fw_interface == FW_LM87) {
120 skb_push(skb, 4);
121 skb_put(skb, 4);
122 }
115 skb_reset_tail_pointer(skb); 123 skb_reset_tail_pointer(skb);
116 skb_trim(skb, 0); 124 skb_trim(skb, 0);
117 if (urb->transfer_buffer != skb_tail_pointer(skb)) { 125 if (urb->transfer_buffer != skb_tail_pointer(skb)) {
@@ -145,7 +153,7 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
145 struct p54u_rx_info *info; 153 struct p54u_rx_info *info;
146 154
147 while (skb_queue_len(&priv->rx_queue) < 32) { 155 while (skb_queue_len(&priv->rx_queue) < 32) {
148 skb = __dev_alloc_skb(MAX_RX_SIZE, GFP_KERNEL); 156 skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL);
149 if (!skb) 157 if (!skb)
150 break; 158 break;
151 entry = usb_alloc_urb(0, GFP_KERNEL); 159 entry = usb_alloc_urb(0, GFP_KERNEL);
@@ -153,7 +161,10 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
153 kfree_skb(skb); 161 kfree_skb(skb);
154 break; 162 break;
155 } 163 }
156 usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), skb_tail_pointer(skb), MAX_RX_SIZE, p54u_rx_cb, skb); 164 usb_fill_bulk_urb(entry, priv->udev,
165 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
166 skb_tail_pointer(skb),
167 priv->common.rx_mtu + 32, p54u_rx_cb, skb);
157 info = (struct p54u_rx_info *) skb->cb; 168 info = (struct p54u_rx_info *) skb->cb;
158 info->urb = entry; 169 info->urb = entry;
159 info->dev = dev; 170 info->dev = dev;
@@ -207,6 +218,42 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct p54_control_hdr *data,
207 usb_submit_urb(data_urb, GFP_ATOMIC); 218 usb_submit_urb(data_urb, GFP_ATOMIC);
208} 219}
209 220
221static __le32 p54u_lm87_chksum(const u32 *data, size_t length)
222{
223 u32 chk = 0;
224
225 length >>= 2;
226 while (length--) {
227 chk ^= *data++;
228 chk = (chk >> 5) ^ (chk << 3);
229 }
230
231 return cpu_to_le32(chk);
232}
233
234static void p54u_tx_lm87(struct ieee80211_hw *dev,
235 struct p54_control_hdr *data,
236 size_t len, int free_on_tx)
237{
238 struct p54u_priv *priv = dev->priv;
239 struct urb *data_urb;
240 struct lm87_tx_hdr *hdr = (void *)data - sizeof(*hdr);
241
242 data_urb = usb_alloc_urb(0, GFP_ATOMIC);
243 if (!data_urb)
244 return;
245
246 hdr->chksum = p54u_lm87_chksum((u32 *)data, len);
247 hdr->device_addr = data->req_id;
248
249 usb_fill_bulk_urb(data_urb, priv->udev,
250 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr,
251 len + sizeof(*hdr), free_on_tx ? p54u_tx_free_cb : p54u_tx_cb,
252 dev);
253
254 usb_submit_urb(data_urb, GFP_ATOMIC);
255}
256
210static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data, 257static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data,
211 size_t len, int free_on_tx) 258 size_t len, int free_on_tx)
212{ 259{
@@ -312,73 +359,6 @@ static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep,
312 data, len, &alen, 2000); 359 data, len, &alen, 2000);
313} 360}
314 361
315static int p54u_read_eeprom(struct ieee80211_hw *dev)
316{
317 struct p54u_priv *priv = dev->priv;
318 void *buf;
319 struct p54_control_hdr *hdr;
320 int err, alen;
321 size_t offset = priv->hw_type ? 0x10 : 0x20;
322
323 buf = kmalloc(0x2020, GFP_KERNEL);
324 if (!buf) {
325 printk(KERN_ERR "prism54usb: cannot allocate memory for "
326 "eeprom readback!\n");
327 return -ENOMEM;
328 }
329
330 if (priv->hw_type) {
331 *((u32 *) buf) = priv->common.rx_start;
332 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
333 if (err) {
334 printk(KERN_ERR "prism54usb: addr send failed\n");
335 goto fail;
336 }
337 } else {
338 struct net2280_reg_write *reg = buf;
339 reg->port = cpu_to_le16(NET2280_DEV_U32);
340 reg->addr = cpu_to_le32(P54U_DEV_BASE);
341 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA);
342 err = p54u_bulk_msg(priv, P54U_PIPE_DEV, buf, sizeof(*reg));
343 if (err) {
344 printk(KERN_ERR "prism54usb: dev_int send failed\n");
345 goto fail;
346 }
347 }
348
349 hdr = buf + priv->common.tx_hdr_len;
350 p54_fill_eeprom_readback(hdr);
351 hdr->req_id = cpu_to_le32(priv->common.rx_start);
352 if (priv->common.tx_hdr_len) {
353 struct net2280_tx_hdr *tx_hdr = buf;
354 tx_hdr->device_addr = hdr->req_id;
355 tx_hdr->len = cpu_to_le16(EEPROM_READBACK_LEN);
356 }
357
358 /* we can just pretend to send 0x2000 bytes of nothing in the headers */
359 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf,
360 EEPROM_READBACK_LEN + priv->common.tx_hdr_len);
361 if (err) {
362 printk(KERN_ERR "prism54usb: eeprom req send failed\n");
363 goto fail;
364 }
365
366 err = usb_bulk_msg(priv->udev,
367 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
368 buf, 0x2020, &alen, 1000);
369 if (!err && alen > offset) {
370 p54_parse_eeprom(dev, (u8 *)buf + offset, alen - offset);
371 } else {
372 printk(KERN_ERR "prism54usb: eeprom read failed!\n");
373 err = -EINVAL;
374 goto fail;
375 }
376
377 fail:
378 kfree(buf);
379 return err;
380}
381
382static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) 362static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
383{ 363{
384 static char start_string[] = "~~~~<\r"; 364 static char start_string[] = "~~~~<\r";
@@ -412,7 +392,9 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
412 goto err_req_fw_failed; 392 goto err_req_fw_failed;
413 } 393 }
414 394
415 p54_parse_firmware(dev, fw_entry); 395 err = p54_parse_firmware(dev, fw_entry);
396 if (err)
397 goto err_upload_failed;
416 398
417 left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size); 399 left = block_size = min((size_t)P54U_FW_BLOCK, fw_entry->size);
418 strcpy(buf, start_string); 400 strcpy(buf, start_string);
@@ -458,7 +440,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
458 440
459 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size); 441 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size);
460 if (err) { 442 if (err) {
461 printk(KERN_ERR "prism54usb: firmware upload failed!\n"); 443 printk(KERN_ERR "p54usb: firmware upload failed!\n");
462 goto err_upload_failed; 444 goto err_upload_failed;
463 } 445 }
464 446
@@ -469,7 +451,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
469 *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size)); 451 *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size));
470 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); 452 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
471 if (err) { 453 if (err) {
472 printk(KERN_ERR "prism54usb: firmware upload failed!\n"); 454 printk(KERN_ERR "p54usb: firmware upload failed!\n");
473 goto err_upload_failed; 455 goto err_upload_failed;
474 } 456 }
475 457
@@ -480,13 +462,13 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
480 break; 462 break;
481 463
482 if (alen > 5 && !memcmp(buf, "ERROR", 5)) { 464 if (alen > 5 && !memcmp(buf, "ERROR", 5)) {
483 printk(KERN_INFO "prism54usb: firmware upload failed!\n"); 465 printk(KERN_INFO "p54usb: firmware upload failed!\n");
484 err = -EINVAL; 466 err = -EINVAL;
485 break; 467 break;
486 } 468 }
487 469
488 if (time_after(jiffies, timeout)) { 470 if (time_after(jiffies, timeout)) {
489 printk(KERN_ERR "prism54usb: firmware boot timed out!\n"); 471 printk(KERN_ERR "p54usb: firmware boot timed out!\n");
490 err = -ETIMEDOUT; 472 err = -ETIMEDOUT;
491 break; 473 break;
492 } 474 }
@@ -498,7 +480,7 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
498 buf[1] = '\r'; 480 buf[1] = '\r';
499 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2); 481 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2);
500 if (err) { 482 if (err) {
501 printk(KERN_ERR "prism54usb: firmware boot failed!\n"); 483 printk(KERN_ERR "p54usb: firmware boot failed!\n");
502 goto err_upload_failed; 484 goto err_upload_failed;
503 } 485 }
504 486
@@ -549,7 +531,12 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
549 return err; 531 return err;
550 } 532 }
551 533
552 p54_parse_firmware(dev, fw_entry); 534 err = p54_parse_firmware(dev, fw_entry);
535 if (err) {
536 kfree(buf);
537 release_firmware(fw_entry);
538 return err;
539 }
553 540
554#define P54U_WRITE(type, addr, data) \ 541#define P54U_WRITE(type, addr, data) \
555 do {\ 542 do {\
@@ -660,7 +647,7 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
660 647
661 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len); 648 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len);
662 if (err) { 649 if (err) {
663 printk(KERN_ERR "prism54usb: firmware block upload " 650 printk(KERN_ERR "p54usb: firmware block upload "
664 "failed\n"); 651 "failed\n");
665 goto fail; 652 goto fail;
666 } 653 }
@@ -694,7 +681,7 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
694 0x002C | (unsigned long)&devreg->direct_mem_win); 681 0x002C | (unsigned long)&devreg->direct_mem_win);
695 if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) || 682 if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) ||
696 !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) { 683 !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) {
697 printk(KERN_ERR "prism54usb: firmware DMA transfer " 684 printk(KERN_ERR "p54usb: firmware DMA transfer "
698 "failed\n"); 685 "failed\n");
699 goto fail; 686 goto fail;
700 } 687 }
@@ -802,7 +789,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
802 789
803 dev = p54_init_common(sizeof(*priv)); 790 dev = p54_init_common(sizeof(*priv));
804 if (!dev) { 791 if (!dev) {
805 printk(KERN_ERR "prism54usb: ieee80211 alloc failed\n"); 792 printk(KERN_ERR "p54usb: ieee80211 alloc failed\n");
806 return -ENOMEM; 793 return -ENOMEM;
807 } 794 }
808 795
@@ -833,49 +820,40 @@ static int __devinit p54u_probe(struct usb_interface *intf,
833 } 820 }
834 } 821 }
835 priv->common.open = p54u_open; 822 priv->common.open = p54u_open;
836 823 priv->common.stop = p54u_stop;
837 if (recognized_pipes < P54U_PIPE_NUMBER) { 824 if (recognized_pipes < P54U_PIPE_NUMBER) {
838 priv->hw_type = P54U_3887; 825 priv->hw_type = P54U_3887;
839 priv->common.tx = p54u_tx_3887; 826 err = p54u_upload_firmware_3887(dev);
827 if (priv->common.fw_interface == FW_LM87) {
828 dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);
829 priv->common.tx_hdr_len = sizeof(struct lm87_tx_hdr);
830 priv->common.tx = p54u_tx_lm87;
831 } else
832 priv->common.tx = p54u_tx_3887;
840 } else { 833 } else {
834 priv->hw_type = P54U_NET2280;
841 dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr); 835 dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr);
842 priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr); 836 priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr);
843 priv->common.tx = p54u_tx_net2280; 837 priv->common.tx = p54u_tx_net2280;
844 }
845 priv->common.stop = p54u_stop;
846
847 if (priv->hw_type)
848 err = p54u_upload_firmware_3887(dev);
849 else
850 err = p54u_upload_firmware_net2280(dev); 838 err = p54u_upload_firmware_net2280(dev);
839 }
851 if (err) 840 if (err)
852 goto err_free_dev; 841 goto err_free_dev;
853 842
854 err = p54u_read_eeprom(dev); 843 skb_queue_head_init(&priv->rx_queue);
844
845 p54u_open(dev);
846 err = p54_read_eeprom(dev);
847 p54u_stop(dev);
855 if (err) 848 if (err)
856 goto err_free_dev; 849 goto err_free_dev;
857 850
858 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
859 u8 perm_addr[ETH_ALEN];
860
861 printk(KERN_WARNING "prism54usb: Invalid hwaddr! Using randomly generated MAC addr\n");
862 random_ether_addr(perm_addr);
863 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
864 }
865
866 skb_queue_head_init(&priv->rx_queue);
867
868 err = ieee80211_register_hw(dev); 851 err = ieee80211_register_hw(dev);
869 if (err) { 852 if (err) {
870 printk(KERN_ERR "prism54usb: Cannot register netdevice\n"); 853 printk(KERN_ERR "p54usb: Cannot register netdevice\n");
871 goto err_free_dev; 854 goto err_free_dev;
872 } 855 }
873 856
874 printk(KERN_INFO "%s: hwaddr %s, isl38%02x\n",
875 wiphy_name(dev->wiphy),
876 print_mac(mac, dev->wiphy->perm_addr),
877 priv->common.version);
878
879 return 0; 857 return 0;
880 858
881 err_free_dev: 859 err_free_dev:
@@ -902,7 +880,7 @@ static void __devexit p54u_disconnect(struct usb_interface *intf)
902} 880}
903 881
904static struct usb_driver p54u_driver = { 882static struct usb_driver p54u_driver = {
905 .name = "prism54usb", 883 .name = "p54usb",
906 .id_table = p54u_table, 884 .id_table = p54u_table,
907 .probe = p54u_probe, 885 .probe = p54u_probe,
908 .disconnect = p54u_disconnect, 886 .disconnect = p54u_disconnect,
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index d1896b396c1c..5b8fe91379c3 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -1,5 +1,5 @@
1#ifndef PRISM54USB_H 1#ifndef P54USB_H
2#define PRISM54USB_H 2#define P54USB_H
3 3
4/* 4/*
5 * Defines for USB based mac80211 Prism54 driver 5 * Defines for USB based mac80211 Prism54 driver
@@ -72,6 +72,11 @@ struct net2280_tx_hdr {
72 u8 padding[8]; 72 u8 padding[8];
73} __attribute__((packed)); 73} __attribute__((packed));
74 74
75struct lm87_tx_hdr {
76 __le32 device_addr;
77 __le32 chksum;
78} __attribute__((packed));
79
75/* Some flags for the isl hardware registers controlling DMA inside the 80/* Some flags for the isl hardware registers controlling DMA inside the
76 * chip */ 81 * chip */
77#define ISL38XX_DMA_STATUS_DONE 0x00000001 82#define ISL38XX_DMA_STATUS_DONE 0x00000001
@@ -130,4 +135,4 @@ struct p54u_priv {
130 struct sk_buff_head rx_queue; 135 struct sk_buff_head rx_queue;
131}; 136};
132 137
133#endif /* PRISM54USB_H */ 138#endif /* P54USB_H */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 3d75a7137d3c..16e68f4b654a 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -71,7 +71,7 @@ prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
71 if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) { 71 if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) {
72 printk(KERN_DEBUG 72 printk(KERN_DEBUG
73 "%s(): Sorry, Repeater mode and Secondary mode " 73 "%s(): Sorry, Repeater mode and Secondary mode "
74 "are not yet supported by this driver.\n", __FUNCTION__); 74 "are not yet supported by this driver.\n", __func__);
75 return -EINVAL; 75 return -EINVAL;
76 } 76 }
77 77
@@ -333,7 +333,7 @@ prism54_set_mode(struct net_device *ndev, struct iw_request_info *info,
333 if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) { 333 if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) {
334 printk(KERN_DEBUG 334 printk(KERN_DEBUG
335 "%s: %s() You passed a non-valid init_mode.\n", 335 "%s: %s() You passed a non-valid init_mode.\n",
336 priv->ndev->name, __FUNCTION__); 336 priv->ndev->name, __func__);
337 return -EINVAL; 337 return -EINVAL;
338 } 338 }
339 339
@@ -1234,7 +1234,7 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
1234 /* don't know how to disable radio */ 1234 /* don't know how to disable radio */
1235 printk(KERN_DEBUG 1235 printk(KERN_DEBUG
1236 "%s: %s() disabling radio is not yet supported.\n", 1236 "%s: %s() disabling radio is not yet supported.\n",
1237 priv->ndev->name, __FUNCTION__); 1237 priv->ndev->name, __func__);
1238 return -ENOTSUPP; 1238 return -ENOTSUPP;
1239 } else if (vwrq->fixed) 1239 } else if (vwrq->fixed)
1240 /* currently only fixed value is supported */ 1240 /* currently only fixed value is supported */
@@ -1242,7 +1242,7 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
1242 else { 1242 else {
1243 printk(KERN_DEBUG 1243 printk(KERN_DEBUG
1244 "%s: %s() auto power will be implemented later.\n", 1244 "%s: %s() auto power will be implemented later.\n",
1245 priv->ndev->name, __FUNCTION__); 1245 priv->ndev->name, __func__);
1246 return -ENOTSUPP; 1246 return -ENOTSUPP;
1247 } 1247 }
1248} 1248}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 963960dc30f2..44da0d19b5c8 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -325,7 +325,7 @@ static int ray_probe(struct pcmcia_device *p_dev)
325 p_dev->io.IOAddrLines = 5; 325 p_dev->io.IOAddrLines = 5;
326 326
327 /* Interrupt setup. For PCMCIA, driver takes what's given */ 327 /* Interrupt setup. For PCMCIA, driver takes what's given */
328 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 328 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
329 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; 329 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
330 p_dev->irq.Handler = &ray_interrupt; 330 p_dev->irq.Handler = &ray_interrupt;
331 331
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 00e965b9da75..2b414899dfa0 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1627,7 +1627,6 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
1627static int rndis_iw_set_scan(struct net_device *dev, 1627static int rndis_iw_set_scan(struct net_device *dev,
1628 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1628 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1629{ 1629{
1630 struct iw_param *param = &wrqu->param;
1631 struct usbnet *usbdev = dev->priv; 1630 struct usbnet *usbdev = dev->priv;
1632 union iwreq_data evt; 1631 union iwreq_data evt;
1633 int ret = -EINVAL; 1632 int ret = -EINVAL;
@@ -1635,7 +1634,7 @@ static int rndis_iw_set_scan(struct net_device *dev,
1635 1634
1636 devdbg(usbdev, "SIOCSIWSCAN"); 1635 devdbg(usbdev, "SIOCSIWSCAN");
1637 1636
1638 if (param->flags == 0) { 1637 if (wrqu->data.flags == 0) {
1639 tmp = ccpu2(1); 1638 tmp = ccpu2(1);
1640 ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp, 1639 ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
1641 sizeof(tmp)); 1640 sizeof(tmp));
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index d485a86bba75..f839ce044afd 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,5 +1,5 @@
1config RT2X00 1menuconfig RT2X00
2 tristate "Ralink driver support" 2 bool "Ralink driver support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable the experimental support for the Ralink drivers, 5 This will enable the experimental support for the Ralink drivers,
@@ -17,31 +17,6 @@ config RT2X00
17 17
18if RT2X00 18if RT2X00
19 19
20config RT2X00_LIB
21 tristate
22
23config RT2X00_LIB_PCI
24 tristate
25 select RT2X00_LIB
26
27config RT2X00_LIB_USB
28 tristate
29 select RT2X00_LIB
30
31config RT2X00_LIB_FIRMWARE
32 boolean
33 depends on RT2X00_LIB
34 select FW_LOADER
35
36config RT2X00_LIB_RFKILL
37 boolean
38 depends on RT2X00_LIB
39 select RFKILL
40
41config RT2X00_LIB_LEDS
42 boolean
43 depends on RT2X00_LIB && NEW_LEDS
44
45config RT2400PCI 20config RT2400PCI
46 tristate "Ralink rt2400 (PCI/PCMCIA) support" 21 tristate "Ralink rt2400 (PCI/PCMCIA) support"
47 depends on PCI 22 depends on PCI
@@ -53,23 +28,6 @@ config RT2400PCI
53 28
54 When compiled as a module, this driver will be called "rt2400pci.ko". 29 When compiled as a module, this driver will be called "rt2400pci.ko".
55 30
56config RT2400PCI_RFKILL
57 bool "Ralink rt2400 rfkill support"
58 depends on RT2400PCI
59 select RT2X00_LIB_RFKILL
60 ---help---
61 This adds support for integrated rt2400 hardware that features a
62 hardware button to control the radio state.
63 This feature depends on the RF switch subsystem rfkill.
64
65config RT2400PCI_LEDS
66 bool "Ralink rt2400 leds support"
67 depends on RT2400PCI && NEW_LEDS
68 select LEDS_CLASS
69 select RT2X00_LIB_LEDS
70 ---help---
71 This adds support for led triggers provided my mac80211.
72
73config RT2500PCI 31config RT2500PCI
74 tristate "Ralink rt2500 (PCI/PCMCIA) support" 32 tristate "Ralink rt2500 (PCI/PCMCIA) support"
75 depends on PCI 33 depends on PCI
@@ -81,28 +39,12 @@ config RT2500PCI
81 39
82 When compiled as a module, this driver will be called "rt2500pci.ko". 40 When compiled as a module, this driver will be called "rt2500pci.ko".
83 41
84config RT2500PCI_RFKILL
85 bool "Ralink rt2500 rfkill support"
86 depends on RT2500PCI
87 select RT2X00_LIB_RFKILL
88 ---help---
89 This adds support for integrated rt2500 hardware that features a
90 hardware button to control the radio state.
91 This feature depends on the RF switch subsystem rfkill.
92
93config RT2500PCI_LEDS
94 bool "Ralink rt2500 leds support"
95 depends on RT2500PCI && NEW_LEDS
96 select LEDS_CLASS
97 select RT2X00_LIB_LEDS
98 ---help---
99 This adds support for led triggers provided my mac80211.
100
101config RT61PCI 42config RT61PCI
102 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" 43 tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
103 depends on PCI 44 depends on PCI
104 select RT2X00_LIB_PCI 45 select RT2X00_LIB_PCI
105 select RT2X00_LIB_FIRMWARE 46 select RT2X00_LIB_FIRMWARE
47 select RT2X00_LIB_CRYPTO
106 select CRC_ITU_T 48 select CRC_ITU_T
107 select EEPROM_93CX6 49 select EEPROM_93CX6
108 ---help--- 50 ---help---
@@ -111,23 +53,6 @@ config RT61PCI
111 53
112 When compiled as a module, this driver will be called "rt61pci.ko". 54 When compiled as a module, this driver will be called "rt61pci.ko".
113 55
114config RT61PCI_RFKILL
115 bool "Ralink rt2501/rt61 rfkill support"
116 depends on RT61PCI
117 select RT2X00_LIB_RFKILL
118 ---help---
119 This adds support for integrated rt61 hardware that features a
120 hardware button to control the radio state.
121 This feature depends on the RF switch subsystem rfkill.
122
123config RT61PCI_LEDS
124 bool "Ralink rt2501/rt61 leds support"
125 depends on RT61PCI && NEW_LEDS
126 select LEDS_CLASS
127 select RT2X00_LIB_LEDS
128 ---help---
129 This adds support for led triggers provided my mac80211.
130
131config RT2500USB 56config RT2500USB
132 tristate "Ralink rt2500 (USB) support" 57 tristate "Ralink rt2500 (USB) support"
133 depends on USB 58 depends on USB
@@ -138,19 +63,12 @@ config RT2500USB
138 63
139 When compiled as a module, this driver will be called "rt2500usb.ko". 64 When compiled as a module, this driver will be called "rt2500usb.ko".
140 65
141config RT2500USB_LEDS
142 bool "Ralink rt2500 leds support"
143 depends on RT2500USB && NEW_LEDS
144 select LEDS_CLASS
145 select RT2X00_LIB_LEDS
146 ---help---
147 This adds support for led triggers provided my mac80211.
148
149config RT73USB 66config RT73USB
150 tristate "Ralink rt2501/rt73 (USB) support" 67 tristate "Ralink rt2501/rt73 (USB) support"
151 depends on USB 68 depends on USB
152 select RT2X00_LIB_USB 69 select RT2X00_LIB_USB
153 select RT2X00_LIB_FIRMWARE 70 select RT2X00_LIB_FIRMWARE
71 select RT2X00_LIB_CRYPTO
154 select CRC_ITU_T 72 select CRC_ITU_T
155 ---help--- 73 ---help---
156 This adds support for rt2501 wireless chipset family. 74 This adds support for rt2501 wireless chipset family.
@@ -158,13 +76,37 @@ config RT73USB
158 76
159 When compiled as a module, this driver will be called "rt73usb.ko". 77 When compiled as a module, this driver will be called "rt73usb.ko".
160 78
161config RT73USB_LEDS 79config RT2X00_LIB_PCI
162 bool "Ralink rt2501/rt73 leds support" 80 tristate
163 depends on RT73USB && NEW_LEDS 81 select RT2X00_LIB
164 select LEDS_CLASS 82
165 select RT2X00_LIB_LEDS 83config RT2X00_LIB_USB
166 ---help--- 84 tristate
167 This adds support for led triggers provided my mac80211. 85 select RT2X00_LIB
86
87config RT2X00_LIB
88 tristate
89
90config RT2X00_LIB_FIRMWARE
91 boolean
92 select FW_LOADER
93
94config RT2X00_LIB_CRYPTO
95 boolean
96
97config RT2X00_LIB_RFKILL
98 boolean
99 default y if (RT2X00_LIB=y && RFKILL=y) || (RT2X00_LIB=m && RFKILL!=n)
100
101comment "rt2x00 rfkill support disabled due to modularized RFKILL and built-in rt2x00"
102 depends on RT2X00_LIB=y && RFKILL=m
103
104config RT2X00_LIB_LEDS
105 boolean
106 default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
107
108comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
109 depends on RT2X00_LIB=y && LEDS_CLASS=m
168 110
169config RT2X00_LIB_DEBUGFS 111config RT2X00_LIB_DEBUGFS
170 bool "Ralink debugfs support" 112 bool "Ralink debugfs support"
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 1087dbcf1a04..917cb4f3b038 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -3,6 +3,7 @@ rt2x00lib-y += rt2x00mac.o
3rt2x00lib-y += rt2x00config.o 3rt2x00lib-y += rt2x00config.o
4rt2x00lib-y += rt2x00queue.o 4rt2x00lib-y += rt2x00queue.o
5rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o 5rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o
6rt2x00lib-$(CONFIG_RT2X00_LIB_CRYPTO) += rt2x00crypto.o
6rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o 7rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o
7rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o 8rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
8rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o 9rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4c0538d6099b..08cb9eec16a6 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -231,7 +231,7 @@ static const struct rt2x00debug rt2400pci_rt2x00debug = {
231}; 231};
232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
233 233
234#ifdef CONFIG_RT2400PCI_RFKILL 234#ifdef CONFIG_RT2X00_LIB_RFKILL
235static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) 235static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
236{ 236{
237 u32 reg; 237 u32 reg;
@@ -241,9 +241,9 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
241} 241}
242#else 242#else
243#define rt2400pci_rfkill_poll NULL 243#define rt2400pci_rfkill_poll NULL
244#endif /* CONFIG_RT2400PCI_RFKILL */ 244#endif /* CONFIG_RT2X00_LIB_RFKILL */
245 245
246#ifdef CONFIG_RT2400PCI_LEDS 246#ifdef CONFIG_RT2X00_LIB_LEDS
247static void rt2400pci_brightness_set(struct led_classdev *led_cdev, 247static void rt2400pci_brightness_set(struct led_classdev *led_cdev,
248 enum led_brightness brightness) 248 enum led_brightness brightness)
249{ 249{
@@ -288,7 +288,7 @@ static void rt2400pci_init_led(struct rt2x00_dev *rt2x00dev,
288 led->led_dev.blink_set = rt2400pci_blink_set; 288 led->led_dev.blink_set = rt2400pci_blink_set;
289 led->flags = LED_INITIALIZED; 289 led->flags = LED_INITIALIZED;
290} 290}
291#endif /* CONFIG_RT2400PCI_LEDS */ 291#endif /* CONFIG_RT2X00_LIB_LEDS */
292 292
293/* 293/*
294 * Configuration handlers. 294 * Configuration handlers.
@@ -1241,7 +1241,7 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1241 if (!reg) 1241 if (!reg)
1242 return IRQ_NONE; 1242 return IRQ_NONE;
1243 1243
1244 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 1244 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1245 return IRQ_HANDLED; 1245 return IRQ_HANDLED;
1246 1246
1247 /* 1247 /*
@@ -1374,22 +1374,22 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1374 /* 1374 /*
1375 * Store led mode, for correct led behaviour. 1375 * Store led mode, for correct led behaviour.
1376 */ 1376 */
1377#ifdef CONFIG_RT2400PCI_LEDS 1377#ifdef CONFIG_RT2X00_LIB_LEDS
1378 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1378 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1379 1379
1380 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1380 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1381 if (value == LED_MODE_TXRX_ACTIVITY) 1381 if (value == LED_MODE_TXRX_ACTIVITY)
1382 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual, 1382 rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
1383 LED_TYPE_ACTIVITY); 1383 LED_TYPE_ACTIVITY);
1384#endif /* CONFIG_RT2400PCI_LEDS */ 1384#endif /* CONFIG_RT2X00_LIB_LEDS */
1385 1385
1386 /* 1386 /*
1387 * Detect if this device has an hardware controlled radio. 1387 * Detect if this device has an hardware controlled radio.
1388 */ 1388 */
1389#ifdef CONFIG_RT2400PCI_RFKILL 1389#ifdef CONFIG_RT2X00_LIB_RFKILL
1390 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1390 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1391 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1391 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
1392#endif /* CONFIG_RT2400PCI_RFKILL */ 1392#endif /* CONFIG_RT2X00_LIB_RFKILL */
1393 1393
1394 /* 1394 /*
1395 * Check if the BBP tuning should be enabled. 1395 * Check if the BBP tuning should be enabled.
@@ -1404,7 +1404,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1404 * RF value list for RF2420 & RF2421 1404 * RF value list for RF2420 & RF2421
1405 * Supports: 2.4 GHz 1405 * Supports: 2.4 GHz
1406 */ 1406 */
1407static const struct rf_channel rf_vals_bg[] = { 1407static const struct rf_channel rf_vals_b[] = {
1408 { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 }, 1408 { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 },
1409 { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 }, 1409 { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 },
1410 { 3, 0x00022058, 0x000c2002, 0x00000101, 0 }, 1410 { 3, 0x00022058, 0x000c2002, 0x00000101, 0 },
@@ -1421,10 +1421,11 @@ static const struct rf_channel rf_vals_bg[] = {
1421 { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 }, 1421 { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 },
1422}; 1422};
1423 1423
1424static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 1424static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1425{ 1425{
1426 struct hw_mode_spec *spec = &rt2x00dev->spec; 1426 struct hw_mode_spec *spec = &rt2x00dev->spec;
1427 u8 *txpower; 1427 struct channel_info *info;
1428 char *tx_power;
1428 unsigned int i; 1429 unsigned int i;
1429 1430
1430 /* 1431 /*
@@ -1440,23 +1441,28 @@ static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1440 EEPROM_MAC_ADDR_0)); 1441 EEPROM_MAC_ADDR_0));
1441 1442
1442 /* 1443 /*
1443 * Convert tx_power array in eeprom.
1444 */
1445 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1446 for (i = 0; i < 14; i++)
1447 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1448
1449 /*
1450 * Initialize hw_mode information. 1444 * Initialize hw_mode information.
1451 */ 1445 */
1452 spec->supported_bands = SUPPORT_BAND_2GHZ; 1446 spec->supported_bands = SUPPORT_BAND_2GHZ;
1453 spec->supported_rates = SUPPORT_RATE_CCK; 1447 spec->supported_rates = SUPPORT_RATE_CCK;
1454 spec->tx_power_a = NULL;
1455 spec->tx_power_bg = txpower;
1456 spec->tx_power_default = DEFAULT_TXPOWER;
1457 1448
1458 spec->num_channels = ARRAY_SIZE(rf_vals_bg); 1449 spec->num_channels = ARRAY_SIZE(rf_vals_b);
1459 spec->channels = rf_vals_bg; 1450 spec->channels = rf_vals_b;
1451
1452 /*
1453 * Create channel information array
1454 */
1455 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1456 if (!info)
1457 return -ENOMEM;
1458
1459 spec->channels_info = info;
1460
1461 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1462 for (i = 0; i < 14; i++)
1463 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1464
1465 return 0;
1460} 1466}
1461 1467
1462static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) 1468static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1477,7 +1483,9 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1477 /* 1483 /*
1478 * Initialize hw specifications. 1484 * Initialize hw specifications.
1479 */ 1485 */
1480 rt2400pci_probe_hw_mode(rt2x00dev); 1486 retval = rt2400pci_probe_hw_mode(rt2x00dev);
1487 if (retval)
1488 return retval;
1481 1489
1482 /* 1490 /*
1483 * This device requires the atim queue and DMA-mapped skbs. 1491 * This device requires the atim queue and DMA-mapped skbs.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index bc5564258228..bbff381ce396 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -938,19 +938,13 @@
938#define MAX_TXPOWER 62 938#define MAX_TXPOWER 62
939#define DEFAULT_TXPOWER 39 939#define DEFAULT_TXPOWER 39
940 940
941#define TXPOWER_FROM_DEV(__txpower) \ 941#define __CLAMP_TX(__txpower) \
942({ \ 942 clamp_t(char, (__txpower), MIN_TXPOWER, MAX_TXPOWER)
943 ((__txpower) > MAX_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ 943
944 ((__txpower) < MIN_TXPOWER) ? DEFAULT_TXPOWER - MIN_TXPOWER : \ 944#define TXPOWER_FROM_DEV(__txpower) \
945 (((__txpower) - MAX_TXPOWER) + MIN_TXPOWER); \ 945 ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
946}) 946
947 947#define TXPOWER_TO_DEV(__txpower) \
948#define TXPOWER_TO_DEV(__txpower) \ 948 MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER)
949({ \
950 (__txpower) += MIN_TXPOWER; \
951 ((__txpower) <= MIN_TXPOWER) ? MAX_TXPOWER : \
952 (((__txpower) >= MAX_TXPOWER) ? MIN_TXPOWER : \
953 (MAX_TXPOWER - ((__txpower) - MIN_TXPOWER))); \
954})
955 949
956#endif /* RT2400PCI_H */ 950#endif /* RT2400PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 181a146b4768..ef42cc04a2d7 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -231,7 +231,7 @@ static const struct rt2x00debug rt2500pci_rt2x00debug = {
231}; 231};
232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 232#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
233 233
234#ifdef CONFIG_RT2500PCI_RFKILL 234#ifdef CONFIG_RT2X00_LIB_RFKILL
235static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) 235static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
236{ 236{
237 u32 reg; 237 u32 reg;
@@ -241,9 +241,9 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
241} 241}
242#else 242#else
243#define rt2500pci_rfkill_poll NULL 243#define rt2500pci_rfkill_poll NULL
244#endif /* CONFIG_RT2500PCI_RFKILL */ 244#endif /* CONFIG_RT2X00_LIB_RFKILL */
245 245
246#ifdef CONFIG_RT2500PCI_LEDS 246#ifdef CONFIG_RT2X00_LIB_LEDS
247static void rt2500pci_brightness_set(struct led_classdev *led_cdev, 247static void rt2500pci_brightness_set(struct led_classdev *led_cdev,
248 enum led_brightness brightness) 248 enum led_brightness brightness)
249{ 249{
@@ -288,7 +288,7 @@ static void rt2500pci_init_led(struct rt2x00_dev *rt2x00dev,
288 led->led_dev.blink_set = rt2500pci_blink_set; 288 led->led_dev.blink_set = rt2500pci_blink_set;
289 led->flags = LED_INITIALIZED; 289 led->flags = LED_INITIALIZED;
290} 290}
291#endif /* CONFIG_RT2500PCI_LEDS */ 291#endif /* CONFIG_RT2X00_LIB_LEDS */
292 292
293/* 293/*
294 * Configuration handlers. 294 * Configuration handlers.
@@ -1316,6 +1316,8 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1316 1316
1317 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1317 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1318 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1318 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1319 else
1320 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1319 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1321 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1320 rxdesc->dev_flags |= RXDONE_MY_BSS; 1322 rxdesc->dev_flags |= RXDONE_MY_BSS;
1321} 1323}
@@ -1377,7 +1379,7 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1377 if (!reg) 1379 if (!reg)
1378 return IRQ_NONE; 1380 return IRQ_NONE;
1379 1381
1380 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 1382 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1381 return IRQ_HANDLED; 1383 return IRQ_HANDLED;
1382 1384
1383 /* 1385 /*
@@ -1531,22 +1533,22 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1531 /* 1533 /*
1532 * Store led mode, for correct led behaviour. 1534 * Store led mode, for correct led behaviour.
1533 */ 1535 */
1534#ifdef CONFIG_RT2500PCI_LEDS 1536#ifdef CONFIG_RT2X00_LIB_LEDS
1535 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1537 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1536 1538
1537 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1539 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1538 if (value == LED_MODE_TXRX_ACTIVITY) 1540 if (value == LED_MODE_TXRX_ACTIVITY)
1539 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual, 1541 rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual,
1540 LED_TYPE_ACTIVITY); 1542 LED_TYPE_ACTIVITY);
1541#endif /* CONFIG_RT2500PCI_LEDS */ 1543#endif /* CONFIG_RT2X00_LIB_LEDS */
1542 1544
1543 /* 1545 /*
1544 * Detect if this device has an hardware controlled radio. 1546 * Detect if this device has an hardware controlled radio.
1545 */ 1547 */
1546#ifdef CONFIG_RT2500PCI_RFKILL 1548#ifdef CONFIG_RT2X00_LIB_RFKILL
1547 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1549 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
1548 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 1550 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
1549#endif /* CONFIG_RT2500PCI_RFKILL */ 1551#endif /* CONFIG_RT2X00_LIB_RFKILL */
1550 1552
1551 /* 1553 /*
1552 * Check if the BBP tuning should be enabled. 1554 * Check if the BBP tuning should be enabled.
@@ -1721,10 +1723,11 @@ static const struct rf_channel rf_vals_5222[] = {
1721 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, 1723 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 },
1722}; 1724};
1723 1725
1724static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 1726static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1725{ 1727{
1726 struct hw_mode_spec *spec = &rt2x00dev->spec; 1728 struct hw_mode_spec *spec = &rt2x00dev->spec;
1727 u8 *txpower; 1729 struct channel_info *info;
1730 char *tx_power;
1728 unsigned int i; 1731 unsigned int i;
1729 1732
1730 /* 1733 /*
@@ -1741,20 +1744,10 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1741 EEPROM_MAC_ADDR_0)); 1744 EEPROM_MAC_ADDR_0));
1742 1745
1743 /* 1746 /*
1744 * Convert tx_power array in eeprom.
1745 */
1746 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1747 for (i = 0; i < 14; i++)
1748 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1749
1750 /*
1751 * Initialize hw_mode information. 1747 * Initialize hw_mode information.
1752 */ 1748 */
1753 spec->supported_bands = SUPPORT_BAND_2GHZ; 1749 spec->supported_bands = SUPPORT_BAND_2GHZ;
1754 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1750 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1755 spec->tx_power_a = NULL;
1756 spec->tx_power_bg = txpower;
1757 spec->tx_power_default = DEFAULT_TXPOWER;
1758 1751
1759 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1752 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
1760 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1753 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
@@ -1776,6 +1769,26 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1776 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1769 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1777 spec->channels = rf_vals_5222; 1770 spec->channels = rf_vals_5222;
1778 } 1771 }
1772
1773 /*
1774 * Create channel information array
1775 */
1776 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1777 if (!info)
1778 return -ENOMEM;
1779
1780 spec->channels_info = info;
1781
1782 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1783 for (i = 0; i < 14; i++)
1784 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1785
1786 if (spec->num_channels > 14) {
1787 for (i = 14; i < spec->num_channels; i++)
1788 info[i].tx_power1 = DEFAULT_TXPOWER;
1789 }
1790
1791 return 0;
1779} 1792}
1780 1793
1781static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) 1794static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1796,7 +1809,9 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1796 /* 1809 /*
1797 * Initialize hw specifications. 1810 * Initialize hw specifications.
1798 */ 1811 */
1799 rt2500pci_probe_hw_mode(rt2x00dev); 1812 retval = rt2500pci_probe_hw_mode(rt2x00dev);
1813 if (retval)
1814 return retval;
1800 1815
1801 /* 1816 /*
1802 * This device requires the atim queue and DMA-mapped skbs. 1817 * This device requires the atim queue and DMA-mapped skbs.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 42f376929ea9..8c26bef6cf49 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1223,17 +1223,10 @@
1223#define MAX_TXPOWER 31 1223#define MAX_TXPOWER 31
1224#define DEFAULT_TXPOWER 24 1224#define DEFAULT_TXPOWER 24
1225 1225
1226#define TXPOWER_FROM_DEV(__txpower) \ 1226#define TXPOWER_FROM_DEV(__txpower) \
1227({ \ 1227 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1228 ((__txpower) > MAX_TXPOWER) ? \ 1228
1229 DEFAULT_TXPOWER : (__txpower); \ 1229#define TXPOWER_TO_DEV(__txpower) \
1230}) 1230 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
1231
1232#define TXPOWER_TO_DEV(__txpower) \
1233({ \
1234 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
1235 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
1236 (__txpower)); \
1237})
1238 1231
1239#endif /* RT2500PCI_H */ 1232#endif /* RT2500PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index cd5af656932d..d3bf7bba611a 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -288,7 +288,7 @@ static const struct rt2x00debug rt2500usb_rt2x00debug = {
288}; 288};
289#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 289#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
290 290
291#ifdef CONFIG_RT2500USB_LEDS 291#ifdef CONFIG_RT2X00_LIB_LEDS
292static void rt2500usb_brightness_set(struct led_classdev *led_cdev, 292static void rt2500usb_brightness_set(struct led_classdev *led_cdev,
293 enum led_brightness brightness) 293 enum led_brightness brightness)
294{ 294{
@@ -333,7 +333,7 @@ static void rt2500usb_init_led(struct rt2x00_dev *rt2x00dev,
333 led->led_dev.blink_set = rt2500usb_blink_set; 333 led->led_dev.blink_set = rt2500usb_blink_set;
334 led->flags = LED_INITIALIZED; 334 led->flags = LED_INITIALIZED;
335} 335}
336#endif /* CONFIG_RT2500USB_LEDS */ 336#endif /* CONFIG_RT2X00_LIB_LEDS */
337 337
338/* 338/*
339 * Configuration handlers. 339 * Configuration handlers.
@@ -384,7 +384,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
384 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg); 384 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
385 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6); 385 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
386 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 386 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
387 2 * (conf->type != IEEE80211_IF_TYPE_STA)); 387 2 * (conf->type != NL80211_IFTYPE_STATION));
388 rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); 388 rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg);
389 389
390 /* 390 /*
@@ -1114,8 +1114,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1114 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1114 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1115 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1115 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1116 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1116 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1117 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, 1117 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1118 skb->len - skbdesc->desc_len);
1119 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); 1118 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE);
1120 rt2x00_desc_write(txd, 0, word); 1119 rt2x00_desc_write(txd, 0, word);
1121} 1120}
@@ -1134,7 +1133,6 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1134 int pipe = usb_sndbulkpipe(usb_dev, 1); 1133 int pipe = usb_sndbulkpipe(usb_dev, 1);
1135 int length; 1134 int length;
1136 u16 reg; 1135 u16 reg;
1137 u32 word, len;
1138 1136
1139 /* 1137 /*
1140 * Add the descriptor in front of the skb. 1138 * Add the descriptor in front of the skb.
@@ -1144,17 +1142,6 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1144 skbdesc->desc = entry->skb->data; 1142 skbdesc->desc = entry->skb->data;
1145 1143
1146 /* 1144 /*
1147 * Adjust the beacon databyte count. The current number is
1148 * calculated before this function gets called, but falsely
1149 * assumes that the descriptor was already present in the SKB.
1150 */
1151 rt2x00_desc_read(skbdesc->desc, 0, &word);
1152 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1153 len += skbdesc->desc_len;
1154 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1155 rt2x00_desc_write(skbdesc->desc, 0, word);
1156
1157 /*
1158 * Disable beaconing while we are reloading the beacon data, 1145 * Disable beaconing while we are reloading the beacon data,
1159 * otherwise we might be sending out invalid data. 1146 * otherwise we might be sending out invalid data.
1160 */ 1147 */
@@ -1280,6 +1267,8 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1280 1267
1281 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1268 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1282 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1269 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1270 else
1271 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1283 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1272 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1284 rxdesc->dev_flags |= RXDONE_MY_BSS; 1273 rxdesc->dev_flags |= RXDONE_MY_BSS;
1285 1274
@@ -1297,7 +1286,7 @@ static void rt2500usb_beacondone(struct urb *urb)
1297 struct queue_entry *entry = (struct queue_entry *)urb->context; 1286 struct queue_entry *entry = (struct queue_entry *)urb->context;
1298 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; 1287 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
1299 1288
1300 if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) 1289 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
1301 return; 1290 return;
1302 1291
1303 /* 1292 /*
@@ -1484,14 +1473,14 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1484 /* 1473 /*
1485 * Store led mode, for correct led behaviour. 1474 * Store led mode, for correct led behaviour.
1486 */ 1475 */
1487#ifdef CONFIG_RT2500USB_LEDS 1476#ifdef CONFIG_RT2X00_LIB_LEDS
1488 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1477 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1489 1478
1490 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1479 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
1491 if (value == LED_MODE_TXRX_ACTIVITY) 1480 if (value == LED_MODE_TXRX_ACTIVITY)
1492 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual, 1481 rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual,
1493 LED_TYPE_ACTIVITY); 1482 LED_TYPE_ACTIVITY);
1494#endif /* CONFIG_RT2500USB_LEDS */ 1483#endif /* CONFIG_RT2X00_LIB_LEDS */
1495 1484
1496 /* 1485 /*
1497 * Check if the BBP tuning should be disabled. 1486 * Check if the BBP tuning should be disabled.
@@ -1665,10 +1654,11 @@ static const struct rf_channel rf_vals_5222[] = {
1665 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, 1654 { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 },
1666}; 1655};
1667 1656
1668static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 1657static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1669{ 1658{
1670 struct hw_mode_spec *spec = &rt2x00dev->spec; 1659 struct hw_mode_spec *spec = &rt2x00dev->spec;
1671 u8 *txpower; 1660 struct channel_info *info;
1661 char *tx_power;
1672 unsigned int i; 1662 unsigned int i;
1673 1663
1674 /* 1664 /*
@@ -1687,20 +1677,10 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1687 EEPROM_MAC_ADDR_0)); 1677 EEPROM_MAC_ADDR_0));
1688 1678
1689 /* 1679 /*
1690 * Convert tx_power array in eeprom.
1691 */
1692 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1693 for (i = 0; i < 14; i++)
1694 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1695
1696 /*
1697 * Initialize hw_mode information. 1680 * Initialize hw_mode information.
1698 */ 1681 */
1699 spec->supported_bands = SUPPORT_BAND_2GHZ; 1682 spec->supported_bands = SUPPORT_BAND_2GHZ;
1700 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 1683 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1701 spec->tx_power_a = NULL;
1702 spec->tx_power_bg = txpower;
1703 spec->tx_power_default = DEFAULT_TXPOWER;
1704 1684
1705 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) { 1685 if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
1706 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); 1686 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
@@ -1722,6 +1702,26 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1722 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1702 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1723 spec->channels = rf_vals_5222; 1703 spec->channels = rf_vals_5222;
1724 } 1704 }
1705
1706 /*
1707 * Create channel information array
1708 */
1709 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1710 if (!info)
1711 return -ENOMEM;
1712
1713 spec->channels_info = info;
1714
1715 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
1716 for (i = 0; i < 14; i++)
1717 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1718
1719 if (spec->num_channels > 14) {
1720 for (i = 14; i < spec->num_channels; i++)
1721 info[i].tx_power1 = DEFAULT_TXPOWER;
1722 }
1723
1724 return 0;
1725} 1725}
1726 1726
1727static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) 1727static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1742,7 +1742,9 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1742 /* 1742 /*
1743 * Initialize hw specifications. 1743 * Initialize hw specifications.
1744 */ 1744 */
1745 rt2500usb_probe_hw_mode(rt2x00dev); 1745 retval = rt2500usb_probe_hw_mode(rt2x00dev);
1746 if (retval)
1747 return retval;
1746 1748
1747 /* 1749 /*
1748 * This device requires the atim queue 1750 * This device requires the atim queue
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 4769ffeb4cc6..89e5ed24e4f7 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -825,17 +825,10 @@
825#define MAX_TXPOWER 31 825#define MAX_TXPOWER 31
826#define DEFAULT_TXPOWER 24 826#define DEFAULT_TXPOWER 24
827 827
828#define TXPOWER_FROM_DEV(__txpower) \ 828#define TXPOWER_FROM_DEV(__txpower) \
829({ \ 829 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
830 ((__txpower) > MAX_TXPOWER) ? \ 830
831 DEFAULT_TXPOWER : (__txpower); \ 831#define TXPOWER_TO_DEV(__txpower) \
832}) 832 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
833
834#define TXPOWER_TO_DEV(__txpower) \
835({ \
836 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
837 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
838 (__txpower)); \
839})
840 833
841#endif /* RT2500USB_H */ 834#endif /* RT2500USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8b10ea41b204..1359a3768404 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -44,7 +44,7 @@
44/* 44/*
45 * Module information. 45 * Module information.
46 */ 46 */
47#define DRV_VERSION "2.1.8" 47#define DRV_VERSION "2.2.1"
48#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 48#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
49 49
50/* 50/*
@@ -53,11 +53,11 @@
53 */ 53 */
54#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \ 54#define DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, __args...) \
55 printk(__kernlvl "%s -> %s: %s - " __msg, \ 55 printk(__kernlvl "%s -> %s: %s - " __msg, \
56 wiphy_name((__dev)->hw->wiphy), __FUNCTION__, __lvl, ##__args) 56 wiphy_name((__dev)->hw->wiphy), __func__, __lvl, ##__args)
57 57
58#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \ 58#define DEBUG_PRINTK_PROBE(__kernlvl, __lvl, __msg, __args...) \
59 printk(__kernlvl "%s -> %s: %s - " __msg, \ 59 printk(__kernlvl "%s -> %s: %s - " __msg, \
60 KBUILD_MODNAME, __FUNCTION__, __lvl, ##__args) 60 KBUILD_MODNAME, __func__, __lvl, ##__args)
61 61
62#ifdef CONFIG_RT2X00_DEBUG 62#ifdef CONFIG_RT2X00_DEBUG
63#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ 63#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \
@@ -144,6 +144,17 @@ struct rf_channel {
144}; 144};
145 145
146/* 146/*
147 * Channel information structure
148 */
149struct channel_info {
150 unsigned int flags;
151#define GEOGRAPHY_ALLOWED 0x00000001
152
153 short tx_power1;
154 short tx_power2;
155};
156
157/*
147 * Antenna setup values. 158 * Antenna setup values.
148 */ 159 */
149struct antenna_setup { 160struct antenna_setup {
@@ -394,10 +405,7 @@ static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
394 * @num_channels: Number of supported channels. This is used as array size 405 * @num_channels: Number of supported channels. This is used as array size
395 * for @tx_power_a, @tx_power_bg and @channels. 406 * for @tx_power_a, @tx_power_bg and @channels.
396 * @channels: Device/chipset specific channel values (See &struct rf_channel). 407 * @channels: Device/chipset specific channel values (See &struct rf_channel).
397 * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL). 408 * @channels_info: Additional information for channels (See &struct channel_info).
398 * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL).
399 * @tx_power_default: Default TX power value to use when either
400 * @tx_power_a or @tx_power_bg is missing.
401 */ 409 */
402struct hw_mode_spec { 410struct hw_mode_spec {
403 unsigned int supported_bands; 411 unsigned int supported_bands;
@@ -410,10 +418,7 @@ struct hw_mode_spec {
410 418
411 unsigned int num_channels; 419 unsigned int num_channels;
412 const struct rf_channel *channels; 420 const struct rf_channel *channels;
413 421 const struct channel_info *channels_info;
414 const u8 *tx_power_a;
415 const u8 *tx_power_bg;
416 u8 tx_power_default;
417}; 422};
418 423
419/* 424/*
@@ -425,7 +430,9 @@ struct hw_mode_spec {
425 */ 430 */
426struct rt2x00lib_conf { 431struct rt2x00lib_conf {
427 struct ieee80211_conf *conf; 432 struct ieee80211_conf *conf;
433
428 struct rf_channel rf; 434 struct rf_channel rf;
435 struct channel_info channel;
429 436
430 struct antenna_setup ant; 437 struct antenna_setup ant;
431 438
@@ -452,6 +459,23 @@ struct rt2x00lib_erp {
452}; 459};
453 460
454/* 461/*
462 * Configuration structure for hardware encryption.
463 */
464struct rt2x00lib_crypto {
465 enum cipher cipher;
466
467 enum set_key_cmd cmd;
468 const u8 *address;
469
470 u32 bssidx;
471 u32 aid;
472
473 u8 key[16];
474 u8 tx_mic[8];
475 u8 rx_mic[8];
476};
477
478/*
455 * Configuration structure wrapper around the 479 * Configuration structure wrapper around the
456 * rt2x00 interface configuration handler. 480 * rt2x00 interface configuration handler.
457 */ 481 */
@@ -459,7 +483,7 @@ struct rt2x00intf_conf {
459 /* 483 /*
460 * Interface type 484 * Interface type
461 */ 485 */
462 enum ieee80211_if_types type; 486 enum nl80211_iftype type;
463 487
464 /* 488 /*
465 * TSF sync value, this is dependant on the operation type. 489 * TSF sync value, this is dependant on the operation type.
@@ -547,6 +571,12 @@ struct rt2x00lib_ops {
547 /* 571 /*
548 * Configuration handlers. 572 * Configuration handlers.
549 */ 573 */
574 int (*config_shared_key) (struct rt2x00_dev *rt2x00dev,
575 struct rt2x00lib_crypto *crypto,
576 struct ieee80211_key_conf *key);
577 int (*config_pairwise_key) (struct rt2x00_dev *rt2x00dev,
578 struct rt2x00lib_crypto *crypto,
579 struct ieee80211_key_conf *key);
550 void (*config_filter) (struct rt2x00_dev *rt2x00dev, 580 void (*config_filter) (struct rt2x00_dev *rt2x00dev,
551 const unsigned int filter_flags); 581 const unsigned int filter_flags);
552 void (*config_intf) (struct rt2x00_dev *rt2x00dev, 582 void (*config_intf) (struct rt2x00_dev *rt2x00dev,
@@ -599,17 +629,16 @@ enum rt2x00_flags {
599 /* 629 /*
600 * Device state flags 630 * Device state flags
601 */ 631 */
602 DEVICE_PRESENT, 632 DEVICE_STATE_PRESENT,
603 DEVICE_REGISTERED_HW, 633 DEVICE_STATE_REGISTERED_HW,
604 DEVICE_INITIALIZED, 634 DEVICE_STATE_INITIALIZED,
605 DEVICE_STARTED, 635 DEVICE_STATE_STARTED,
606 DEVICE_STARTED_SUSPEND, 636 DEVICE_STATE_STARTED_SUSPEND,
607 DEVICE_ENABLED_RADIO, 637 DEVICE_STATE_ENABLED_RADIO,
608 DEVICE_DISABLED_RADIO_HW, 638 DEVICE_STATE_DISABLED_RADIO_HW,
609 DEVICE_DIRTY_CONFIG,
610 639
611 /* 640 /*
612 * Driver features 641 * Driver requirements
613 */ 642 */
614 DRIVER_REQUIRE_FIRMWARE, 643 DRIVER_REQUIRE_FIRMWARE,
615 DRIVER_REQUIRE_BEACON_GUARD, 644 DRIVER_REQUIRE_BEACON_GUARD,
@@ -618,9 +647,14 @@ enum rt2x00_flags {
618 DRIVER_REQUIRE_DMA, 647 DRIVER_REQUIRE_DMA,
619 648
620 /* 649 /*
621 * Driver configuration 650 * Driver features
622 */ 651 */
623 CONFIG_SUPPORT_HW_BUTTON, 652 CONFIG_SUPPORT_HW_BUTTON,
653 CONFIG_SUPPORT_HW_CRYPTO,
654
655 /*
656 * Driver configuration
657 */
624 CONFIG_FRAME_TYPE, 658 CONFIG_FRAME_TYPE,
625 CONFIG_RF_SEQUENCE, 659 CONFIG_RF_SEQUENCE,
626 CONFIG_EXTERNAL_LNA_A, 660 CONFIG_EXTERNAL_LNA_A,
@@ -769,6 +803,11 @@ struct rt2x00_dev {
769 u32 *rf; 803 u32 *rf;
770 804
771 /* 805 /*
806 * LNA gain
807 */
808 short lna_gain;
809
810 /*
772 * USB Max frame size (for rt2500usb & rt73usb). 811 * USB Max frame size (for rt2500usb & rt73usb).
773 */ 812 */
774 u16 usb_maxpacket; 813 u16 usb_maxpacket;
@@ -966,6 +1005,13 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
966 unsigned int changed_flags, 1005 unsigned int changed_flags,
967 unsigned int *total_flags, 1006 unsigned int *total_flags,
968 int mc_count, struct dev_addr_list *mc_list); 1007 int mc_count, struct dev_addr_list *mc_list);
1008#ifdef CONFIG_RT2X00_LIB_CRYPTO
1009int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1010 const u8 *local_address, const u8 *address,
1011 struct ieee80211_key_conf *key);
1012#else
1013#define rt2x00mac_set_key NULL
1014#endif /* CONFIG_RT2X00_LIB_CRYPTO */
969int rt2x00mac_get_stats(struct ieee80211_hw *hw, 1015int rt2x00mac_get_stats(struct ieee80211_hw *hw,
970 struct ieee80211_low_level_stats *stats); 1016 struct ieee80211_low_level_stats *stats);
971int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw, 1017int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index d134c3be539a..4d5e87b015a0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -31,7 +31,7 @@
31 31
32void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, 32void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
33 struct rt2x00_intf *intf, 33 struct rt2x00_intf *intf,
34 enum ieee80211_if_types type, 34 enum nl80211_iftype type,
35 u8 *mac, u8 *bssid) 35 u8 *mac, u8 *bssid)
36{ 36{
37 struct rt2x00intf_conf conf; 37 struct rt2x00intf_conf conf;
@@ -40,11 +40,11 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
40 conf.type = type; 40 conf.type = type;
41 41
42 switch (type) { 42 switch (type) {
43 case IEEE80211_IF_TYPE_IBSS: 43 case NL80211_IFTYPE_ADHOC:
44 case IEEE80211_IF_TYPE_AP: 44 case NL80211_IFTYPE_AP:
45 conf.sync = TSF_SYNC_BEACON; 45 conf.sync = TSF_SYNC_BEACON;
46 break; 46 break;
47 case IEEE80211_IF_TYPE_STA: 47 case NL80211_IFTYPE_STATION:
48 conf.sync = TSF_SYNC_INFRA; 48 conf.sync = TSF_SYNC_INFRA;
49 break; 49 break;
50 default: 50 default:
@@ -121,7 +121,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
121 * Antenna setup changes require the RX to be disabled, 121 * Antenna setup changes require the RX to be disabled,
122 * else the changes will be ignored by the device. 122 * else the changes will be ignored by the device.
123 */ 123 */
124 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 124 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
125 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); 125 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK);
126 126
127 /* 127 /*
@@ -136,7 +136,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
136 rt2x00dev->link.ant.active.rx = libconf.ant.rx; 136 rt2x00dev->link.ant.active.rx = libconf.ant.rx;
137 rt2x00dev->link.ant.active.tx = libconf.ant.tx; 137 rt2x00dev->link.ant.active.tx = libconf.ant.tx;
138 138
139 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 139 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
140 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 140 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
141} 141}
142 142
@@ -245,6 +245,10 @@ config:
245 memcpy(&libconf.rf, 245 memcpy(&libconf.rf,
246 &rt2x00dev->spec.channels[conf->channel->hw_value], 246 &rt2x00dev->spec.channels[conf->channel->hw_value],
247 sizeof(libconf.rf)); 247 sizeof(libconf.rf));
248
249 memcpy(&libconf.channel,
250 &rt2x00dev->spec.channels_info[conf->channel->hw_value],
251 sizeof(libconf.channel));
248 } 252 }
249 253
250 if (flags & CONFIG_UPDATE_ANTENNA) { 254 if (flags & CONFIG_UPDATE_ANTENNA) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
new file mode 100644
index 000000000000..e1448cfa9444
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -0,0 +1,215 @@
1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00lib
23 Abstract: rt2x00 crypto specific routines.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28
29#include "rt2x00.h"
30#include "rt2x00lib.h"
31
32enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
33{
34 switch (key->alg) {
35 case ALG_WEP:
36 if (key->keylen == LEN_WEP40)
37 return CIPHER_WEP64;
38 else
39 return CIPHER_WEP128;
40 case ALG_TKIP:
41 return CIPHER_TKIP;
42 case ALG_CCMP:
43 return CIPHER_AES;
44 default:
45 return CIPHER_NONE;
46 }
47}
48
49unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
50{
51 struct ieee80211_key_conf *key = tx_info->control.hw_key;
52 unsigned int overhead = 0;
53
54 /*
55 * Extend frame length to include IV/EIV/ICV/MMIC,
56 * note that these lengths should only be added when
57 * mac80211 does not generate it.
58 */
59 overhead += tx_info->control.icv_len;
60
61 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
62 overhead += tx_info->control.iv_len;
63
64 if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
65 if (key->alg == ALG_TKIP)
66 overhead += 8;
67 }
68
69 return overhead;
70}
71
72void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
73{
74 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
75 unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
76
77 if (unlikely(!iv_len))
78 return;
79
80 /* Copy IV/EIV data */
81 if (iv_len >= 4)
82 memcpy(&skbdesc->iv, skb->data + header_length, 4);
83 if (iv_len >= 8)
84 memcpy(&skbdesc->eiv, skb->data + header_length + 4, 4);
85
86 /* Move ieee80211 header */
87 memmove(skb->data + iv_len, skb->data, header_length);
88
89 /* Pull buffer to correct size */
90 skb_pull(skb, iv_len);
91
92 /* IV/EIV data has officially be stripped */
93 skbdesc->flags |= FRAME_DESC_IV_STRIPPED;
94}
95
96void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
97{
98 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
99 unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
100 const unsigned int iv_len =
101 ((!!(skbdesc->iv)) * 4) + ((!!(skbdesc->eiv)) * 4);
102
103 if (!(skbdesc->flags & FRAME_DESC_IV_STRIPPED))
104 return;
105
106 skb_push(skb, iv_len);
107
108 /* Move ieee80211 header */
109 memmove(skb->data, skb->data + iv_len, header_length);
110
111 /* Copy IV/EIV data */
112 if (iv_len >= 4)
113 memcpy(skb->data + header_length, &skbdesc->iv, 4);
114 if (iv_len >= 8)
115 memcpy(skb->data + header_length + 4, &skbdesc->eiv, 4);
116
117 /* IV/EIV data has returned into the frame */
118 skbdesc->flags &= ~FRAME_DESC_IV_STRIPPED;
119}
120
121void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
122 unsigned int header_length,
123 struct rxdone_entry_desc *rxdesc)
124{
125 unsigned int payload_len = rxdesc->size - header_length;
126 unsigned int iv_len;
127 unsigned int icv_len;
128 unsigned int transfer = 0;
129
130 /*
131 * WEP64/WEP128: Provides IV & ICV
132 * TKIP: Provides IV/EIV & ICV
133 * AES: Provies IV/EIV & ICV
134 */
135 switch (rxdesc->cipher) {
136 case CIPHER_WEP64:
137 case CIPHER_WEP128:
138 iv_len = 4;
139 icv_len = 4;
140 break;
141 case CIPHER_TKIP:
142 iv_len = 8;
143 icv_len = 4;
144 break;
145 case CIPHER_AES:
146 iv_len = 8;
147 icv_len = 8;
148 break;
149 default:
150 /* Unsupport type */
151 return;
152 }
153
154 /*
155 * Make room for new data, note that we increase both
156 * headsize and tailsize when required. The tailsize is
157 * only needed when ICV data needs to be inserted and
158 * the padding is smaller then the ICV data.
159 * When alignment requirements is greater then the
160 * ICV data we must trim the skb to the correct size
161 * because we need to remove the extra bytes.
162 */
163 skb_push(skb, iv_len + align);
164 if (align < icv_len)
165 skb_put(skb, icv_len - align);
166 else if (align > icv_len)
167 skb_trim(skb, rxdesc->size + iv_len + icv_len);
168
169 /* Move ieee80211 header */
170 memmove(skb->data + transfer,
171 skb->data + transfer + iv_len + align,
172 header_length);
173 transfer += header_length;
174
175 /* Copy IV data */
176 if (iv_len >= 4) {
177 memcpy(skb->data + transfer, &rxdesc->iv, 4);
178 transfer += 4;
179 }
180
181 /* Copy EIV data */
182 if (iv_len >= 8) {
183 memcpy(skb->data + transfer, &rxdesc->eiv, 4);
184 transfer += 4;
185 }
186
187 /* Move payload */
188 if (align) {
189 memmove(skb->data + transfer,
190 skb->data + transfer + align,
191 payload_len);
192 }
193
194 /*
195 * NOTE: Always count the payload as transfered,
196 * even when alignment was set to zero. This is required
197 * for determining the correct offset for the ICV data.
198 */
199 transfer += payload_len;
200
201 /* Copy ICV data */
202 if (icv_len >= 4) {
203 memcpy(skb->data + transfer, &rxdesc->icv, 4);
204 /*
205 * AES appends 8 bytes, we can't fill the upper
206 * 4 bytes, but mac80211 doesn't care about what
207 * we provide here anyway and strips it immediately.
208 */
209 transfer += icv_len;
210 }
211
212 /* IV/EIV/ICV has been inserted into frame */
213 rxdesc->size = transfer;
214 rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
215}
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 6bee1d611bbf..5cf4c859e39d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -35,6 +35,13 @@
35 35
36#define MAX_LINE_LENGTH 64 36#define MAX_LINE_LENGTH 64
37 37
38struct rt2x00debug_crypto {
39 unsigned long success;
40 unsigned long icv_error;
41 unsigned long mic_error;
42 unsigned long key_error;
43};
44
38struct rt2x00debug_intf { 45struct rt2x00debug_intf {
39 /* 46 /*
40 * Pointer to driver structure where 47 * Pointer to driver structure where
@@ -63,6 +70,7 @@ struct rt2x00debug_intf {
63 * - queue folder 70 * - queue folder
64 * - frame dump file 71 * - frame dump file
65 * - queue stats file 72 * - queue stats file
73 * - crypto stats file
66 */ 74 */
67 struct dentry *driver_folder; 75 struct dentry *driver_folder;
68 struct dentry *driver_entry; 76 struct dentry *driver_entry;
@@ -80,6 +88,7 @@ struct rt2x00debug_intf {
80 struct dentry *queue_folder; 88 struct dentry *queue_folder;
81 struct dentry *queue_frame_dump_entry; 89 struct dentry *queue_frame_dump_entry;
82 struct dentry *queue_stats_entry; 90 struct dentry *queue_stats_entry;
91 struct dentry *crypto_stats_entry;
83 92
84 /* 93 /*
85 * The frame dump file only allows a single reader, 94 * The frame dump file only allows a single reader,
@@ -98,6 +107,12 @@ struct rt2x00debug_intf {
98 wait_queue_head_t frame_dump_waitqueue; 107 wait_queue_head_t frame_dump_waitqueue;
99 108
100 /* 109 /*
110 * HW crypto statistics.
111 * All statistics are stored seperately per cipher type.
112 */
113 struct rt2x00debug_crypto crypto_stats[CIPHER_MAX];
114
115 /*
101 * Driver and chipset files will use a data buffer 116 * Driver and chipset files will use a data buffer
102 * that has been created in advance. This will simplify 117 * that has been created in advance. This will simplify
103 * the code since we can use the debugfs functions. 118 * the code since we can use the debugfs functions.
@@ -114,6 +129,25 @@ struct rt2x00debug_intf {
114 unsigned int offset_rf; 129 unsigned int offset_rf;
115}; 130};
116 131
132void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
133 enum cipher cipher, enum rx_crypto status)
134{
135 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
136
137 if (cipher == CIPHER_TKIP_NO_MIC)
138 cipher = CIPHER_TKIP;
139 if (cipher == CIPHER_NONE || cipher > CIPHER_MAX)
140 return;
141
142 /* Remove CIPHER_NONE index */
143 cipher--;
144
145 intf->crypto_stats[cipher].success += (status == RX_CRYPTO_SUCCESS);
146 intf->crypto_stats[cipher].icv_error += (status == RX_CRYPTO_FAIL_ICV);
147 intf->crypto_stats[cipher].mic_error += (status == RX_CRYPTO_FAIL_MIC);
148 intf->crypto_stats[cipher].key_error += (status == RX_CRYPTO_FAIL_KEY);
149}
150
117void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 151void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
118 enum rt2x00_dump_type type, struct sk_buff *skb) 152 enum rt2x00_dump_type type, struct sk_buff *skb)
119{ 153{
@@ -327,6 +361,59 @@ static const struct file_operations rt2x00debug_fop_queue_stats = {
327 .release = rt2x00debug_file_release, 361 .release = rt2x00debug_file_release,
328}; 362};
329 363
364#ifdef CONFIG_RT2X00_LIB_CRYPTO
365static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
366 char __user *buf,
367 size_t length,
368 loff_t *offset)
369{
370 struct rt2x00debug_intf *intf = file->private_data;
371 char *name[] = { "WEP64", "WEP128", "TKIP", "AES" };
372 char *data;
373 char *temp;
374 size_t size;
375 unsigned int i;
376
377 if (*offset)
378 return 0;
379
380 data = kzalloc((1 + CIPHER_MAX)* MAX_LINE_LENGTH, GFP_KERNEL);
381 if (!data)
382 return -ENOMEM;
383
384 temp = data;
385 temp += sprintf(data, "cipher\tsuccess\ticv err\tmic err\tkey err\n");
386
387 for (i = 0; i < CIPHER_MAX; i++) {
388 temp += sprintf(temp, "%s\t%lu\t%lu\t%lu\t%lu\n", name[i],
389 intf->crypto_stats[i].success,
390 intf->crypto_stats[i].icv_error,
391 intf->crypto_stats[i].mic_error,
392 intf->crypto_stats[i].key_error);
393 }
394
395 size = strlen(data);
396 size = min(size, length);
397
398 if (copy_to_user(buf, data, size)) {
399 kfree(data);
400 return -EFAULT;
401 }
402
403 kfree(data);
404
405 *offset += size;
406 return size;
407}
408
409static const struct file_operations rt2x00debug_fop_crypto_stats = {
410 .owner = THIS_MODULE,
411 .read = rt2x00debug_read_crypto_stats,
412 .open = rt2x00debug_file_open,
413 .release = rt2x00debug_file_release,
414};
415#endif
416
330#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \ 417#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \
331static ssize_t rt2x00debug_read_##__name(struct file *file, \ 418static ssize_t rt2x00debug_read_##__name(struct file *file, \
332 char __user *buf, \ 419 char __user *buf, \
@@ -569,6 +656,13 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
569 debugfs_create_file("queue", S_IRUSR, intf->queue_folder, 656 debugfs_create_file("queue", S_IRUSR, intf->queue_folder,
570 intf, &rt2x00debug_fop_queue_stats); 657 intf, &rt2x00debug_fop_queue_stats);
571 658
659#ifdef CONFIG_RT2X00_LIB_CRYPTO
660 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
661 intf->crypto_stats_entry =
662 debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
663 intf, &rt2x00debug_fop_crypto_stats);
664#endif
665
572 return; 666 return;
573 667
574exit: 668exit:
@@ -587,6 +681,9 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
587 681
588 skb_queue_purge(&intf->frame_dump_skbqueue); 682 skb_queue_purge(&intf->frame_dump_skbqueue);
589 683
684#ifdef CONFIG_RT2X00_LIB_CRYPTO
685 debugfs_remove(intf->crypto_stats_entry);
686#endif
590 debugfs_remove(intf->queue_stats_entry); 687 debugfs_remove(intf->queue_stats_entry);
591 debugfs_remove(intf->queue_frame_dump_entry); 688 debugfs_remove(intf->queue_frame_dump_entry);
592 debugfs_remove(intf->queue_folder); 689 debugfs_remove(intf->queue_folder);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index f42283ad7b02..86840e3585e8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -34,7 +34,7 @@
34 */ 34 */
35void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev) 35void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
36{ 36{
37 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 37 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
38 return; 38 return;
39 39
40 /* 40 /*
@@ -94,8 +94,8 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
94 * Don't enable the radio twice. 94 * Don't enable the radio twice.
95 * And check if the hardware button has been disabled. 95 * And check if the hardware button has been disabled.
96 */ 96 */
97 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 97 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
98 test_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags)) 98 test_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags))
99 return 0; 99 return 0;
100 100
101 /* 101 /*
@@ -117,7 +117,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
117 rt2x00leds_led_radio(rt2x00dev, true); 117 rt2x00leds_led_radio(rt2x00dev, true);
118 rt2x00led_led_activity(rt2x00dev, true); 118 rt2x00led_led_activity(rt2x00dev, true);
119 119
120 __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags); 120 set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
121 121
122 /* 122 /*
123 * Enable RX. 123 * Enable RX.
@@ -134,7 +134,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
134 134
135void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) 135void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
136{ 136{
137 if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 137 if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
138 return; 138 return;
139 139
140 /* 140 /*
@@ -354,7 +354,7 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
354 * When the radio is shutting down we should 354 * When the radio is shutting down we should
355 * immediately cease all link tuning. 355 * immediately cease all link tuning.
356 */ 356 */
357 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 357 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
358 return; 358 return;
359 359
360 /* 360 /*
@@ -431,7 +431,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
431 * note that in the spinlock protected area above the delayed_flags 431 * note that in the spinlock protected area above the delayed_flags
432 * have been cleared correctly. 432 * have been cleared correctly.
433 */ 433 */
434 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 434 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
435 return; 435 return;
436 436
437 if (delayed_flags & DELAYED_UPDATE_BEACON) 437 if (delayed_flags & DELAYED_UPDATE_BEACON)
@@ -467,8 +467,8 @@ static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
467 struct rt2x00_dev *rt2x00dev = data; 467 struct rt2x00_dev *rt2x00dev = data;
468 struct rt2x00_intf *intf = vif_to_intf(vif); 468 struct rt2x00_intf *intf = vif_to_intf(vif);
469 469
470 if (vif->type != IEEE80211_IF_TYPE_AP && 470 if (vif->type != NL80211_IFTYPE_AP &&
471 vif->type != IEEE80211_IF_TYPE_IBSS) 471 vif->type != NL80211_IFTYPE_ADHOC)
472 return; 472 return;
473 473
474 /* 474 /*
@@ -484,7 +484,7 @@ static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
484 484
485void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) 485void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
486{ 486{
487 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 487 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
488 return; 488 return;
489 489
490 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, 490 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
@@ -508,6 +508,15 @@ void rt2x00lib_txdone(struct queue_entry *entry,
508 rt2x00queue_unmap_skb(rt2x00dev, entry->skb); 508 rt2x00queue_unmap_skb(rt2x00dev, entry->skb);
509 509
510 /* 510 /*
511 * If the IV/EIV data was stripped from the frame before it was
512 * passed to the hardware, we should now reinsert it again because
513 * mac80211 will expect the the same data to be present it the
514 * frame as it was passed to us.
515 */
516 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
517 rt2x00crypto_tx_insert_iv(entry->skb);
518
519 /*
511 * Send frame to debugfs immediately, after this call is completed 520 * Send frame to debugfs immediately, after this call is completed
512 * we are going to overwrite the skb->cb array. 521 * we are going to overwrite the skb->cb array.
513 */ 522 */
@@ -563,7 +572,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
563 572
564 rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry); 573 rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry);
565 574
566 __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 575 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
567 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 576 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
568 577
569 /* 578 /*
@@ -585,7 +594,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
585 struct ieee80211_supported_band *sband; 594 struct ieee80211_supported_band *sband;
586 struct ieee80211_hdr *hdr; 595 struct ieee80211_hdr *hdr;
587 const struct rt2x00_rate *rate; 596 const struct rt2x00_rate *rate;
588 unsigned int header_size; 597 unsigned int header_length;
589 unsigned int align; 598 unsigned int align;
590 unsigned int i; 599 unsigned int i;
591 int idx = -1; 600 int idx = -1;
@@ -613,10 +622,19 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
613 * The data behind the ieee80211 header must be 622 * The data behind the ieee80211 header must be
614 * aligned on a 4 byte boundary. 623 * aligned on a 4 byte boundary.
615 */ 624 */
616 header_size = ieee80211_get_hdrlen_from_skb(entry->skb); 625 header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
617 align = ((unsigned long)(entry->skb->data + header_size)) & 3; 626 align = ((unsigned long)(entry->skb->data + header_length)) & 3;
618 627
619 if (align) { 628 /*
629 * Hardware might have stripped the IV/EIV/ICV data,
630 * in that case it is possible that the data was
631 * provided seperately (through hardware descriptor)
632 * in which case we should reinsert the data into the frame.
633 */
634 if ((rxdesc.flags & RX_FLAG_IV_STRIPPED)) {
635 rt2x00crypto_rx_insert_iv(entry->skb, align,
636 header_length, &rxdesc);
637 } else if (align) {
620 skb_push(entry->skb, align); 638 skb_push(entry->skb, align);
621 /* Move entire frame in 1 command */ 639 /* Move entire frame in 1 command */
622 memmove(entry->skb->data, entry->skb->data + align, 640 memmove(entry->skb->data, entry->skb->data + align,
@@ -635,7 +653,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
635 653
636 if (((rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) && 654 if (((rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) &&
637 (rate->plcp == rxdesc.signal)) || 655 (rate->plcp == rxdesc.signal)) ||
638 (!(rxdesc.dev_flags & RXDONE_SIGNAL_PLCP) && 656 ((rxdesc.dev_flags & RXDONE_SIGNAL_BITRATE) &&
639 (rate->bitrate == rxdesc.signal))) { 657 (rate->bitrate == rxdesc.signal))) {
640 idx = i; 658 idx = i;
641 break; 659 break;
@@ -657,6 +675,10 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
657 (rxdesc.dev_flags & RXDONE_MY_BSS)) 675 (rxdesc.dev_flags & RXDONE_MY_BSS))
658 rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc.rssi); 676 rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc.rssi);
659 677
678 rt2x00debug_update_crypto(rt2x00dev,
679 rxdesc.cipher,
680 rxdesc.cipher_status);
681
660 rt2x00dev->link.qual.rx_success++; 682 rt2x00dev->link.qual.rx_success++;
661 683
662 rx_status->mactime = rxdesc.timestamp; 684 rx_status->mactime = rxdesc.timestamp;
@@ -796,7 +818,6 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
796 struct ieee80211_rate *rates; 818 struct ieee80211_rate *rates;
797 unsigned int num_rates; 819 unsigned int num_rates;
798 unsigned int i; 820 unsigned int i;
799 unsigned char tx_power;
800 821
801 num_rates = 0; 822 num_rates = 0;
802 if (spec->supported_rates & SUPPORT_RATE_CCK) 823 if (spec->supported_rates & SUPPORT_RATE_CCK)
@@ -822,20 +843,9 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
822 * Initialize Channel list. 843 * Initialize Channel list.
823 */ 844 */
824 for (i = 0; i < spec->num_channels; i++) { 845 for (i = 0; i < spec->num_channels; i++) {
825 if (spec->channels[i].channel <= 14) {
826 if (spec->tx_power_bg)
827 tx_power = spec->tx_power_bg[i];
828 else
829 tx_power = spec->tx_power_default;
830 } else {
831 if (spec->tx_power_a)
832 tx_power = spec->tx_power_a[i];
833 else
834 tx_power = spec->tx_power_default;
835 }
836
837 rt2x00lib_channel(&channels[i], 846 rt2x00lib_channel(&channels[i],
838 spec->channels[i].channel, tx_power, i); 847 spec->channels[i].channel,
848 spec->channels_info[i].tx_power1, i);
839 } 849 }
840 850
841 /* 851 /*
@@ -878,7 +888,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
878 888
879static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) 889static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
880{ 890{
881 if (test_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags)) 891 if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
882 ieee80211_unregister_hw(rt2x00dev->hw); 892 ieee80211_unregister_hw(rt2x00dev->hw);
883 893
884 if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) { 894 if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
@@ -887,6 +897,8 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
887 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL; 897 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
888 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; 898 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
889 } 899 }
900
901 kfree(rt2x00dev->spec.channels_info);
890} 902}
891 903
892static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) 904static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -894,6 +906,9 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
894 struct hw_mode_spec *spec = &rt2x00dev->spec; 906 struct hw_mode_spec *spec = &rt2x00dev->spec;
895 int status; 907 int status;
896 908
909 if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
910 return 0;
911
897 /* 912 /*
898 * Initialize HW modes. 913 * Initialize HW modes.
899 */ 914 */
@@ -915,7 +930,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
915 return status; 930 return status;
916 } 931 }
917 932
918 __set_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags); 933 set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags);
919 934
920 return 0; 935 return 0;
921} 936}
@@ -925,7 +940,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
925 */ 940 */
926static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) 941static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
927{ 942{
928 if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) 943 if (!test_and_clear_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
929 return; 944 return;
930 945
931 /* 946 /*
@@ -948,7 +963,7 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
948{ 963{
949 int status; 964 int status;
950 965
951 if (test_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) 966 if (test_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags))
952 return 0; 967 return 0;
953 968
954 /* 969 /*
@@ -967,7 +982,7 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
967 return status; 982 return status;
968 } 983 }
969 984
970 __set_bit(DEVICE_INITIALIZED, &rt2x00dev->flags); 985 set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
971 986
972 /* 987 /*
973 * Register the extra components. 988 * Register the extra components.
@@ -981,7 +996,7 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
981{ 996{
982 int retval; 997 int retval;
983 998
984 if (test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 999 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
985 return 0; 1000 return 0;
986 1001
987 /* 1002 /*
@@ -999,28 +1014,18 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
999 if (retval) 1014 if (retval)
1000 return retval; 1015 return retval;
1001 1016
1002 /*
1003 * Enable radio.
1004 */
1005 retval = rt2x00lib_enable_radio(rt2x00dev);
1006 if (retval) {
1007 rt2x00lib_uninitialize(rt2x00dev);
1008 return retval;
1009 }
1010
1011 rt2x00dev->intf_ap_count = 0; 1017 rt2x00dev->intf_ap_count = 0;
1012 rt2x00dev->intf_sta_count = 0; 1018 rt2x00dev->intf_sta_count = 0;
1013 rt2x00dev->intf_associated = 0; 1019 rt2x00dev->intf_associated = 0;
1014 1020
1015 __set_bit(DEVICE_STARTED, &rt2x00dev->flags); 1021 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
1016 __set_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
1017 1022
1018 return 0; 1023 return 0;
1019} 1024}
1020 1025
1021void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) 1026void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1022{ 1027{
1023 if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 1028 if (!test_and_clear_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
1024 return; 1029 return;
1025 1030
1026 /* 1031 /*
@@ -1032,8 +1037,6 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1032 rt2x00dev->intf_ap_count = 0; 1037 rt2x00dev->intf_ap_count = 0;
1033 rt2x00dev->intf_sta_count = 0; 1038 rt2x00dev->intf_sta_count = 0;
1034 rt2x00dev->intf_associated = 0; 1039 rt2x00dev->intf_associated = 0;
1035
1036 __clear_bit(DEVICE_STARTED, &rt2x00dev->flags);
1037} 1040}
1038 1041
1039/* 1042/*
@@ -1049,6 +1052,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1049 */ 1052 */
1050 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf); 1053 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);
1051 1054
1055 rt2x00dev->hw->wiphy->interface_modes =
1056 BIT(NL80211_IFTYPE_AP) |
1057 BIT(NL80211_IFTYPE_STATION) |
1058 BIT(NL80211_IFTYPE_ADHOC);
1059
1052 /* 1060 /*
1053 * Let the driver probe the device to detect the capabilities. 1061 * Let the driver probe the device to detect the capabilities.
1054 */ 1062 */
@@ -1088,7 +1096,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1088 rt2x00rfkill_allocate(rt2x00dev); 1096 rt2x00rfkill_allocate(rt2x00dev);
1089 rt2x00debug_register(rt2x00dev); 1097 rt2x00debug_register(rt2x00dev);
1090 1098
1091 __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); 1099 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1092 1100
1093 return 0; 1101 return 0;
1094 1102
@@ -1101,7 +1109,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev);
1101 1109
1102void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) 1110void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1103{ 1111{
1104 __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags); 1112 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1105 1113
1106 /* 1114 /*
1107 * Disable radio. 1115 * Disable radio.
@@ -1146,14 +1154,15 @@ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
1146 int retval; 1154 int retval;
1147 1155
1148 NOTICE(rt2x00dev, "Going to sleep.\n"); 1156 NOTICE(rt2x00dev, "Going to sleep.\n");
1149 __clear_bit(DEVICE_PRESENT, &rt2x00dev->flags);
1150 1157
1151 /* 1158 /*
1152 * Only continue if mac80211 has open interfaces. 1159 * Only continue if mac80211 has open interfaces.
1153 */ 1160 */
1154 if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 1161 if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
1162 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
1155 goto exit; 1163 goto exit;
1156 __set_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags); 1164
1165 set_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags);
1157 1166
1158 /* 1167 /*
1159 * Disable radio. 1168 * Disable radio.
@@ -1203,8 +1212,8 @@ static void rt2x00lib_resume_intf(void *data, u8 *mac,
1203 /* 1212 /*
1204 * Master or Ad-hoc mode require a new beacon update. 1213 * Master or Ad-hoc mode require a new beacon update.
1205 */ 1214 */
1206 if (vif->type == IEEE80211_IF_TYPE_AP || 1215 if (vif->type == NL80211_IFTYPE_AP ||
1207 vif->type == IEEE80211_IF_TYPE_IBSS) 1216 vif->type == NL80211_IFTYPE_ADHOC)
1208 intf->delayed_flags |= DELAYED_UPDATE_BEACON; 1217 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
1209 1218
1210 spin_unlock(&intf->lock); 1219 spin_unlock(&intf->lock);
@@ -1225,7 +1234,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1225 /* 1234 /*
1226 * Only continue if mac80211 had open interfaces. 1235 * Only continue if mac80211 had open interfaces.
1227 */ 1236 */
1228 if (!__test_and_clear_bit(DEVICE_STARTED_SUSPEND, &rt2x00dev->flags)) 1237 if (!test_and_clear_bit(DEVICE_STATE_STARTED_SUSPEND, &rt2x00dev->flags))
1229 return 0; 1238 return 0;
1230 1239
1231 /* 1240 /*
@@ -1252,7 +1261,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1252 /* 1261 /*
1253 * We are ready again to receive requests from mac80211. 1262 * We are ready again to receive requests from mac80211.
1254 */ 1263 */
1255 __set_bit(DEVICE_PRESENT, &rt2x00dev->flags); 1264 set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1256 1265
1257 /* 1266 /*
1258 * It is possible that during that mac80211 has attempted 1267 * It is possible that during that mac80211 has attempted
@@ -1272,7 +1281,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1272 return 0; 1281 return 0;
1273 1282
1274exit: 1283exit:
1275 rt2x00lib_disable_radio(rt2x00dev); 1284 rt2x00lib_stop(rt2x00dev);
1276 rt2x00lib_uninitialize(rt2x00dev); 1285 rt2x00lib_uninitialize(rt2x00dev);
1277 rt2x00debug_deregister(rt2x00dev); 1286 rt2x00debug_deregister(rt2x00dev);
1278 1287
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index c5fb3a72cf37..797eb619aa0a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -88,7 +88,7 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev);
88 */ 88 */
89void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, 89void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
90 struct rt2x00_intf *intf, 90 struct rt2x00_intf *intf,
91 enum ieee80211_if_types type, 91 enum nl80211_iftype type,
92 u8 *mac, u8 *bssid); 92 u8 *mac, u8 *bssid);
93void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, 93void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
94 struct rt2x00_intf *intf, 94 struct rt2x00_intf *intf,
@@ -181,6 +181,8 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev);
181void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev); 181void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev);
182void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, 182void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
183 enum rt2x00_dump_type type, struct sk_buff *skb); 183 enum rt2x00_dump_type type, struct sk_buff *skb);
184void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
185 enum cipher cipher, enum rx_crypto status);
184#else 186#else
185static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 187static inline void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
186{ 188{
@@ -195,9 +197,54 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
195 struct sk_buff *skb) 197 struct sk_buff *skb)
196{ 198{
197} 199}
200
201static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
202 enum cipher cipher,
203 enum rx_crypto status)
204{
205}
198#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 206#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
199 207
200/* 208/*
209 * Crypto handlers.
210 */
211#ifdef CONFIG_RT2X00_LIB_CRYPTO
212enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key);
213unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info);
214void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len);
215void rt2x00crypto_tx_insert_iv(struct sk_buff *skb);
216void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
217 unsigned int header_length,
218 struct rxdone_entry_desc *rxdesc);
219#else
220static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
221{
222 return CIPHER_NONE;
223}
224
225static inline unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
226{
227 return 0;
228}
229
230static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
231 unsigned int iv_len)
232{
233}
234
235static inline void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
236{
237}
238
239static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
240 unsigned int align,
241 unsigned int header_length,
242 struct rxdone_entry_desc *rxdesc)
243{
244}
245#endif
246
247/*
201 * RFkill handlers. 248 * RFkill handlers.
202 */ 249 */
203#ifdef CONFIG_RT2X00_LIB_RFKILL 250#ifdef CONFIG_RT2X00_LIB_RFKILL
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index d06507388635..2c6cc5c374ff 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -36,21 +36,22 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb); 36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
37 struct ieee80211_tx_info *rts_info; 37 struct ieee80211_tx_info *rts_info;
38 struct sk_buff *skb; 38 struct sk_buff *skb;
39 int size; 39 unsigned int data_length;
40 int retval = 0;
40 41
41 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 42 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
42 size = sizeof(struct ieee80211_cts); 43 data_length = sizeof(struct ieee80211_cts);
43 else 44 else
44 size = sizeof(struct ieee80211_rts); 45 data_length = sizeof(struct ieee80211_rts);
45 46
46 skb = dev_alloc_skb(size + rt2x00dev->hw->extra_tx_headroom); 47 skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
47 if (!skb) { 48 if (unlikely(!skb)) {
48 WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n"); 49 WARNING(rt2x00dev, "Failed to create RTS/CTS frame.\n");
49 return NETDEV_TX_BUSY; 50 return -ENOMEM;
50 } 51 }
51 52
52 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); 53 skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
53 skb_put(skb, size); 54 skb_put(skb, data_length);
54 55
55 /* 56 /*
56 * Copy TX information over from original frame to 57 * Copy TX information over from original frame to
@@ -63,7 +64,6 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
63 */ 64 */
64 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); 65 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
65 rts_info = IEEE80211_SKB_CB(skb); 66 rts_info = IEEE80211_SKB_CB(skb);
66 rts_info->control.hw_key = NULL;
67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS; 67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS;
68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT; 68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS; 69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
@@ -73,22 +73,33 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
73 else 73 else
74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK; 74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
75 75
76 skb->do_not_encrypt = 1;
77
78 /*
79 * RTS/CTS frame should use the length of the frame plus any
80 * encryption overhead that will be added by the hardware.
81 */
82#ifdef CONFIG_RT2X00_LIB_CRYPTO
83 if (!frag_skb->do_not_encrypt)
84 data_length += rt2x00crypto_tx_overhead(tx_info);
85#endif /* CONFIG_RT2X00_LIB_CRYPTO */
86
76 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 87 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)
77 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif, 88 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
78 frag_skb->data, size, tx_info, 89 frag_skb->data, data_length, tx_info,
79 (struct ieee80211_cts *)(skb->data)); 90 (struct ieee80211_cts *)(skb->data));
80 else 91 else
81 ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif, 92 ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
82 frag_skb->data, size, tx_info, 93 frag_skb->data, data_length, tx_info,
83 (struct ieee80211_rts *)(skb->data)); 94 (struct ieee80211_rts *)(skb->data));
84 95
85 if (rt2x00queue_write_tx_frame(queue, skb)) { 96 retval = rt2x00queue_write_tx_frame(queue, skb);
97 if (retval) {
86 dev_kfree_skb_any(skb); 98 dev_kfree_skb_any(skb);
87 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 99 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
88 return NETDEV_TX_BUSY;
89 } 100 }
90 101
91 return NETDEV_TX_OK; 102 return retval;
92} 103}
93 104
94int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 105int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -106,11 +117,8 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
106 * Note that we can only stop the TX queues inside the TX path 117 * Note that we can only stop the TX queues inside the TX path
107 * due to possible race conditions in mac80211. 118 * due to possible race conditions in mac80211.
108 */ 119 */
109 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) { 120 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
110 ieee80211_stop_queues(hw); 121 goto exit_fail;
111 dev_kfree_skb_any(skb);
112 return NETDEV_TX_OK;
113 }
114 122
115 /* 123 /*
116 * Determine which queue to put packet on. 124 * Determine which queue to put packet on.
@@ -141,26 +149,25 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
141 if ((tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS | 149 if ((tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS |
142 IEEE80211_TX_CTL_USE_CTS_PROTECT)) && 150 IEEE80211_TX_CTL_USE_CTS_PROTECT)) &&
143 !rt2x00dev->ops->hw->set_rts_threshold) { 151 !rt2x00dev->ops->hw->set_rts_threshold) {
144 if (rt2x00queue_available(queue) <= 1) { 152 if (rt2x00queue_available(queue) <= 1)
145 ieee80211_stop_queue(rt2x00dev->hw, qid); 153 goto exit_fail;
146 return NETDEV_TX_BUSY;
147 }
148
149 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) {
150 ieee80211_stop_queue(rt2x00dev->hw, qid);
151 return NETDEV_TX_BUSY;
152 }
153 }
154 154
155 if (rt2x00queue_write_tx_frame(queue, skb)) { 155 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))
156 ieee80211_stop_queue(rt2x00dev->hw, qid); 156 goto exit_fail;
157 return NETDEV_TX_BUSY;
158 } 157 }
159 158
159 if (rt2x00queue_write_tx_frame(queue, skb))
160 goto exit_fail;
161
160 if (rt2x00queue_threshold(queue)) 162 if (rt2x00queue_threshold(queue))
161 ieee80211_stop_queue(rt2x00dev->hw, qid); 163 ieee80211_stop_queue(rt2x00dev->hw, qid);
162 164
163 return NETDEV_TX_OK; 165 return NETDEV_TX_OK;
166
167 exit_fail:
168 ieee80211_stop_queue(rt2x00dev->hw, qid);
169 dev_kfree_skb_any(skb);
170 return NETDEV_TX_OK;
164} 171}
165EXPORT_SYMBOL_GPL(rt2x00mac_tx); 172EXPORT_SYMBOL_GPL(rt2x00mac_tx);
166 173
@@ -168,7 +175,7 @@ int rt2x00mac_start(struct ieee80211_hw *hw)
168{ 175{
169 struct rt2x00_dev *rt2x00dev = hw->priv; 176 struct rt2x00_dev *rt2x00dev = hw->priv;
170 177
171 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 178 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
172 return 0; 179 return 0;
173 180
174 return rt2x00lib_start(rt2x00dev); 181 return rt2x00lib_start(rt2x00dev);
@@ -179,7 +186,7 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
179{ 186{
180 struct rt2x00_dev *rt2x00dev = hw->priv; 187 struct rt2x00_dev *rt2x00dev = hw->priv;
181 188
182 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 189 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
183 return; 190 return;
184 191
185 rt2x00lib_stop(rt2x00dev); 192 rt2x00lib_stop(rt2x00dev);
@@ -199,12 +206,12 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
199 * Don't allow interfaces to be added 206 * Don't allow interfaces to be added
200 * the device has disappeared. 207 * the device has disappeared.
201 */ 208 */
202 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || 209 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
203 !test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 210 !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
204 return -ENODEV; 211 return -ENODEV;
205 212
206 switch (conf->type) { 213 switch (conf->type) {
207 case IEEE80211_IF_TYPE_AP: 214 case NL80211_IFTYPE_AP:
208 /* 215 /*
209 * We don't support mixed combinations of 216 * We don't support mixed combinations of
210 * sta and ap interfaces. 217 * sta and ap interfaces.
@@ -220,8 +227,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
220 return -ENOBUFS; 227 return -ENOBUFS;
221 228
222 break; 229 break;
223 case IEEE80211_IF_TYPE_STA: 230 case NL80211_IFTYPE_STATION:
224 case IEEE80211_IF_TYPE_IBSS: 231 case NL80211_IFTYPE_ADHOC:
225 /* 232 /*
226 * We don't support mixed combinations of 233 * We don't support mixed combinations of
227 * sta and ap interfaces. 234 * sta and ap interfaces.
@@ -249,7 +256,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
249 */ 256 */
250 for (i = 0; i < queue->limit; i++) { 257 for (i = 0; i < queue->limit; i++) {
251 entry = &queue->entries[i]; 258 entry = &queue->entries[i];
252 if (!__test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags)) 259 if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
253 break; 260 break;
254 } 261 }
255 262
@@ -261,7 +268,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
261 * increase interface count and start initialization. 268 * increase interface count and start initialization.
262 */ 269 */
263 270
264 if (conf->type == IEEE80211_IF_TYPE_AP) 271 if (conf->type == NL80211_IFTYPE_AP)
265 rt2x00dev->intf_ap_count++; 272 rt2x00dev->intf_ap_count++;
266 else 273 else
267 rt2x00dev->intf_sta_count++; 274 rt2x00dev->intf_sta_count++;
@@ -270,7 +277,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
270 spin_lock_init(&intf->seqlock); 277 spin_lock_init(&intf->seqlock);
271 intf->beacon = entry; 278 intf->beacon = entry;
272 279
273 if (conf->type == IEEE80211_IF_TYPE_AP) 280 if (conf->type == NL80211_IFTYPE_AP)
274 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); 281 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
275 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); 282 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
276 283
@@ -303,12 +310,12 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
303 * either the device has disappeared or when 310 * either the device has disappeared or when
304 * no interface is present. 311 * no interface is present.
305 */ 312 */
306 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || 313 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
307 (conf->type == IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_ap_count) || 314 (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
308 (conf->type != IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_sta_count)) 315 (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
309 return; 316 return;
310 317
311 if (conf->type == IEEE80211_IF_TYPE_AP) 318 if (conf->type == NL80211_IFTYPE_AP)
312 rt2x00dev->intf_ap_count--; 319 rt2x00dev->intf_ap_count--;
313 else 320 else
314 rt2x00dev->intf_sta_count--; 321 rt2x00dev->intf_sta_count--;
@@ -317,59 +324,59 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
317 * Release beacon entry so it is available for 324 * Release beacon entry so it is available for
318 * new interfaces again. 325 * new interfaces again.
319 */ 326 */
320 __clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags); 327 clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags);
321 328
322 /* 329 /*
323 * Make sure the bssid and mac address registers 330 * Make sure the bssid and mac address registers
324 * are cleared to prevent false ACKing of frames. 331 * are cleared to prevent false ACKing of frames.
325 */ 332 */
326 rt2x00lib_config_intf(rt2x00dev, intf, 333 rt2x00lib_config_intf(rt2x00dev, intf,
327 IEEE80211_IF_TYPE_INVALID, NULL, NULL); 334 NL80211_IFTYPE_UNSPECIFIED, NULL, NULL);
328} 335}
329EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); 336EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
330 337
331int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 338int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
332{ 339{
333 struct rt2x00_dev *rt2x00dev = hw->priv; 340 struct rt2x00_dev *rt2x00dev = hw->priv;
334 int force_reconfig; 341 int radio_on;
342 int status;
335 343
336 /* 344 /*
337 * Mac80211 might be calling this function while we are trying 345 * Mac80211 might be calling this function while we are trying
338 * to remove the device or perhaps suspending it. 346 * to remove the device or perhaps suspending it.
339 */ 347 */
340 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 348 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
341 return 0; 349 return 0;
342 350
343 /* 351 /*
344 * Check if we need to disable the radio, 352 * Only change device state when the radio is enabled. It does not
345 * if this is not the case, at least the RX must be disabled. 353 * matter what parameters we have configured when the radio is disabled
354 * because we won't be able to send or receive anyway. Also note that
355 * some configuration parameters (e.g. channel and antenna values) can
356 * only be set when the radio is enabled.
346 */ 357 */
347 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) { 358 radio_on = test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
348 if (!conf->radio_enabled) 359 if (conf->radio_enabled) {
349 rt2x00lib_disable_radio(rt2x00dev); 360 /* For programming the values, we have to turn RX off */
350 else 361 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
351 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
352 }
353 362
354 /* 363 /* Enable the radio */
355 * When the DEVICE_DIRTY_CONFIG flag is set, the device has recently 364 status = rt2x00lib_enable_radio(rt2x00dev);
356 * been started and the configuration must be forced upon the hardware. 365 if (unlikely(status))
357 * Otherwise registers will not be intialized correctly and could 366 return status;
358 * result in non-working hardware because essential registers aren't
359 * initialized.
360 */
361 force_reconfig =
362 __test_and_clear_bit(DEVICE_DIRTY_CONFIG, &rt2x00dev->flags);
363 367
364 rt2x00lib_config(rt2x00dev, conf, force_reconfig); 368 /*
369 * When we've just turned on the radio, we want to reprogram
370 * everything to ensure a consistent state
371 */
372 rt2x00lib_config(rt2x00dev, conf, !radio_on);
365 373
366 /* 374 /* Turn RX back on */
367 * Reenable RX only if the radio should be on.
368 */
369 if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
370 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); 375 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
371 else if (conf->radio_enabled) 376 } else {
372 return rt2x00lib_enable_radio(rt2x00dev); 377 /* Disable the radio */
378 rt2x00lib_disable_radio(rt2x00dev);
379 }
373 380
374 return 0; 381 return 0;
375} 382}
@@ -388,7 +395,7 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
388 * Mac80211 might be calling this function while we are trying 395 * Mac80211 might be calling this function while we are trying
389 * to remove the device or perhaps suspending it. 396 * to remove the device or perhaps suspending it.
390 */ 397 */
391 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 398 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
392 return 0; 399 return 0;
393 400
394 spin_lock(&intf->lock); 401 spin_lock(&intf->lock);
@@ -467,6 +474,91 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
467} 474}
468EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); 475EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
469 476
477#ifdef CONFIG_RT2X00_LIB_CRYPTO
478int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
479 const u8 *local_address, const u8 *address,
480 struct ieee80211_key_conf *key)
481{
482 struct rt2x00_dev *rt2x00dev = hw->priv;
483 int (*set_key) (struct rt2x00_dev *rt2x00dev,
484 struct rt2x00lib_crypto *crypto,
485 struct ieee80211_key_conf *key);
486 struct rt2x00lib_crypto crypto;
487
488 if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
489 return -EOPNOTSUPP;
490 else if (key->keylen > 32)
491 return -ENOSPC;
492
493 memset(&crypto, 0, sizeof(crypto));
494
495 /*
496 * When in STA mode, bssidx is always 0 otherwise local_address[5]
497 * contains the bss number, see BSS_ID_MASK comments for details.
498 */
499 if (rt2x00dev->intf_sta_count)
500 crypto.bssidx = 0;
501 else
502 crypto.bssidx =
503 local_address[5] & (rt2x00dev->ops->max_ap_intf - 1);
504
505 crypto.cipher = rt2x00crypto_key_to_cipher(key);
506 if (crypto.cipher == CIPHER_NONE)
507 return -EOPNOTSUPP;
508
509 crypto.cmd = cmd;
510 crypto.address = address;
511
512 if (crypto.cipher == CIPHER_TKIP) {
513 if (key->keylen > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
514 memcpy(&crypto.key,
515 &key->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
516 sizeof(crypto.key));
517
518 if (key->keylen > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
519 memcpy(&crypto.tx_mic,
520 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
521 sizeof(crypto.tx_mic));
522
523 if (key->keylen > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
524 memcpy(&crypto.rx_mic,
525 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
526 sizeof(crypto.rx_mic));
527 } else
528 memcpy(&crypto.key, &key->key[0], key->keylen);
529
530 /*
531 * Each BSS has a maximum of 4 shared keys.
532 * Shared key index values:
533 * 0) BSS0 key0
534 * 1) BSS0 key1
535 * ...
536 * 4) BSS1 key0
537 * ...
538 * 8) BSS2 key0
539 * ...
540 * Both pairwise as shared key indeces are determined by
541 * driver. This is required because the hardware requires
542 * keys to be assigned in correct order (When key 1 is
543 * provided but key 0 is not, then the key is not found
544 * by the hardware during RX).
545 */
546 if (cmd == SET_KEY)
547 key->hw_key_idx = 0;
548
549 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
550 set_key = rt2x00dev->ops->lib->config_pairwise_key;
551 else
552 set_key = rt2x00dev->ops->lib->config_shared_key;
553
554 if (!set_key)
555 return -EOPNOTSUPP;
556
557 return set_key(rt2x00dev, &crypto, key);
558}
559EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
560#endif /* CONFIG_RT2X00_LIB_CRYPTO */
561
470int rt2x00mac_get_stats(struct ieee80211_hw *hw, 562int rt2x00mac_get_stats(struct ieee80211_hw *hw,
471 struct ieee80211_low_level_stats *stats) 563 struct ieee80211_low_level_stats *stats)
472{ 564{
@@ -575,10 +667,11 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
575 queue->cw_max = 10; /* cw_min: 2^10 = 1024. */ 667 queue->cw_max = 10; /* cw_min: 2^10 = 1024. */
576 668
577 queue->aifs = params->aifs; 669 queue->aifs = params->aifs;
670 queue->txop = params->txop;
578 671
579 INFO(rt2x00dev, 672 INFO(rt2x00dev,
580 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d.\n", 673 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d.\n",
581 queue_idx, queue->cw_min, queue->cw_max, queue->aifs); 674 queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop);
582 675
583 return 0; 676 return 0;
584} 677}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 898cdd7f57d9..b7f4fe8fba6e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -33,10 +33,11 @@
33struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev, 33struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
34 struct queue_entry *entry) 34 struct queue_entry *entry)
35{ 35{
36 unsigned int frame_size;
37 unsigned int reserved_size;
38 struct sk_buff *skb; 36 struct sk_buff *skb;
39 struct skb_frame_desc *skbdesc; 37 struct skb_frame_desc *skbdesc;
38 unsigned int frame_size;
39 unsigned int head_size = 0;
40 unsigned int tail_size = 0;
40 41
41 /* 42 /*
42 * The frame size includes descriptor size, because the 43 * The frame size includes descriptor size, because the
@@ -49,16 +50,32 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
49 * this means we need at least 3 bytes for moving the frame 50 * this means we need at least 3 bytes for moving the frame
50 * into the correct offset. 51 * into the correct offset.
51 */ 52 */
52 reserved_size = 4; 53 head_size = 4;
54
55 /*
56 * For IV/EIV/ICV assembly we must make sure there is
57 * at least 8 bytes bytes available in headroom for IV/EIV
58 * and 4 bytes for ICV data as tailroon.
59 */
60#ifdef CONFIG_RT2X00_LIB_CRYPTO
61 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
62 head_size += 8;
63 tail_size += 4;
64 }
65#endif /* CONFIG_RT2X00_LIB_CRYPTO */
53 66
54 /* 67 /*
55 * Allocate skbuffer. 68 * Allocate skbuffer.
56 */ 69 */
57 skb = dev_alloc_skb(frame_size + reserved_size); 70 skb = dev_alloc_skb(frame_size + head_size + tail_size);
58 if (!skb) 71 if (!skb)
59 return NULL; 72 return NULL;
60 73
61 skb_reserve(skb, reserved_size); 74 /*
75 * Make sure we not have a frame with the requested bytes
76 * available in the head and tail.
77 */
78 skb_reserve(skb, head_size);
62 skb_put(skb, frame_size); 79 skb_put(skb, frame_size);
63 80
64 /* 81 /*
@@ -83,8 +100,21 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
83{ 100{
84 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 101 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
85 102
86 skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, 103 /*
87 DMA_TO_DEVICE); 104 * If device has requested headroom, we should make sure that
105 * is also mapped to the DMA so it can be used for transfering
106 * additional descriptor information to the hardware.
107 */
108 skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
109
110 skbdesc->skb_dma =
111 dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
112
113 /*
114 * Restore data pointer to original location again.
115 */
116 skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
117
88 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; 118 skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
89} 119}
90EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); 120EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);
@@ -100,7 +130,12 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
100 } 130 }
101 131
102 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { 132 if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
103 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len, 133 /*
134 * Add headroom to the skb length, it has been removed
135 * by the driver, but it was actually mapped to DMA.
136 */
137 dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
138 skb->len + rt2x00dev->hw->extra_tx_headroom,
104 DMA_TO_DEVICE); 139 DMA_TO_DEVICE);
105 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; 140 skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
106 } 141 }
@@ -120,7 +155,6 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
120{ 155{
121 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 156 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
122 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 157 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
123 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
124 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 158 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
125 struct ieee80211_rate *rate = 159 struct ieee80211_rate *rate =
126 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 160 ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
@@ -140,7 +174,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
140 txdesc->cw_max = entry->queue->cw_max; 174 txdesc->cw_max = entry->queue->cw_max;
141 txdesc->aifs = entry->queue->aifs; 175 txdesc->aifs = entry->queue->aifs;
142 176
143 /* Data length should be extended with 4 bytes for CRC */ 177 /* Data length + CRC + IV/EIV/ICV/MMIC (when using encryption) */
144 data_length = entry->skb->len + 4; 178 data_length = entry->skb->len + 4;
145 179
146 /* 180 /*
@@ -149,6 +183,35 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
149 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) 183 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
150 __set_bit(ENTRY_TXD_ACK, &txdesc->flags); 184 __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
151 185
186#ifdef CONFIG_RT2X00_LIB_CRYPTO
187 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
188 !entry->skb->do_not_encrypt) {
189 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
190
191 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
192
193 txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
194
195 if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
196 __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
197
198 txdesc->key_idx = hw_key->hw_key_idx;
199 txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
200
201 /*
202 * Extend frame length to include all encryption overhead
203 * that will be added by the hardware.
204 */
205 data_length += rt2x00crypto_tx_overhead(tx_info);
206
207 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
208 __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
209
210 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
211 __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
212 }
213#endif /* CONFIG_RT2X00_LIB_CRYPTO */
214
152 /* 215 /*
153 * Check if this is a RTS/CTS frame 216 * Check if this is a RTS/CTS frame
154 */ 217 */
@@ -214,16 +277,22 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
214 * sequence counter given by mac80211. 277 * sequence counter given by mac80211.
215 */ 278 */
216 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 279 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
217 spin_lock_irqsave(&intf->seqlock, irqflags); 280 if (likely(tx_info->control.vif)) {
281 struct rt2x00_intf *intf;
282
283 intf = vif_to_intf(tx_info->control.vif);
284
285 spin_lock_irqsave(&intf->seqlock, irqflags);
218 286
219 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 287 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
220 intf->seqno += 0x10; 288 intf->seqno += 0x10;
221 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 289 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
222 hdr->seq_ctrl |= cpu_to_le16(intf->seqno); 290 hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
223 291
224 spin_unlock_irqrestore(&intf->seqlock, irqflags); 292 spin_unlock_irqrestore(&intf->seqlock, irqflags);
225 293
226 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 294 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
295 }
227 } 296 }
228 297
229 /* 298 /*
@@ -305,11 +374,12 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
305 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 374 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
306 struct txentry_desc txdesc; 375 struct txentry_desc txdesc;
307 struct skb_frame_desc *skbdesc; 376 struct skb_frame_desc *skbdesc;
377 unsigned int iv_len = IEEE80211_SKB_CB(skb)->control.iv_len;
308 378
309 if (unlikely(rt2x00queue_full(queue))) 379 if (unlikely(rt2x00queue_full(queue)))
310 return -EINVAL; 380 return -EINVAL;
311 381
312 if (__test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) { 382 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
313 ERROR(queue->rt2x00dev, 383 ERROR(queue->rt2x00dev,
314 "Arrived at non-free entry in the non-full queue %d.\n" 384 "Arrived at non-free entry in the non-full queue %d.\n"
315 "Please file bug report to %s.\n", 385 "Please file bug report to %s.\n",
@@ -326,21 +396,39 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
326 rt2x00queue_create_tx_descriptor(entry, &txdesc); 396 rt2x00queue_create_tx_descriptor(entry, &txdesc);
327 397
328 /* 398 /*
329 * skb->cb array is now ours and we are free to use it. 399 * All information is retreived from the skb->cb array,
400 * now we should claim ownership of the driver part of that
401 * array.
330 */ 402 */
331 skbdesc = get_skb_frame_desc(entry->skb); 403 skbdesc = get_skb_frame_desc(entry->skb);
332 memset(skbdesc, 0, sizeof(*skbdesc)); 404 memset(skbdesc, 0, sizeof(*skbdesc));
333 skbdesc->entry = entry; 405 skbdesc->entry = entry;
334 406
407 /*
408 * When hardware encryption is supported, and this frame
409 * is to be encrypted, we should strip the IV/EIV data from
410 * the frame so we can provide it to the driver seperately.
411 */
412 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
413 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags))
414 rt2x00crypto_tx_remove_iv(skb, iv_len);
415
416 /*
417 * It could be possible that the queue was corrupted and this
418 * call failed. Just drop the frame, we cannot rollback and pass
419 * the frame to mac80211 because the skb->cb has now been tainted.
420 */
335 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) { 421 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
336 __clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 422 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
337 return -EIO; 423 dev_kfree_skb_any(entry->skb);
424 entry->skb = NULL;
425 return 0;
338 } 426 }
339 427
340 if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) 428 if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
341 rt2x00queue_map_txskb(queue->rt2x00dev, skb); 429 rt2x00queue_map_txskb(queue->rt2x00dev, skb);
342 430
343 __set_bit(ENTRY_DATA_PENDING, &entry->flags); 431 set_bit(ENTRY_DATA_PENDING, &entry->flags);
344 432
345 rt2x00queue_index_inc(queue, Q_INDEX); 433 rt2x00queue_index_inc(queue, Q_INDEX);
346 rt2x00queue_write_tx_descriptor(entry, &txdesc); 434 rt2x00queue_write_tx_descriptor(entry, &txdesc);
@@ -653,6 +741,7 @@ static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
653 741
654 queue->rt2x00dev = rt2x00dev; 742 queue->rt2x00dev = rt2x00dev;
655 queue->qid = qid; 743 queue->qid = qid;
744 queue->txop = 0;
656 queue->aifs = 2; 745 queue->aifs = 2;
657 queue->cw_min = 5; 746 queue->cw_min = 5;
658 queue->cw_max = 10; 747 queue->cw_max = 10;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index ff78e52ce43c..9dbf04f0f04c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -87,10 +87,13 @@ enum data_queue_qid {
87 * 87 *
88 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX 88 * @SKBDESC_DMA_MAPPED_RX: &skb_dma field has been mapped for RX
89 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX 89 * @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
90 * @FRAME_DESC_IV_STRIPPED: Frame contained a IV/EIV provided by
91 * mac80211 but was stripped for processing by the driver.
90 */ 92 */
91enum skb_frame_desc_flags { 93enum skb_frame_desc_flags {
92 SKBDESC_DMA_MAPPED_RX = (1 << 0), 94 SKBDESC_DMA_MAPPED_RX = 1 << 0,
93 SKBDESC_DMA_MAPPED_TX = (1 << 1), 95 SKBDESC_DMA_MAPPED_TX = 1 << 1,
96 FRAME_DESC_IV_STRIPPED = 1 << 2,
94}; 97};
95 98
96/** 99/**
@@ -104,6 +107,8 @@ enum skb_frame_desc_flags {
104 * @desc: Pointer to descriptor part of the frame. 107 * @desc: Pointer to descriptor part of the frame.
105 * Note that this pointer could point to something outside 108 * Note that this pointer could point to something outside
106 * of the scope of the skb->data pointer. 109 * of the scope of the skb->data pointer.
110 * @iv: IV data used during encryption/decryption.
111 * @eiv: EIV data used during encryption/decryption.
107 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer. 112 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
108 * @entry: The entry to which this sk buffer belongs. 113 * @entry: The entry to which this sk buffer belongs.
109 */ 114 */
@@ -113,6 +118,9 @@ struct skb_frame_desc {
113 unsigned int desc_len; 118 unsigned int desc_len;
114 void *desc; 119 void *desc;
115 120
121 __le32 iv;
122 __le32 eiv;
123
116 dma_addr_t skb_dma; 124 dma_addr_t skb_dma;
117 125
118 struct queue_entry *entry; 126 struct queue_entry *entry;
@@ -132,13 +140,14 @@ static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
132/** 140/**
133 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc 141 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
134 * 142 *
135 * @RXDONE_SIGNAL_PLCP: Does the signal field contain the plcp value, 143 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
136 * or does it contain the bitrate itself. 144 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
137 * @RXDONE_MY_BSS: Does this frame originate from device's BSS. 145 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
138 */ 146 */
139enum rxdone_entry_desc_flags { 147enum rxdone_entry_desc_flags {
140 RXDONE_SIGNAL_PLCP = 1 << 0, 148 RXDONE_SIGNAL_PLCP = 1 << 0,
141 RXDONE_MY_BSS = 1 << 1, 149 RXDONE_SIGNAL_BITRATE = 1 << 1,
150 RXDONE_MY_BSS = 1 << 2,
142}; 151};
143 152
144/** 153/**
@@ -152,7 +161,11 @@ enum rxdone_entry_desc_flags {
152 * @size: Data size of the received frame. 161 * @size: Data size of the received frame.
153 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags). 162 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
154 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags). 163 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
155 164 * @cipher: Cipher type used during decryption.
165 * @cipher_status: Decryption status.
166 * @iv: IV data used during decryption.
167 * @eiv: EIV data used during decryption.
168 * @icv: ICV data used during decryption.
156 */ 169 */
157struct rxdone_entry_desc { 170struct rxdone_entry_desc {
158 u64 timestamp; 171 u64 timestamp;
@@ -161,6 +174,12 @@ struct rxdone_entry_desc {
161 int size; 174 int size;
162 int flags; 175 int flags;
163 int dev_flags; 176 int dev_flags;
177 u8 cipher;
178 u8 cipher_status;
179
180 __le32 iv;
181 __le32 eiv;
182 __le32 icv;
164}; 183};
165 184
166/** 185/**
@@ -206,6 +225,10 @@ struct txdone_entry_desc {
206 * @ENTRY_TXD_BURST: This frame belongs to the same burst event. 225 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
207 * @ENTRY_TXD_ACK: An ACK is required for this frame. 226 * @ENTRY_TXD_ACK: An ACK is required for this frame.
208 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used. 227 * @ENTRY_TXD_RETRY_MODE: When set, the long retry count is used.
228 * @ENTRY_TXD_ENCRYPT: This frame should be encrypted.
229 * @ENTRY_TXD_ENCRYPT_PAIRWISE: Use pairwise key table (instead of shared).
230 * @ENTRY_TXD_ENCRYPT_IV: Generate IV/EIV in hardware.
231 * @ENTRY_TXD_ENCRYPT_MMIC: Generate MIC in hardware.
209 */ 232 */
210enum txentry_desc_flags { 233enum txentry_desc_flags {
211 ENTRY_TXD_RTS_FRAME, 234 ENTRY_TXD_RTS_FRAME,
@@ -218,6 +241,10 @@ enum txentry_desc_flags {
218 ENTRY_TXD_BURST, 241 ENTRY_TXD_BURST,
219 ENTRY_TXD_ACK, 242 ENTRY_TXD_ACK,
220 ENTRY_TXD_RETRY_MODE, 243 ENTRY_TXD_RETRY_MODE,
244 ENTRY_TXD_ENCRYPT,
245 ENTRY_TXD_ENCRYPT_PAIRWISE,
246 ENTRY_TXD_ENCRYPT_IV,
247 ENTRY_TXD_ENCRYPT_MMIC,
221}; 248};
222 249
223/** 250/**
@@ -236,6 +263,9 @@ enum txentry_desc_flags {
236 * @ifs: IFS value. 263 * @ifs: IFS value.
237 * @cw_min: cwmin value. 264 * @cw_min: cwmin value.
238 * @cw_max: cwmax value. 265 * @cw_max: cwmax value.
266 * @cipher: Cipher type used for encryption.
267 * @key_idx: Key index used for encryption.
268 * @iv_offset: Position where IV should be inserted by hardware.
239 */ 269 */
240struct txentry_desc { 270struct txentry_desc {
241 unsigned long flags; 271 unsigned long flags;
@@ -252,6 +282,10 @@ struct txentry_desc {
252 short ifs; 282 short ifs;
253 short cw_min; 283 short cw_min;
254 short cw_max; 284 short cw_max;
285
286 enum cipher cipher;
287 u16 key_idx;
288 u16 iv_offset;
255}; 289};
256 290
257/** 291/**
@@ -335,6 +369,7 @@ enum queue_index {
335 * @length: Number of frames in queue. 369 * @length: Number of frames in queue.
336 * @index: Index pointers to entry positions in the queue, 370 * @index: Index pointers to entry positions in the queue,
337 * use &enum queue_index to get a specific index field. 371 * use &enum queue_index to get a specific index field.
372 * @txop: maximum burst time.
338 * @aifs: The aifs value for outgoing frames (field ignored in RX queue). 373 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
339 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue). 374 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
340 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). 375 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
@@ -354,6 +389,7 @@ struct data_queue {
354 unsigned short length; 389 unsigned short length;
355 unsigned short index[Q_INDEX_MAX]; 390 unsigned short index[Q_INDEX_MAX];
356 391
392 unsigned short txop;
357 unsigned short aifs; 393 unsigned short aifs;
358 unsigned short cw_min; 394 unsigned short cw_min;
359 unsigned short cw_max; 395 unsigned short cw_max;
@@ -484,25 +520,51 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
484} 520}
485 521
486/** 522/**
487 * rt2x00_desc_read - Read a word from the hardware descriptor. 523 * _rt2x00_desc_read - Read a word from the hardware descriptor.
524 * @desc: Base descriptor address
525 * @word: Word index from where the descriptor should be read.
526 * @value: Address where the descriptor value should be written into.
527 */
528static inline void _rt2x00_desc_read(__le32 *desc, const u8 word, __le32 *value)
529{
530 *value = desc[word];
531}
532
533/**
534 * rt2x00_desc_read - Read a word from the hardware descriptor, this
535 * function will take care of the byte ordering.
488 * @desc: Base descriptor address 536 * @desc: Base descriptor address
489 * @word: Word index from where the descriptor should be read. 537 * @word: Word index from where the descriptor should be read.
490 * @value: Address where the descriptor value should be written into. 538 * @value: Address where the descriptor value should be written into.
491 */ 539 */
492static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value) 540static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
493{ 541{
494 *value = le32_to_cpu(desc[word]); 542 __le32 tmp;
543 _rt2x00_desc_read(desc, word, &tmp);
544 *value = le32_to_cpu(tmp);
545}
546
547/**
548 * rt2x00_desc_write - write a word to the hardware descriptor, this
549 * function will take care of the byte ordering.
550 * @desc: Base descriptor address
551 * @word: Word index from where the descriptor should be written.
552 * @value: Value that should be written into the descriptor.
553 */
554static inline void _rt2x00_desc_write(__le32 *desc, const u8 word, __le32 value)
555{
556 desc[word] = value;
495} 557}
496 558
497/** 559/**
498 * rt2x00_desc_write - wrote a word to the hardware descriptor. 560 * rt2x00_desc_write - write a word to the hardware descriptor.
499 * @desc: Base descriptor address 561 * @desc: Base descriptor address
500 * @word: Word index from where the descriptor should be written. 562 * @word: Word index from where the descriptor should be written.
501 * @value: Value that should be written into the descriptor. 563 * @value: Value that should be written into the descriptor.
502 */ 564 */
503static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value) 565static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
504{ 566{
505 desc[word] = cpu_to_le32(value); 567 _rt2x00_desc_write(desc, word, cpu_to_le32(value));
506} 568}
507 569
508#endif /* RT2X00QUEUE_H */ 570#endif /* RT2X00QUEUE_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 2ea7866abd5d..c2fba7c9f05c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -27,6 +27,16 @@
27#define RT2X00REG_H 27#define RT2X00REG_H
28 28
29/* 29/*
30 * RX crypto status
31 */
32enum rx_crypto {
33 RX_CRYPTO_SUCCESS = 0,
34 RX_CRYPTO_FAIL_ICV = 1,
35 RX_CRYPTO_FAIL_MIC = 2,
36 RX_CRYPTO_FAIL_KEY = 3,
37};
38
39/*
30 * Antenna values 40 * Antenna values
31 */ 41 */
32enum antenna { 42enum antenna {
@@ -104,7 +114,14 @@ enum cipher {
104 */ 114 */
105 CIPHER_CKIP64 = 5, 115 CIPHER_CKIP64 = 5,
106 CIPHER_CKIP128 = 6, 116 CIPHER_CKIP128 = 6,
107 CIPHER_TKIP_NO_MIC = 7, 117 CIPHER_TKIP_NO_MIC = 7, /* Don't send to device */
118
119/*
120 * Max cipher type.
121 * Note that CIPHER_NONE isn't counted, and CKIP64 and CKIP128
122 * are excluded due to limitations in mac80211.
123 */
124 CIPHER_MAX = 4,
108}; 125};
109 126
110/* 127/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
index 04b29716d356..55eff58f1889 100644
--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
@@ -41,20 +41,19 @@ static int rt2x00rfkill_toggle_radio(void *data, enum rfkill_state state)
41 /* 41 /*
42 * Only continue if there are enabled interfaces. 42 * Only continue if there are enabled interfaces.
43 */ 43 */
44 if (!test_bit(DEVICE_STARTED, &rt2x00dev->flags)) 44 if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
45 return 0; 45 return 0;
46 46
47 if (state == RFKILL_STATE_UNBLOCKED) { 47 if (state == RFKILL_STATE_UNBLOCKED) {
48 INFO(rt2x00dev, "Hardware button pressed, enabling radio.\n"); 48 INFO(rt2x00dev, "RFKILL event: enabling radio.\n");
49 __clear_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); 49 clear_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
50 retval = rt2x00lib_enable_radio(rt2x00dev); 50 retval = rt2x00lib_enable_radio(rt2x00dev);
51 } else if (state == RFKILL_STATE_SOFT_BLOCKED) { 51 } else if (state == RFKILL_STATE_SOFT_BLOCKED) {
52 INFO(rt2x00dev, "Hardware button pressed, disabling radio.\n"); 52 INFO(rt2x00dev, "RFKILL event: disabling radio.\n");
53 __set_bit(DEVICE_DISABLED_RADIO_HW, &rt2x00dev->flags); 53 set_bit(DEVICE_STATE_DISABLED_RADIO_HW, &rt2x00dev->flags);
54 rt2x00lib_disable_radio(rt2x00dev); 54 rt2x00lib_disable_radio(rt2x00dev);
55 } else { 55 } else {
56 WARNING(rt2x00dev, "Received unexpected rfkill state %d.\n", 56 WARNING(rt2x00dev, "RFKILL event: unknown state %d.\n", state);
57 state);
58 } 57 }
59 58
60 return retval; 59 return retval;
@@ -64,7 +63,12 @@ static int rt2x00rfkill_get_state(void *data, enum rfkill_state *state)
64{ 63{
65 struct rt2x00_dev *rt2x00dev = data; 64 struct rt2x00_dev *rt2x00dev = data;
66 65
67 *state = rt2x00dev->rfkill->state; 66 /*
67 * rfkill_poll reports 1 when the key has been pressed and the
68 * radio should be blocked.
69 */
70 *state = rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
71 RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
68 72
69 return 0; 73 return 0;
70} 74}
@@ -73,19 +77,18 @@ static void rt2x00rfkill_poll(struct work_struct *work)
73{ 77{
74 struct rt2x00_dev *rt2x00dev = 78 struct rt2x00_dev *rt2x00dev =
75 container_of(work, struct rt2x00_dev, rfkill_work.work); 79 container_of(work, struct rt2x00_dev, rfkill_work.work);
76 int state; 80 enum rfkill_state state;
77 81
78 if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) 82 if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state) ||
83 !test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
79 return; 84 return;
80 85
81 /* 86 /*
82 * rfkill_poll reports 1 when the key has been pressed and the 87 * Poll latest state and report it to rfkill who should sort
83 * radio should be blocked. 88 * out if the state should be toggled or not.
84 */ 89 */
85 state = !rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ? 90 if (!rt2x00rfkill_get_state(rt2x00dev, &state))
86 RFKILL_STATE_UNBLOCKED : RFKILL_STATE_SOFT_BLOCKED; 91 rfkill_force_state(rt2x00dev->rfkill, state);
87
88 rfkill_force_state(rt2x00dev->rfkill, state);
89 92
90 queue_delayed_work(rt2x00dev->hw->workqueue, 93 queue_delayed_work(rt2x00dev->hw->workqueue,
91 &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL); 94 &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL);
@@ -93,8 +96,8 @@ static void rt2x00rfkill_poll(struct work_struct *work)
93 96
94void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev) 97void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
95{ 98{
96 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) || 99 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
97 !test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state)) 100 test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
98 return; 101 return;
99 102
100 if (rfkill_register(rt2x00dev->rfkill)) { 103 if (rfkill_register(rt2x00dev->rfkill)) {
@@ -114,7 +117,7 @@ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
114 117
115void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev) 118void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
116{ 119{
117 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) || 120 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
118 !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state)) 121 !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
119 return; 122 return;
120 123
@@ -127,21 +130,25 @@ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
127 130
128void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev) 131void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
129{ 132{
130 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) 133 struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
134
135 if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
131 return; 136 return;
132 137
133 rt2x00dev->rfkill = 138 rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
134 rfkill_allocate(wiphy_dev(rt2x00dev->hw->wiphy), RFKILL_TYPE_WLAN);
135 if (!rt2x00dev->rfkill) { 139 if (!rt2x00dev->rfkill) {
136 ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n"); 140 ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
137 return; 141 return;
138 } 142 }
139 143
144 __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state);
145
140 rt2x00dev->rfkill->name = rt2x00dev->ops->name; 146 rt2x00dev->rfkill->name = rt2x00dev->ops->name;
141 rt2x00dev->rfkill->data = rt2x00dev; 147 rt2x00dev->rfkill->data = rt2x00dev;
142 rt2x00dev->rfkill->state = -1; 148 rt2x00dev->rfkill->state = -1;
143 rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio; 149 rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
144 rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state; 150 if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags))
151 rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
145 152
146 INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll); 153 INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
147 154
@@ -150,8 +157,7 @@ void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
150 157
151void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev) 158void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
152{ 159{
153 if (!test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags) || 160 if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->flags))
154 !test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
155 return; 161 return;
156 162
157 cancel_delayed_work_sync(&rt2x00dev->rfkill_work); 163 cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 2050227ea530..b73a7e0aeed4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -163,16 +163,11 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
163 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 163 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
164 struct txdone_entry_desc txdesc; 164 struct txdone_entry_desc txdesc;
165 165
166 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 166 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
167 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 167 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
168 return; 168 return;
169 169
170 /* 170 /*
171 * Remove the descriptor data from the buffer.
172 */
173 skb_pull(entry->skb, entry->queue->desc_size);
174
175 /*
176 * Obtain the status about this packet. 171 * Obtain the status about this packet.
177 * Note that when the status is 0 it does not mean the 172 * Note that when the status is 0 it does not mean the
178 * frame was send out correctly. It only means the frame 173 * frame was send out correctly. It only means the frame
@@ -224,6 +219,12 @@ int rt2x00usb_write_tx_data(struct queue_entry *entry)
224 entry->skb->data, length, 219 entry->skb->data, length,
225 rt2x00usb_interrupt_txdone, entry); 220 rt2x00usb_interrupt_txdone, entry);
226 221
222 /*
223 * Make sure the skb->data pointer points to the frame, not the
224 * descriptor.
225 */
226 skb_pull(entry->skb, entry->queue->desc_size);
227
227 return 0; 228 return 0;
228} 229}
229EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data); 230EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
@@ -232,7 +233,7 @@ static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
232{ 233{
233 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 234 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
234 235
235 if (__test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) 236 if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
236 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 237 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
237} 238}
238 239
@@ -283,7 +284,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
283 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 284 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
284 u8 rxd[32]; 285 u8 rxd[32];
285 286
286 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 287 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
287 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 288 !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
288 return; 289 return;
289 290
@@ -293,7 +294,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
293 * a problem. 294 * a problem.
294 */ 295 */
295 if (urb->actual_length < entry->queue->desc_size || urb->status) { 296 if (urb->actual_length < entry->queue->desc_size || urb->status) {
296 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 297 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
297 usb_submit_urb(urb, GFP_ATOMIC); 298 usb_submit_urb(urb, GFP_ATOMIC);
298 return; 299 return;
299 } 300 }
@@ -361,7 +362,7 @@ void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
361 entry->skb->data, entry->skb->len, 362 entry->skb->data, entry->skb->len,
362 rt2x00usb_interrupt_rxdone, entry); 363 rt2x00usb_interrupt_rxdone, entry);
363 364
364 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 365 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
365 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 366 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
366} 367}
367EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry); 368EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 087e90b328cd..a461620b489f 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -38,6 +38,13 @@
38#include "rt61pci.h" 38#include "rt61pci.h"
39 39
40/* 40/*
41 * Allow hardware encryption to be disabled.
42 */
43static int modparam_nohwcrypt = 0;
44module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
45MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
46
47/*
41 * Register access. 48 * Register access.
42 * BBP and RF register require indirect register access, 49 * BBP and RF register require indirect register access,
43 * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this. 50 * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this.
@@ -156,7 +163,7 @@ rf_write:
156 rt2x00_rf_write(rt2x00dev, word, value); 163 rt2x00_rf_write(rt2x00dev, word, value);
157} 164}
158 165
159#ifdef CONFIG_RT61PCI_LEDS 166#ifdef CONFIG_RT2X00_LIB_LEDS
160/* 167/*
161 * This function is only called from rt61pci_led_brightness() 168 * This function is only called from rt61pci_led_brightness()
162 * make gcc happy by placing this function inside the 169 * make gcc happy by placing this function inside the
@@ -188,7 +195,7 @@ static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
188 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1); 195 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
189 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg); 196 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
190} 197}
191#endif /* CONFIG_RT61PCI_LEDS */ 198#endif /* CONFIG_RT2X00_LIB_LEDS */
192 199
193static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 200static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
194{ 201{
@@ -264,7 +271,7 @@ static const struct rt2x00debug rt61pci_rt2x00debug = {
264}; 271};
265#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 272#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
266 273
267#ifdef CONFIG_RT61PCI_RFKILL 274#ifdef CONFIG_RT2X00_LIB_RFKILL
268static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) 275static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
269{ 276{
270 u32 reg; 277 u32 reg;
@@ -274,9 +281,9 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
274} 281}
275#else 282#else
276#define rt61pci_rfkill_poll NULL 283#define rt61pci_rfkill_poll NULL
277#endif /* CONFIG_RT61PCI_RFKILL */ 284#endif /* CONFIG_RT2X00_LIB_RFKILL */
278 285
279#ifdef CONFIG_RT61PCI_LEDS 286#ifdef CONFIG_RT2X00_LIB_LEDS
280static void rt61pci_brightness_set(struct led_classdev *led_cdev, 287static void rt61pci_brightness_set(struct led_classdev *led_cdev,
281 enum led_brightness brightness) 288 enum led_brightness brightness)
282{ 289{
@@ -341,11 +348,209 @@ static void rt61pci_init_led(struct rt2x00_dev *rt2x00dev,
341 led->led_dev.blink_set = rt61pci_blink_set; 348 led->led_dev.blink_set = rt61pci_blink_set;
342 led->flags = LED_INITIALIZED; 349 led->flags = LED_INITIALIZED;
343} 350}
344#endif /* CONFIG_RT61PCI_LEDS */ 351#endif /* CONFIG_RT2X00_LIB_LEDS */
345 352
346/* 353/*
347 * Configuration handlers. 354 * Configuration handlers.
348 */ 355 */
356static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
357 struct rt2x00lib_crypto *crypto,
358 struct ieee80211_key_conf *key)
359{
360 struct hw_key_entry key_entry;
361 struct rt2x00_field32 field;
362 u32 mask;
363 u32 reg;
364
365 if (crypto->cmd == SET_KEY) {
366 /*
367 * rt2x00lib can't determine the correct free
368 * key_idx for shared keys. We have 1 register
369 * with key valid bits. The goal is simple, read
370 * the register, if that is full we have no slots
371 * left.
372 * Note that each BSS is allowed to have up to 4
373 * shared keys, so put a mask over the allowed
374 * entries.
375 */
376 mask = (0xf << crypto->bssidx);
377
378 rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg);
379 reg &= mask;
380
381 if (reg && reg == mask)
382 return -ENOSPC;
383
384 key->hw_key_idx += reg ? ffz(reg) : 0;
385
386 /*
387 * Upload key to hardware
388 */
389 memcpy(key_entry.key, crypto->key,
390 sizeof(key_entry.key));
391 memcpy(key_entry.tx_mic, crypto->tx_mic,
392 sizeof(key_entry.tx_mic));
393 memcpy(key_entry.rx_mic, crypto->rx_mic,
394 sizeof(key_entry.rx_mic));
395
396 reg = SHARED_KEY_ENTRY(key->hw_key_idx);
397 rt2x00pci_register_multiwrite(rt2x00dev, reg,
398 &key_entry, sizeof(key_entry));
399
400 /*
401 * The cipher types are stored over 2 registers.
402 * bssidx 0 and 1 keys are stored in SEC_CSR1 and
403 * bssidx 1 and 2 keys are stored in SEC_CSR5.
404 * Using the correct defines correctly will cause overhead,
405 * so just calculate the correct offset.
406 */
407 if (key->hw_key_idx < 8) {
408 field.bit_offset = (3 * key->hw_key_idx);
409 field.bit_mask = 0x7 << field.bit_offset;
410
411 rt2x00pci_register_read(rt2x00dev, SEC_CSR1, &reg);
412 rt2x00_set_field32(&reg, field, crypto->cipher);
413 rt2x00pci_register_write(rt2x00dev, SEC_CSR1, reg);
414 } else {
415 field.bit_offset = (3 * (key->hw_key_idx - 8));
416 field.bit_mask = 0x7 << field.bit_offset;
417
418 rt2x00pci_register_read(rt2x00dev, SEC_CSR5, &reg);
419 rt2x00_set_field32(&reg, field, crypto->cipher);
420 rt2x00pci_register_write(rt2x00dev, SEC_CSR5, reg);
421 }
422
423 /*
424 * The driver does not support the IV/EIV generation
425 * in hardware. However it doesn't support the IV/EIV
426 * inside the ieee80211 frame either, but requires it
427 * to be provided seperately for the descriptor.
428 * rt2x00lib will cut the IV/EIV data out of all frames
429 * given to us by mac80211, but we must tell mac80211
430 * to generate the IV/EIV data.
431 */
432 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
433 }
434
435 /*
436 * SEC_CSR0 contains only single-bit fields to indicate
437 * a particular key is valid. Because using the FIELD32()
438 * defines directly will cause a lot of overhead we use
439 * a calculation to determine the correct bit directly.
440 */
441 mask = 1 << key->hw_key_idx;
442
443 rt2x00pci_register_read(rt2x00dev, SEC_CSR0, &reg);
444 if (crypto->cmd == SET_KEY)
445 reg |= mask;
446 else if (crypto->cmd == DISABLE_KEY)
447 reg &= ~mask;
448 rt2x00pci_register_write(rt2x00dev, SEC_CSR0, reg);
449
450 return 0;
451}
452
453static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
454 struct rt2x00lib_crypto *crypto,
455 struct ieee80211_key_conf *key)
456{
457 struct hw_pairwise_ta_entry addr_entry;
458 struct hw_key_entry key_entry;
459 u32 mask;
460 u32 reg;
461
462 if (crypto->cmd == SET_KEY) {
463 /*
464 * rt2x00lib can't determine the correct free
465 * key_idx for pairwise keys. We have 2 registers
466 * with key valid bits. The goal is simple, read
467 * the first register, if that is full move to
468 * the next register.
469 * When both registers are full, we drop the key,
470 * otherwise we use the first invalid entry.
471 */
472 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
473 if (reg && reg == ~0) {
474 key->hw_key_idx = 32;
475 rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg);
476 if (reg && reg == ~0)
477 return -ENOSPC;
478 }
479
480 key->hw_key_idx += reg ? ffz(reg) : 0;
481
482 /*
483 * Upload key to hardware
484 */
485 memcpy(key_entry.key, crypto->key,
486 sizeof(key_entry.key));
487 memcpy(key_entry.tx_mic, crypto->tx_mic,
488 sizeof(key_entry.tx_mic));
489 memcpy(key_entry.rx_mic, crypto->rx_mic,
490 sizeof(key_entry.rx_mic));
491
492 memset(&addr_entry, 0, sizeof(addr_entry));
493 memcpy(&addr_entry, crypto->address, ETH_ALEN);
494 addr_entry.cipher = crypto->cipher;
495
496 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
497 rt2x00pci_register_multiwrite(rt2x00dev, reg,
498 &key_entry, sizeof(key_entry));
499
500 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
501 rt2x00pci_register_multiwrite(rt2x00dev, reg,
502 &addr_entry, sizeof(addr_entry));
503
504 /*
505 * Enable pairwise lookup table for given BSS idx,
506 * without this received frames will not be decrypted
507 * by the hardware.
508 */
509 rt2x00pci_register_read(rt2x00dev, SEC_CSR4, &reg);
510 reg |= (1 << crypto->bssidx);
511 rt2x00pci_register_write(rt2x00dev, SEC_CSR4, reg);
512
513 /*
514 * The driver does not support the IV/EIV generation
515 * in hardware. However it doesn't support the IV/EIV
516 * inside the ieee80211 frame either, but requires it
517 * to be provided seperately for the descriptor.
518 * rt2x00lib will cut the IV/EIV data out of all frames
519 * given to us by mac80211, but we must tell mac80211
520 * to generate the IV/EIV data.
521 */
522 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
523 }
524
525 /*
526 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
527 * a particular key is valid. Because using the FIELD32()
528 * defines directly will cause a lot of overhead we use
529 * a calculation to determine the correct bit directly.
530 */
531 if (key->hw_key_idx < 32) {
532 mask = 1 << key->hw_key_idx;
533
534 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
535 if (crypto->cmd == SET_KEY)
536 reg |= mask;
537 else if (crypto->cmd == DISABLE_KEY)
538 reg &= ~mask;
539 rt2x00pci_register_write(rt2x00dev, SEC_CSR2, reg);
540 } else {
541 mask = 1 << (key->hw_key_idx - 32);
542
543 rt2x00pci_register_read(rt2x00dev, SEC_CSR3, &reg);
544 if (crypto->cmd == SET_KEY)
545 reg |= mask;
546 else if (crypto->cmd == DISABLE_KEY)
547 reg &= ~mask;
548 rt2x00pci_register_write(rt2x00dev, SEC_CSR3, reg);
549 }
550
551 return 0;
552}
553
349static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev, 554static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
350 const unsigned int filter_flags) 555 const unsigned int filter_flags)
351{ 556{
@@ -440,6 +645,30 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
440 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 645 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
441} 646}
442 647
648
649static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
650 struct rt2x00lib_conf *libconf)
651{
652 u16 eeprom;
653 short lna_gain = 0;
654
655 if (libconf->band == IEEE80211_BAND_2GHZ) {
656 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
657 lna_gain += 14;
658
659 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
660 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
661 } else {
662 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
663 lna_gain += 14;
664
665 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
666 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
667 }
668
669 rt2x00dev->lna_gain = lna_gain;
670}
671
443static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev, 672static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev,
444 const int basic_rate_mask) 673 const int basic_rate_mask)
445{ 674{
@@ -758,6 +987,9 @@ static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
758 struct rt2x00lib_conf *libconf, 987 struct rt2x00lib_conf *libconf,
759 const unsigned int flags) 988 const unsigned int flags)
760{ 989{
990 /* Always recalculate LNA gain before changing configuration */
991 rt61pci_config_lna_gain(rt2x00dev, libconf);
992
761 if (flags & CONFIG_UPDATE_PHYMODE) 993 if (flags & CONFIG_UPDATE_PHYMODE)
762 rt61pci_config_phymode(rt2x00dev, libconf->basic_rates); 994 rt61pci_config_phymode(rt2x00dev, libconf->basic_rates);
763 if (flags & CONFIG_UPDATE_CHANNEL) 995 if (flags & CONFIG_UPDATE_CHANNEL)
@@ -1246,16 +1478,6 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
1246 1478
1247 rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); 1479 rt2x00pci_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff);
1248 1480
1249 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
1250 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC0_TX_OP, 0);
1251 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC1_TX_OP, 0);
1252 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
1253
1254 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
1255 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC2_TX_OP, 192);
1256 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC3_TX_OP, 48);
1257 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
1258
1259 /* 1481 /*
1260 * Clear all beacons 1482 * Clear all beacons
1261 * For the Beacon base registers we only need to clear 1483 * For the Beacon base registers we only need to clear
@@ -1533,8 +1755,8 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1533 * TX descriptor initialization 1755 * TX descriptor initialization
1534 */ 1756 */
1535static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1757static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1536 struct sk_buff *skb, 1758 struct sk_buff *skb,
1537 struct txentry_desc *txdesc) 1759 struct txentry_desc *txdesc)
1538{ 1760{
1539 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1761 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1540 __le32 *txd = skbdesc->desc; 1762 __le32 *txd = skbdesc->desc;
@@ -1548,7 +1770,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1548 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1770 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1549 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1771 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1550 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1772 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1551 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1773 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1552 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1774 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1553 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1775 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1554 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); 1776 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
@@ -1561,6 +1783,11 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1561 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1783 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1562 rt2x00_desc_write(txd, 2, word); 1784 rt2x00_desc_write(txd, 2, word);
1563 1785
1786 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
1787 _rt2x00_desc_write(txd, 3, skbdesc->iv);
1788 _rt2x00_desc_write(txd, 4, skbdesc->eiv);
1789 }
1790
1564 rt2x00_desc_read(txd, 5, &word); 1791 rt2x00_desc_read(txd, 5, &word);
1565 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid); 1792 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid);
1566 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, 1793 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
@@ -1595,11 +1822,15 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1595 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1822 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1596 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1823 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1597 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1824 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1598 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1825 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
1826 test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
1827 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1828 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1829 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1599 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1830 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1600 rt2x00_set_field32(&word, TXD_W0_BURST, 1831 rt2x00_set_field32(&word, TXD_W0_BURST,
1601 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1832 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1602 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1833 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
1603 rt2x00_desc_write(txd, 0, word); 1834 rt2x00_desc_write(txd, 0, word);
1604} 1835}
1605 1836
@@ -1676,40 +1907,27 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1676 */ 1907 */
1677static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) 1908static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1678{ 1909{
1679 u16 eeprom; 1910 u8 offset = rt2x00dev->lna_gain;
1680 u8 offset;
1681 u8 lna; 1911 u8 lna;
1682 1912
1683 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); 1913 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA);
1684 switch (lna) { 1914 switch (lna) {
1685 case 3: 1915 case 3:
1686 offset = 90; 1916 offset += 90;
1687 break; 1917 break;
1688 case 2: 1918 case 2:
1689 offset = 74; 1919 offset += 74;
1690 break; 1920 break;
1691 case 1: 1921 case 1:
1692 offset = 64; 1922 offset += 64;
1693 break; 1923 break;
1694 default: 1924 default:
1695 return 0; 1925 return 0;
1696 } 1926 }
1697 1927
1698 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) { 1928 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) {
1699 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
1700 offset += 14;
1701
1702 if (lna == 3 || lna == 2) 1929 if (lna == 3 || lna == 2)
1703 offset += 10; 1930 offset += 10;
1704
1705 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
1706 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
1707 } else {
1708 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
1709 offset += 14;
1710
1711 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
1712 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
1713 } 1931 }
1714 1932
1715 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; 1933 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
@@ -1718,6 +1936,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1718static void rt61pci_fill_rxdone(struct queue_entry *entry, 1936static void rt61pci_fill_rxdone(struct queue_entry *entry,
1719 struct rxdone_entry_desc *rxdesc) 1937 struct rxdone_entry_desc *rxdesc)
1720{ 1938{
1939 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1721 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1940 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1722 u32 word0; 1941 u32 word0;
1723 u32 word1; 1942 u32 word1;
@@ -1728,6 +1947,38 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1728 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1947 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1729 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1948 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1730 1949
1950 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
1951 rxdesc->cipher =
1952 rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1953 rxdesc->cipher_status =
1954 rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1955 }
1956
1957 if (rxdesc->cipher != CIPHER_NONE) {
1958 _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv);
1959 _rt2x00_desc_read(entry_priv->desc, 3, &rxdesc->eiv);
1960 _rt2x00_desc_read(entry_priv->desc, 4, &rxdesc->icv);
1961
1962 /*
1963 * Hardware has stripped IV/EIV data from 802.11 frame during
1964 * decryption. It has provided the data seperately but rt2x00lib
1965 * should decide if it should be reinserted.
1966 */
1967 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1968
1969 /*
1970 * FIXME: Legacy driver indicates that the frame does
1971 * contain the Michael Mic. Unfortunately, in rt2x00
1972 * the MIC seems to be missing completely...
1973 */
1974 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1975
1976 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
1977 rxdesc->flags |= RX_FLAG_DECRYPTED;
1978 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
1979 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
1980 }
1981
1731 /* 1982 /*
1732 * Obtain the status about this packet. 1983 * Obtain the status about this packet.
1733 * When frame was received with an OFDM bitrate, 1984 * When frame was received with an OFDM bitrate,
@@ -1735,11 +1986,13 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1735 * a CCK bitrate the signal is the rate in 100kbit/s. 1986 * a CCK bitrate the signal is the rate in 100kbit/s.
1736 */ 1987 */
1737 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1988 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1738 rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1); 1989 rxdesc->rssi = rt61pci_agc_to_rssi(rt2x00dev, word1);
1739 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1990 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1740 1991
1741 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1992 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1742 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1993 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1994 else
1995 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1743 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1996 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1744 rxdesc->dev_flags |= RXDONE_MY_BSS; 1997 rxdesc->dev_flags |= RXDONE_MY_BSS;
1745} 1998}
@@ -1860,7 +2113,7 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
1860 if (!reg && !reg_mcu) 2113 if (!reg && !reg_mcu)
1861 return IRQ_NONE; 2114 return IRQ_NONE;
1862 2115
1863 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 2116 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1864 return IRQ_HANDLED; 2117 return IRQ_HANDLED;
1865 2118
1866 /* 2119 /*
@@ -2060,10 +2313,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2060 /* 2313 /*
2061 * Detect if this device has an hardware controlled radio. 2314 * Detect if this device has an hardware controlled radio.
2062 */ 2315 */
2063#ifdef CONFIG_RT61PCI_RFKILL 2316#ifdef CONFIG_RT2X00_LIB_RFKILL
2064 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 2317 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
2065 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 2318 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
2066#endif /* CONFIG_RT61PCI_RFKILL */ 2319#endif /* CONFIG_RT2X00_LIB_RFKILL */
2067 2320
2068 /* 2321 /*
2069 * Read frequency offset and RF programming sequence. 2322 * Read frequency offset and RF programming sequence.
@@ -2121,7 +2374,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2121 * If the eeprom value is invalid, 2374 * If the eeprom value is invalid,
2122 * switch to default led mode. 2375 * switch to default led mode.
2123 */ 2376 */
2124#ifdef CONFIG_RT61PCI_LEDS 2377#ifdef CONFIG_RT2X00_LIB_LEDS
2125 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); 2378 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
2126 value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE); 2379 value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE);
2127 2380
@@ -2155,7 +2408,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2155 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, 2408 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
2156 rt2x00_get_field16(eeprom, 2409 rt2x00_get_field16(eeprom,
2157 EEPROM_LED_POLARITY_RDY_A)); 2410 EEPROM_LED_POLARITY_RDY_A));
2158#endif /* CONFIG_RT61PCI_LEDS */ 2411#endif /* CONFIG_RT2X00_LIB_LEDS */
2159 2412
2160 return 0; 2413 return 0;
2161} 2414}
@@ -2274,10 +2527,11 @@ static const struct rf_channel rf_vals_seq[] = {
2274 { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 }, 2527 { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 },
2275}; 2528};
2276 2529
2277static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2530static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2278{ 2531{
2279 struct hw_mode_spec *spec = &rt2x00dev->spec; 2532 struct hw_mode_spec *spec = &rt2x00dev->spec;
2280 u8 *txpower; 2533 struct channel_info *info;
2534 char *tx_power;
2281 unsigned int i; 2535 unsigned int i;
2282 2536
2283 /* 2537 /*
@@ -2294,20 +2548,10 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2294 EEPROM_MAC_ADDR_0)); 2548 EEPROM_MAC_ADDR_0));
2295 2549
2296 /* 2550 /*
2297 * Convert tx_power array in eeprom.
2298 */
2299 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2300 for (i = 0; i < 14; i++)
2301 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
2302
2303 /*
2304 * Initialize hw_mode information. 2551 * Initialize hw_mode information.
2305 */ 2552 */
2306 spec->supported_bands = SUPPORT_BAND_2GHZ; 2553 spec->supported_bands = SUPPORT_BAND_2GHZ;
2307 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2554 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2308 spec->tx_power_a = NULL;
2309 spec->tx_power_bg = txpower;
2310 spec->tx_power_default = DEFAULT_TXPOWER;
2311 2555
2312 if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) { 2556 if (!test_bit(CONFIG_RF_SEQUENCE, &rt2x00dev->flags)) {
2313 spec->num_channels = 14; 2557 spec->num_channels = 14;
@@ -2321,13 +2565,28 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2321 rt2x00_rf(&rt2x00dev->chip, RF5325)) { 2565 rt2x00_rf(&rt2x00dev->chip, RF5325)) {
2322 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2566 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2323 spec->num_channels = ARRAY_SIZE(rf_vals_seq); 2567 spec->num_channels = ARRAY_SIZE(rf_vals_seq);
2568 }
2324 2569
2325 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2570 /*
2326 for (i = 0; i < 14; i++) 2571 * Create channel information array
2327 txpower[i] = TXPOWER_FROM_DEV(txpower[i]); 2572 */
2573 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
2574 if (!info)
2575 return -ENOMEM;
2328 2576
2329 spec->tx_power_a = txpower; 2577 spec->channels_info = info;
2578
2579 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2580 for (i = 0; i < 14; i++)
2581 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2582
2583 if (spec->num_channels > 14) {
2584 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2585 for (i = 14; i < spec->num_channels; i++)
2586 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2330 } 2587 }
2588
2589 return 0;
2331} 2590}
2332 2591
2333static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) 2592static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -2348,13 +2607,17 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2348 /* 2607 /*
2349 * Initialize hw specifications. 2608 * Initialize hw specifications.
2350 */ 2609 */
2351 rt61pci_probe_hw_mode(rt2x00dev); 2610 retval = rt61pci_probe_hw_mode(rt2x00dev);
2611 if (retval)
2612 return retval;
2352 2613
2353 /* 2614 /*
2354 * This device requires firmware and DMA mapped skbs. 2615 * This device requires firmware and DMA mapped skbs.
2355 */ 2616 */
2356 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2617 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
2357 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 2618 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
2619 if (!modparam_nohwcrypt)
2620 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
2358 2621
2359 /* 2622 /*
2360 * Set the rssi offset. 2623 * Set the rssi offset.
@@ -2381,6 +2644,63 @@ static int rt61pci_set_retry_limit(struct ieee80211_hw *hw,
2381 return 0; 2644 return 0;
2382} 2645}
2383 2646
2647static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2648 const struct ieee80211_tx_queue_params *params)
2649{
2650 struct rt2x00_dev *rt2x00dev = hw->priv;
2651 struct data_queue *queue;
2652 struct rt2x00_field32 field;
2653 int retval;
2654 u32 reg;
2655
2656 /*
2657 * First pass the configuration through rt2x00lib, that will
2658 * update the queue settings and validate the input. After that
2659 * we are free to update the registers based on the value
2660 * in the queue parameter.
2661 */
2662 retval = rt2x00mac_conf_tx(hw, queue_idx, params);
2663 if (retval)
2664 return retval;
2665
2666 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2667
2668 /* Update WMM TXOP register */
2669 if (queue_idx < 2) {
2670 field.bit_offset = queue_idx * 16;
2671 field.bit_mask = 0xffff << field.bit_offset;
2672
2673 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
2674 rt2x00_set_field32(&reg, field, queue->txop);
2675 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
2676 } else if (queue_idx < 4) {
2677 field.bit_offset = (queue_idx - 2) * 16;
2678 field.bit_mask = 0xffff << field.bit_offset;
2679
2680 rt2x00pci_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
2681 rt2x00_set_field32(&reg, field, queue->txop);
2682 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
2683 }
2684
2685 /* Update WMM registers */
2686 field.bit_offset = queue_idx * 4;
2687 field.bit_mask = 0xf << field.bit_offset;
2688
2689 rt2x00pci_register_read(rt2x00dev, AIFSN_CSR, &reg);
2690 rt2x00_set_field32(&reg, field, queue->aifs);
2691 rt2x00pci_register_write(rt2x00dev, AIFSN_CSR, reg);
2692
2693 rt2x00pci_register_read(rt2x00dev, CWMIN_CSR, &reg);
2694 rt2x00_set_field32(&reg, field, queue->cw_min);
2695 rt2x00pci_register_write(rt2x00dev, CWMIN_CSR, reg);
2696
2697 rt2x00pci_register_read(rt2x00dev, CWMAX_CSR, &reg);
2698 rt2x00_set_field32(&reg, field, queue->cw_max);
2699 rt2x00pci_register_write(rt2x00dev, CWMAX_CSR, reg);
2700
2701 return 0;
2702}
2703
2384static u64 rt61pci_get_tsf(struct ieee80211_hw *hw) 2704static u64 rt61pci_get_tsf(struct ieee80211_hw *hw)
2385{ 2705{
2386 struct rt2x00_dev *rt2x00dev = hw->priv; 2706 struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -2404,10 +2724,11 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2404 .config = rt2x00mac_config, 2724 .config = rt2x00mac_config,
2405 .config_interface = rt2x00mac_config_interface, 2725 .config_interface = rt2x00mac_config_interface,
2406 .configure_filter = rt2x00mac_configure_filter, 2726 .configure_filter = rt2x00mac_configure_filter,
2727 .set_key = rt2x00mac_set_key,
2407 .get_stats = rt2x00mac_get_stats, 2728 .get_stats = rt2x00mac_get_stats,
2408 .set_retry_limit = rt61pci_set_retry_limit, 2729 .set_retry_limit = rt61pci_set_retry_limit,
2409 .bss_info_changed = rt2x00mac_bss_info_changed, 2730 .bss_info_changed = rt2x00mac_bss_info_changed,
2410 .conf_tx = rt2x00mac_conf_tx, 2731 .conf_tx = rt61pci_conf_tx,
2411 .get_tx_stats = rt2x00mac_get_tx_stats, 2732 .get_tx_stats = rt2x00mac_get_tx_stats,
2412 .get_tsf = rt61pci_get_tsf, 2733 .get_tsf = rt61pci_get_tsf,
2413}; 2734};
@@ -2432,6 +2753,8 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2432 .write_beacon = rt61pci_write_beacon, 2753 .write_beacon = rt61pci_write_beacon,
2433 .kick_tx_queue = rt61pci_kick_tx_queue, 2754 .kick_tx_queue = rt61pci_kick_tx_queue,
2434 .fill_rxdone = rt61pci_fill_rxdone, 2755 .fill_rxdone = rt61pci_fill_rxdone,
2756 .config_shared_key = rt61pci_config_shared_key,
2757 .config_pairwise_key = rt61pci_config_pairwise_key,
2435 .config_filter = rt61pci_config_filter, 2758 .config_filter = rt61pci_config_filter,
2436 .config_intf = rt61pci_config_intf, 2759 .config_intf = rt61pci_config_intf,
2437 .config_erp = rt61pci_config_erp, 2760 .config_erp = rt61pci_config_erp,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 1004d5b899e6..8ec1451308cc 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -134,6 +134,16 @@
134#define PAIRWISE_KEY_TABLE_BASE 0x1200 134#define PAIRWISE_KEY_TABLE_BASE 0x1200
135#define PAIRWISE_TA_TABLE_BASE 0x1a00 135#define PAIRWISE_TA_TABLE_BASE 0x1a00
136 136
137#define SHARED_KEY_ENTRY(__idx) \
138 ( SHARED_KEY_TABLE_BASE + \
139 ((__idx) * sizeof(struct hw_key_entry)) )
140#define PAIRWISE_KEY_ENTRY(__idx) \
141 ( PAIRWISE_KEY_TABLE_BASE + \
142 ((__idx) * sizeof(struct hw_key_entry)) )
143#define PAIRWISE_TA_ENTRY(__idx) \
144 ( PAIRWISE_TA_TABLE_BASE + \
145 ((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
146
137struct hw_key_entry { 147struct hw_key_entry {
138 u8 key[16]; 148 u8 key[16];
139 u8 tx_mic[8]; 149 u8 tx_mic[8];
@@ -142,7 +152,8 @@ struct hw_key_entry {
142 152
143struct hw_pairwise_ta_entry { 153struct hw_pairwise_ta_entry {
144 u8 address[6]; 154 u8 address[6];
145 u8 reserved[2]; 155 u8 cipher;
156 u8 reserved;
146} __attribute__ ((packed)); 157} __attribute__ ((packed));
147 158
148/* 159/*
@@ -662,6 +673,10 @@ struct hw_pairwise_ta_entry {
662 * SEC_CSR4: Pairwise key table lookup control. 673 * SEC_CSR4: Pairwise key table lookup control.
663 */ 674 */
664#define SEC_CSR4 0x30b0 675#define SEC_CSR4 0x30b0
676#define SEC_CSR4_ENABLE_BSS0 FIELD32(0x00000001)
677#define SEC_CSR4_ENABLE_BSS1 FIELD32(0x00000002)
678#define SEC_CSR4_ENABLE_BSS2 FIELD32(0x00000004)
679#define SEC_CSR4_ENABLE_BSS3 FIELD32(0x00000008)
665 680
666/* 681/*
667 * SEC_CSR5: shared key table security mode register. 682 * SEC_CSR5: shared key table security mode register.
@@ -1428,8 +1443,10 @@ struct hw_pairwise_ta_entry {
1428 1443
1429/* 1444/*
1430 * Word4 1445 * Word4
1446 * ICV: Received ICV of originally encrypted.
1447 * NOTE: This is a guess, the official definition is "reserved"
1431 */ 1448 */
1432#define RXD_W4_RESERVED FIELD32(0xffffffff) 1449#define RXD_W4_ICV FIELD32(0xffffffff)
1433 1450
1434/* 1451/*
1435 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block 1452 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block
@@ -1465,17 +1482,10 @@ struct hw_pairwise_ta_entry {
1465#define MAX_TXPOWER 31 1482#define MAX_TXPOWER 31
1466#define DEFAULT_TXPOWER 24 1483#define DEFAULT_TXPOWER 24
1467 1484
1468#define TXPOWER_FROM_DEV(__txpower) \ 1485#define TXPOWER_FROM_DEV(__txpower) \
1469({ \ 1486 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1470 ((__txpower) > MAX_TXPOWER) ? \ 1487
1471 DEFAULT_TXPOWER : (__txpower); \ 1488#define TXPOWER_TO_DEV(__txpower) \
1472}) 1489 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
1473
1474#define TXPOWER_TO_DEV(__txpower) \
1475({ \
1476 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
1477 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
1478 (__txpower)); \
1479})
1480 1490
1481#endif /* RT61PCI_H */ 1491#endif /* RT61PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9761eaaa08be..934f8e03c5aa 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -37,6 +37,13 @@
37#include "rt73usb.h" 37#include "rt73usb.h"
38 38
39/* 39/*
40 * Allow hardware encryption to be disabled.
41 */
42static int modparam_nohwcrypt = 0;
43module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
45
46/*
40 * Register access. 47 * Register access.
41 * All access to the CSR registers will go through the methods 48 * All access to the CSR registers will go through the methods
42 * rt73usb_register_read and rt73usb_register_write. 49 * rt73usb_register_read and rt73usb_register_write.
@@ -285,7 +292,7 @@ static const struct rt2x00debug rt73usb_rt2x00debug = {
285}; 292};
286#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 293#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
287 294
288#ifdef CONFIG_RT73USB_LEDS 295#ifdef CONFIG_RT2X00_LIB_LEDS
289static void rt73usb_brightness_set(struct led_classdev *led_cdev, 296static void rt73usb_brightness_set(struct led_classdev *led_cdev,
290 enum led_brightness brightness) 297 enum led_brightness brightness)
291{ 298{
@@ -352,11 +359,224 @@ static void rt73usb_init_led(struct rt2x00_dev *rt2x00dev,
352 led->led_dev.blink_set = rt73usb_blink_set; 359 led->led_dev.blink_set = rt73usb_blink_set;
353 led->flags = LED_INITIALIZED; 360 led->flags = LED_INITIALIZED;
354} 361}
355#endif /* CONFIG_RT73USB_LEDS */ 362#endif /* CONFIG_RT2X00_LIB_LEDS */
356 363
357/* 364/*
358 * Configuration handlers. 365 * Configuration handlers.
359 */ 366 */
367static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
368 struct rt2x00lib_crypto *crypto,
369 struct ieee80211_key_conf *key)
370{
371 struct hw_key_entry key_entry;
372 struct rt2x00_field32 field;
373 int timeout;
374 u32 mask;
375 u32 reg;
376
377 if (crypto->cmd == SET_KEY) {
378 /*
379 * rt2x00lib can't determine the correct free
380 * key_idx for shared keys. We have 1 register
381 * with key valid bits. The goal is simple, read
382 * the register, if that is full we have no slots
383 * left.
384 * Note that each BSS is allowed to have up to 4
385 * shared keys, so put a mask over the allowed
386 * entries.
387 */
388 mask = (0xf << crypto->bssidx);
389
390 rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg);
391 reg &= mask;
392
393 if (reg && reg == mask)
394 return -ENOSPC;
395
396 key->hw_key_idx += reg ? ffz(reg) : 0;
397
398 /*
399 * Upload key to hardware
400 */
401 memcpy(key_entry.key, crypto->key,
402 sizeof(key_entry.key));
403 memcpy(key_entry.tx_mic, crypto->tx_mic,
404 sizeof(key_entry.tx_mic));
405 memcpy(key_entry.rx_mic, crypto->rx_mic,
406 sizeof(key_entry.rx_mic));
407
408 reg = SHARED_KEY_ENTRY(key->hw_key_idx);
409 timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
410 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
411 USB_VENDOR_REQUEST_OUT, reg,
412 &key_entry,
413 sizeof(key_entry),
414 timeout);
415
416 /*
417 * The cipher types are stored over 2 registers.
418 * bssidx 0 and 1 keys are stored in SEC_CSR1 and
419 * bssidx 1 and 2 keys are stored in SEC_CSR5.
420 * Using the correct defines correctly will cause overhead,
421 * so just calculate the correct offset.
422 */
423 if (key->hw_key_idx < 8) {
424 field.bit_offset = (3 * key->hw_key_idx);
425 field.bit_mask = 0x7 << field.bit_offset;
426
427 rt73usb_register_read(rt2x00dev, SEC_CSR1, &reg);
428 rt2x00_set_field32(&reg, field, crypto->cipher);
429 rt73usb_register_write(rt2x00dev, SEC_CSR1, reg);
430 } else {
431 field.bit_offset = (3 * (key->hw_key_idx - 8));
432 field.bit_mask = 0x7 << field.bit_offset;
433
434 rt73usb_register_read(rt2x00dev, SEC_CSR5, &reg);
435 rt2x00_set_field32(&reg, field, crypto->cipher);
436 rt73usb_register_write(rt2x00dev, SEC_CSR5, reg);
437 }
438
439 /*
440 * The driver does not support the IV/EIV generation
441 * in hardware. However it doesn't support the IV/EIV
442 * inside the ieee80211 frame either, but requires it
443 * to be provided seperately for the descriptor.
444 * rt2x00lib will cut the IV/EIV data out of all frames
445 * given to us by mac80211, but we must tell mac80211
446 * to generate the IV/EIV data.
447 */
448 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
449 }
450
451 /*
452 * SEC_CSR0 contains only single-bit fields to indicate
453 * a particular key is valid. Because using the FIELD32()
454 * defines directly will cause a lot of overhead we use
455 * a calculation to determine the correct bit directly.
456 */
457 mask = 1 << key->hw_key_idx;
458
459 rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg);
460 if (crypto->cmd == SET_KEY)
461 reg |= mask;
462 else if (crypto->cmd == DISABLE_KEY)
463 reg &= ~mask;
464 rt73usb_register_write(rt2x00dev, SEC_CSR0, reg);
465
466 return 0;
467}
468
469static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
470 struct rt2x00lib_crypto *crypto,
471 struct ieee80211_key_conf *key)
472{
473 struct hw_pairwise_ta_entry addr_entry;
474 struct hw_key_entry key_entry;
475 int timeout;
476 u32 mask;
477 u32 reg;
478
479 if (crypto->cmd == SET_KEY) {
480 /*
481 * rt2x00lib can't determine the correct free
482 * key_idx for pairwise keys. We have 2 registers
483 * with key valid bits. The goal is simple, read
484 * the first register, if that is full move to
485 * the next register.
486 * When both registers are full, we drop the key,
487 * otherwise we use the first invalid entry.
488 */
489 rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg);
490 if (reg && reg == ~0) {
491 key->hw_key_idx = 32;
492 rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg);
493 if (reg && reg == ~0)
494 return -ENOSPC;
495 }
496
497 key->hw_key_idx += reg ? ffz(reg) : 0;
498
499 /*
500 * Upload key to hardware
501 */
502 memcpy(key_entry.key, crypto->key,
503 sizeof(key_entry.key));
504 memcpy(key_entry.tx_mic, crypto->tx_mic,
505 sizeof(key_entry.tx_mic));
506 memcpy(key_entry.rx_mic, crypto->rx_mic,
507 sizeof(key_entry.rx_mic));
508
509 reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
510 timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
511 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
512 USB_VENDOR_REQUEST_OUT, reg,
513 &key_entry,
514 sizeof(key_entry),
515 timeout);
516
517 /*
518 * Send the address and cipher type to the hardware register.
519 * This data fits within the CSR cache size, so we can use
520 * rt73usb_register_multiwrite() directly.
521 */
522 memset(&addr_entry, 0, sizeof(addr_entry));
523 memcpy(&addr_entry, crypto->address, ETH_ALEN);
524 addr_entry.cipher = crypto->cipher;
525
526 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
527 rt73usb_register_multiwrite(rt2x00dev, reg,
528 &addr_entry, sizeof(addr_entry));
529
530 /*
531 * Enable pairwise lookup table for given BSS idx,
532 * without this received frames will not be decrypted
533 * by the hardware.
534 */
535 rt73usb_register_read(rt2x00dev, SEC_CSR4, &reg);
536 reg |= (1 << crypto->bssidx);
537 rt73usb_register_write(rt2x00dev, SEC_CSR4, reg);
538
539 /*
540 * The driver does not support the IV/EIV generation
541 * in hardware. However it doesn't support the IV/EIV
542 * inside the ieee80211 frame either, but requires it
543 * to be provided seperately for the descriptor.
544 * rt2x00lib will cut the IV/EIV data out of all frames
545 * given to us by mac80211, but we must tell mac80211
546 * to generate the IV/EIV data.
547 */
548 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
549 }
550
551 /*
552 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
553 * a particular key is valid. Because using the FIELD32()
554 * defines directly will cause a lot of overhead we use
555 * a calculation to determine the correct bit directly.
556 */
557 if (key->hw_key_idx < 32) {
558 mask = 1 << key->hw_key_idx;
559
560 rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg);
561 if (crypto->cmd == SET_KEY)
562 reg |= mask;
563 else if (crypto->cmd == DISABLE_KEY)
564 reg &= ~mask;
565 rt73usb_register_write(rt2x00dev, SEC_CSR2, reg);
566 } else {
567 mask = 1 << (key->hw_key_idx - 32);
568
569 rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg);
570 if (crypto->cmd == SET_KEY)
571 reg |= mask;
572 else if (crypto->cmd == DISABLE_KEY)
573 reg &= ~mask;
574 rt73usb_register_write(rt2x00dev, SEC_CSR3, reg);
575 }
576
577 return 0;
578}
579
360static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev, 580static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
361 const unsigned int filter_flags) 581 const unsigned int filter_flags)
362{ 582{
@@ -451,6 +671,26 @@ static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
451 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); 671 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg);
452} 672}
453 673
674static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
675 struct rt2x00lib_conf *libconf)
676{
677 u16 eeprom;
678 short lna_gain = 0;
679
680 if (libconf->band == IEEE80211_BAND_2GHZ) {
681 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
682 lna_gain += 14;
683
684 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
685 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
686 } else {
687 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
688 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
689 }
690
691 rt2x00dev->lna_gain = lna_gain;
692}
693
454static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev, 694static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev,
455 const int basic_rate_mask) 695 const int basic_rate_mask)
456{ 696{
@@ -705,6 +945,9 @@ static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
705 struct rt2x00lib_conf *libconf, 945 struct rt2x00lib_conf *libconf,
706 const unsigned int flags) 946 const unsigned int flags)
707{ 947{
948 /* Always recalculate LNA gain before changing configuration */
949 rt73usb_config_lna_gain(rt2x00dev, libconf);
950
708 if (flags & CONFIG_UPDATE_PHYMODE) 951 if (flags & CONFIG_UPDATE_PHYMODE)
709 rt73usb_config_phymode(rt2x00dev, libconf->basic_rates); 952 rt73usb_config_phymode(rt2x00dev, libconf->basic_rates);
710 if (flags & CONFIG_UPDATE_CHANNEL) 953 if (flags & CONFIG_UPDATE_CHANNEL)
@@ -1034,16 +1277,6 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1034 rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606); 1277 rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
1035 rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408); 1278 rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408);
1036 1279
1037 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
1038 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC0_TX_OP, 0);
1039 rt2x00_set_field32(&reg, AC_TXOP_CSR0_AC1_TX_OP, 0);
1040 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
1041
1042 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
1043 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC2_TX_OP, 192);
1044 rt2x00_set_field32(&reg, AC_TXOP_CSR1_AC3_TX_OP, 48);
1045 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
1046
1047 rt73usb_register_read(rt2x00dev, MAC_CSR9, &reg); 1280 rt73usb_register_read(rt2x00dev, MAC_CSR9, &reg);
1048 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0); 1281 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
1049 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); 1282 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg);
@@ -1265,8 +1498,8 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1265 * TX descriptor initialization 1498 * TX descriptor initialization
1266 */ 1499 */
1267static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1500static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1268 struct sk_buff *skb, 1501 struct sk_buff *skb,
1269 struct txentry_desc *txdesc) 1502 struct txentry_desc *txdesc)
1270{ 1503{
1271 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1504 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1272 __le32 *txd = skbdesc->desc; 1505 __le32 *txd = skbdesc->desc;
@@ -1280,7 +1513,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1280 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1513 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1281 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1514 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1282 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1515 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1283 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1516 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1284 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1517 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1285 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1518 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
1286 rt2x00_desc_write(txd, 1, word); 1519 rt2x00_desc_write(txd, 1, word);
@@ -1292,6 +1525,11 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1292 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1525 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1293 rt2x00_desc_write(txd, 2, word); 1526 rt2x00_desc_write(txd, 2, word);
1294 1527
1528 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
1529 _rt2x00_desc_write(txd, 3, skbdesc->iv);
1530 _rt2x00_desc_write(txd, 4, skbdesc->eiv);
1531 }
1532
1295 rt2x00_desc_read(txd, 5, &word); 1533 rt2x00_desc_read(txd, 5, &word);
1296 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1534 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1297 TXPOWER_TO_DEV(rt2x00dev->tx_power)); 1535 TXPOWER_TO_DEV(rt2x00dev->tx_power));
@@ -1313,12 +1551,15 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1313 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1551 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1314 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1552 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1315 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1553 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1316 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1554 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
1317 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, 1555 test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
1318 skb->len - skbdesc->desc_len); 1556 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1557 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1558 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1559 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1319 rt2x00_set_field32(&word, TXD_W0_BURST2, 1560 rt2x00_set_field32(&word, TXD_W0_BURST2,
1320 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1561 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1321 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1562 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
1322 rt2x00_desc_write(txd, 0, word); 1563 rt2x00_desc_write(txd, 0, word);
1323} 1564}
1324 1565
@@ -1331,7 +1572,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1331 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1572 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1332 unsigned int beacon_base; 1573 unsigned int beacon_base;
1333 u32 reg; 1574 u32 reg;
1334 u32 word, len;
1335 1575
1336 /* 1576 /*
1337 * Add the descriptor in front of the skb. 1577 * Add the descriptor in front of the skb.
@@ -1341,17 +1581,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1341 skbdesc->desc = entry->skb->data; 1581 skbdesc->desc = entry->skb->data;
1342 1582
1343 /* 1583 /*
1344 * Adjust the beacon databyte count. The current number is
1345 * calculated before this function gets called, but falsely
1346 * assumes that the descriptor was already present in the SKB.
1347 */
1348 rt2x00_desc_read(skbdesc->desc, 0, &word);
1349 len = rt2x00_get_field32(word, TXD_W0_DATABYTE_COUNT);
1350 len += skbdesc->desc_len;
1351 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, len);
1352 rt2x00_desc_write(skbdesc->desc, 0, word);
1353
1354 /*
1355 * Disable beaconing while we are reloading the beacon data, 1584 * Disable beaconing while we are reloading the beacon data,
1356 * otherwise we might be sending out invalid data. 1585 * otherwise we might be sending out invalid data.
1357 */ 1586 */
@@ -1422,20 +1651,19 @@ static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1422 */ 1651 */
1423static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) 1652static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1424{ 1653{
1425 u16 eeprom; 1654 u8 offset = rt2x00dev->lna_gain;
1426 u8 offset;
1427 u8 lna; 1655 u8 lna;
1428 1656
1429 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); 1657 lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA);
1430 switch (lna) { 1658 switch (lna) {
1431 case 3: 1659 case 3:
1432 offset = 90; 1660 offset += 90;
1433 break; 1661 break;
1434 case 2: 1662 case 2:
1435 offset = 74; 1663 offset += 74;
1436 break; 1664 break;
1437 case 1: 1665 case 1:
1438 offset = 64; 1666 offset += 64;
1439 break; 1667 break;
1440 default: 1668 default:
1441 return 0; 1669 return 0;
@@ -1451,15 +1679,6 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1451 else if (lna == 2) 1679 else if (lna == 2)
1452 offset += 8; 1680 offset += 8;
1453 } 1681 }
1454
1455 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
1456 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
1457 } else {
1458 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
1459 offset += 14;
1460
1461 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
1462 offset -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
1463 } 1682 }
1464 1683
1465 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; 1684 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
@@ -1468,6 +1687,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1468static void rt73usb_fill_rxdone(struct queue_entry *entry, 1687static void rt73usb_fill_rxdone(struct queue_entry *entry,
1469 struct rxdone_entry_desc *rxdesc) 1688 struct rxdone_entry_desc *rxdesc)
1470{ 1689{
1690 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1471 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1691 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1472 __le32 *rxd = (__le32 *)entry->skb->data; 1692 __le32 *rxd = (__le32 *)entry->skb->data;
1473 u32 word0; 1693 u32 word0;
@@ -1489,6 +1709,38 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1489 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1709 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1490 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1710 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1491 1711
1712 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
1713 rxdesc->cipher =
1714 rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1715 rxdesc->cipher_status =
1716 rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1717 }
1718
1719 if (rxdesc->cipher != CIPHER_NONE) {
1720 _rt2x00_desc_read(rxd, 2, &rxdesc->iv);
1721 _rt2x00_desc_read(rxd, 3, &rxdesc->eiv);
1722 _rt2x00_desc_read(rxd, 4, &rxdesc->icv);
1723
1724 /*
1725 * Hardware has stripped IV/EIV data from 802.11 frame during
1726 * decryption. It has provided the data seperately but rt2x00lib
1727 * should decide if it should be reinserted.
1728 */
1729 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1730
1731 /*
1732 * FIXME: Legacy driver indicates that the frame does
1733 * contain the Michael Mic. Unfortunately, in rt2x00
1734 * the MIC seems to be missing completely...
1735 */
1736 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1737
1738 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
1739 rxdesc->flags |= RX_FLAG_DECRYPTED;
1740 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
1741 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
1742 }
1743
1492 /* 1744 /*
1493 * Obtain the status about this packet. 1745 * Obtain the status about this packet.
1494 * When frame was received with an OFDM bitrate, 1746 * When frame was received with an OFDM bitrate,
@@ -1496,11 +1748,13 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1496 * a CCK bitrate the signal is the rate in 100kbit/s. 1748 * a CCK bitrate the signal is the rate in 100kbit/s.
1497 */ 1749 */
1498 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1750 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1499 rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1); 1751 rxdesc->rssi = rt73usb_agc_to_rssi(rt2x00dev, word1);
1500 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1752 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1501 1753
1502 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1754 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1503 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; 1755 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1756 else
1757 rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE;
1504 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) 1758 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1505 rxdesc->dev_flags |= RXDONE_MY_BSS; 1759 rxdesc->dev_flags |= RXDONE_MY_BSS;
1506 1760
@@ -1678,7 +1932,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1678 /* 1932 /*
1679 * Store led settings, for correct led behaviour. 1933 * Store led settings, for correct led behaviour.
1680 */ 1934 */
1681#ifdef CONFIG_RT73USB_LEDS 1935#ifdef CONFIG_RT2X00_LIB_LEDS
1682 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); 1936 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
1683 1937
1684 rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 1938 rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
@@ -1711,7 +1965,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1711 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, 1965 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
1712 rt2x00_get_field16(eeprom, 1966 rt2x00_get_field16(eeprom,
1713 EEPROM_LED_POLARITY_RDY_A)); 1967 EEPROM_LED_POLARITY_RDY_A));
1714#endif /* CONFIG_RT73USB_LEDS */ 1968#endif /* CONFIG_RT2X00_LIB_LEDS */
1715 1969
1716 return 0; 1970 return 0;
1717} 1971}
@@ -1852,10 +2106,11 @@ static const struct rf_channel rf_vals_5225_2527[] = {
1852}; 2106};
1853 2107
1854 2108
1855static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2109static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1856{ 2110{
1857 struct hw_mode_spec *spec = &rt2x00dev->spec; 2111 struct hw_mode_spec *spec = &rt2x00dev->spec;
1858 u8 *txpower; 2112 struct channel_info *info;
2113 char *tx_power;
1859 unsigned int i; 2114 unsigned int i;
1860 2115
1861 /* 2116 /*
@@ -1872,20 +2127,10 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1872 EEPROM_MAC_ADDR_0)); 2127 EEPROM_MAC_ADDR_0));
1873 2128
1874 /* 2129 /*
1875 * Convert tx_power array in eeprom.
1876 */
1877 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
1878 for (i = 0; i < 14; i++)
1879 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
1880
1881 /*
1882 * Initialize hw_mode information. 2130 * Initialize hw_mode information.
1883 */ 2131 */
1884 spec->supported_bands = SUPPORT_BAND_2GHZ; 2132 spec->supported_bands = SUPPORT_BAND_2GHZ;
1885 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2133 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1886 spec->tx_power_a = NULL;
1887 spec->tx_power_bg = txpower;
1888 spec->tx_power_default = DEFAULT_TXPOWER;
1889 2134
1890 if (rt2x00_rf(&rt2x00dev->chip, RF2528)) { 2135 if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
1891 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); 2136 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
@@ -1903,14 +2148,26 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1903 spec->channels = rf_vals_5225_2527; 2148 spec->channels = rf_vals_5225_2527;
1904 } 2149 }
1905 2150
1906 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 2151 /*
1907 rt2x00_rf(&rt2x00dev->chip, RF5226)) { 2152 * Create channel information array
1908 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2153 */
1909 for (i = 0; i < 14; i++) 2154 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1910 txpower[i] = TXPOWER_FROM_DEV(txpower[i]); 2155 if (!info)
2156 return -ENOMEM;
1911 2157
1912 spec->tx_power_a = txpower; 2158 spec->channels_info = info;
2159
2160 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
2161 for (i = 0; i < 14; i++)
2162 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
2163
2164 if (spec->num_channels > 14) {
2165 tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
2166 for (i = 14; i < spec->num_channels; i++)
2167 info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
1913 } 2168 }
2169
2170 return 0;
1914} 2171}
1915 2172
1916static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) 2173static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
@@ -1931,13 +2188,17 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1931 /* 2188 /*
1932 * Initialize hw specifications. 2189 * Initialize hw specifications.
1933 */ 2190 */
1934 rt73usb_probe_hw_mode(rt2x00dev); 2191 retval = rt73usb_probe_hw_mode(rt2x00dev);
2192 if (retval)
2193 return retval;
1935 2194
1936 /* 2195 /*
1937 * This device requires firmware. 2196 * This device requires firmware.
1938 */ 2197 */
1939 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2198 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
1940 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags); 2199 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
2200 if (!modparam_nohwcrypt)
2201 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
1941 2202
1942 /* 2203 /*
1943 * Set the rssi offset. 2204 * Set the rssi offset.
@@ -1964,6 +2225,63 @@ static int rt73usb_set_retry_limit(struct ieee80211_hw *hw,
1964 return 0; 2225 return 0;
1965} 2226}
1966 2227
2228static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2229 const struct ieee80211_tx_queue_params *params)
2230{
2231 struct rt2x00_dev *rt2x00dev = hw->priv;
2232 struct data_queue *queue;
2233 struct rt2x00_field32 field;
2234 int retval;
2235 u32 reg;
2236
2237 /*
2238 * First pass the configuration through rt2x00lib, that will
2239 * update the queue settings and validate the input. After that
2240 * we are free to update the registers based on the value
2241 * in the queue parameter.
2242 */
2243 retval = rt2x00mac_conf_tx(hw, queue_idx, params);
2244 if (retval)
2245 return retval;
2246
2247 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2248
2249 /* Update WMM TXOP register */
2250 if (queue_idx < 2) {
2251 field.bit_offset = queue_idx * 16;
2252 field.bit_mask = 0xffff << field.bit_offset;
2253
2254 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
2255 rt2x00_set_field32(&reg, field, queue->txop);
2256 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
2257 } else if (queue_idx < 4) {
2258 field.bit_offset = (queue_idx - 2) * 16;
2259 field.bit_mask = 0xffff << field.bit_offset;
2260
2261 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
2262 rt2x00_set_field32(&reg, field, queue->txop);
2263 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
2264 }
2265
2266 /* Update WMM registers */
2267 field.bit_offset = queue_idx * 4;
2268 field.bit_mask = 0xf << field.bit_offset;
2269
2270 rt73usb_register_read(rt2x00dev, AIFSN_CSR, &reg);
2271 rt2x00_set_field32(&reg, field, queue->aifs);
2272 rt73usb_register_write(rt2x00dev, AIFSN_CSR, reg);
2273
2274 rt73usb_register_read(rt2x00dev, CWMIN_CSR, &reg);
2275 rt2x00_set_field32(&reg, field, queue->cw_min);
2276 rt73usb_register_write(rt2x00dev, CWMIN_CSR, reg);
2277
2278 rt73usb_register_read(rt2x00dev, CWMAX_CSR, &reg);
2279 rt2x00_set_field32(&reg, field, queue->cw_max);
2280 rt73usb_register_write(rt2x00dev, CWMAX_CSR, reg);
2281
2282 return 0;
2283}
2284
1967#if 0 2285#if 0
1968/* 2286/*
1969 * Mac80211 demands get_tsf must be atomic. 2287 * Mac80211 demands get_tsf must be atomic.
@@ -1997,10 +2315,11 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
1997 .config = rt2x00mac_config, 2315 .config = rt2x00mac_config,
1998 .config_interface = rt2x00mac_config_interface, 2316 .config_interface = rt2x00mac_config_interface,
1999 .configure_filter = rt2x00mac_configure_filter, 2317 .configure_filter = rt2x00mac_configure_filter,
2318 .set_key = rt2x00mac_set_key,
2000 .get_stats = rt2x00mac_get_stats, 2319 .get_stats = rt2x00mac_get_stats,
2001 .set_retry_limit = rt73usb_set_retry_limit, 2320 .set_retry_limit = rt73usb_set_retry_limit,
2002 .bss_info_changed = rt2x00mac_bss_info_changed, 2321 .bss_info_changed = rt2x00mac_bss_info_changed,
2003 .conf_tx = rt2x00mac_conf_tx, 2322 .conf_tx = rt73usb_conf_tx,
2004 .get_tx_stats = rt2x00mac_get_tx_stats, 2323 .get_tx_stats = rt2x00mac_get_tx_stats,
2005 .get_tsf = rt73usb_get_tsf, 2324 .get_tsf = rt73usb_get_tsf,
2006}; 2325};
@@ -2024,6 +2343,8 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2024 .get_tx_data_len = rt73usb_get_tx_data_len, 2343 .get_tx_data_len = rt73usb_get_tx_data_len,
2025 .kick_tx_queue = rt73usb_kick_tx_queue, 2344 .kick_tx_queue = rt73usb_kick_tx_queue,
2026 .fill_rxdone = rt73usb_fill_rxdone, 2345 .fill_rxdone = rt73usb_fill_rxdone,
2346 .config_shared_key = rt73usb_config_shared_key,
2347 .config_pairwise_key = rt73usb_config_pairwise_key,
2027 .config_filter = rt73usb_config_filter, 2348 .config_filter = rt73usb_config_filter,
2028 .config_intf = rt73usb_config_intf, 2349 .config_intf = rt73usb_config_intf,
2029 .config_erp = rt73usb_config_erp, 2350 .config_erp = rt73usb_config_erp,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 148493501011..868386c457f6 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -92,6 +92,16 @@
92#define PAIRWISE_KEY_TABLE_BASE 0x1200 92#define PAIRWISE_KEY_TABLE_BASE 0x1200
93#define PAIRWISE_TA_TABLE_BASE 0x1a00 93#define PAIRWISE_TA_TABLE_BASE 0x1a00
94 94
95#define SHARED_KEY_ENTRY(__idx) \
96 ( SHARED_KEY_TABLE_BASE + \
97 ((__idx) * sizeof(struct hw_key_entry)) )
98#define PAIRWISE_KEY_ENTRY(__idx) \
99 ( PAIRWISE_KEY_TABLE_BASE + \
100 ((__idx) * sizeof(struct hw_key_entry)) )
101#define PAIRWISE_TA_ENTRY(__idx) \
102 ( PAIRWISE_TA_TABLE_BASE + \
103 ((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
104
95struct hw_key_entry { 105struct hw_key_entry {
96 u8 key[16]; 106 u8 key[16];
97 u8 tx_mic[8]; 107 u8 tx_mic[8];
@@ -100,7 +110,8 @@ struct hw_key_entry {
100 110
101struct hw_pairwise_ta_entry { 111struct hw_pairwise_ta_entry {
102 u8 address[6]; 112 u8 address[6];
103 u8 reserved[2]; 113 u8 cipher;
114 u8 reserved;
104} __attribute__ ((packed)); 115} __attribute__ ((packed));
105 116
106/* 117/*
@@ -563,6 +574,10 @@ struct hw_pairwise_ta_entry {
563 * SEC_CSR4: Pairwise key table lookup control. 574 * SEC_CSR4: Pairwise key table lookup control.
564 */ 575 */
565#define SEC_CSR4 0x30b0 576#define SEC_CSR4 0x30b0
577#define SEC_CSR4_ENABLE_BSS0 FIELD32(0x00000001)
578#define SEC_CSR4_ENABLE_BSS1 FIELD32(0x00000002)
579#define SEC_CSR4_ENABLE_BSS2 FIELD32(0x00000004)
580#define SEC_CSR4_ENABLE_BSS3 FIELD32(0x00000008)
566 581
567/* 582/*
568 * SEC_CSR5: shared key table security mode register. 583 * SEC_CSR5: shared key table security mode register.
@@ -1010,8 +1025,10 @@ struct hw_pairwise_ta_entry {
1010 1025
1011/* 1026/*
1012 * Word4 1027 * Word4
1028 * ICV: Received ICV of originally encrypted.
1029 * NOTE: This is a guess, the official definition is "reserved"
1013 */ 1030 */
1014#define RXD_W4_RESERVED FIELD32(0xffffffff) 1031#define RXD_W4_ICV FIELD32(0xffffffff)
1015 1032
1016/* 1033/*
1017 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block 1034 * the above 20-byte is called RXINFO and will be DMAed to MAC RX block
@@ -1033,17 +1050,10 @@ struct hw_pairwise_ta_entry {
1033#define MAX_TXPOWER 31 1050#define MAX_TXPOWER 31
1034#define DEFAULT_TXPOWER 24 1051#define DEFAULT_TXPOWER 24
1035 1052
1036#define TXPOWER_FROM_DEV(__txpower) \ 1053#define TXPOWER_FROM_DEV(__txpower) \
1037({ \ 1054 (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1038 ((__txpower) > MAX_TXPOWER) ? \ 1055
1039 DEFAULT_TXPOWER : (__txpower); \ 1056#define TXPOWER_TO_DEV(__txpower) \
1040}) 1057 clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER)
1041
1042#define TXPOWER_TO_DEV(__txpower) \
1043({ \
1044 ((__txpower) <= MIN_TXPOWER) ? MIN_TXPOWER : \
1045 (((__txpower) >= MAX_TXPOWER) ? MAX_TXPOWER : \
1046 (__txpower)); \
1047})
1048 1058
1049#endif /* RT73USB_H */ 1059#endif /* RT73USB_H */
diff --git a/drivers/net/wireless/rtl8180.h b/drivers/net/wireless/rtl8180.h
index 082a11f93beb..8721282a8185 100644
--- a/drivers/net/wireless/rtl8180.h
+++ b/drivers/net/wireless/rtl8180.h
@@ -24,20 +24,6 @@
24#define ANAPARAM_PWR1_SHIFT 20 24#define ANAPARAM_PWR1_SHIFT 20
25#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT) 25#define ANAPARAM_PWR1_MASK (0x7F << ANAPARAM_PWR1_SHIFT)
26 26
27enum rtl8180_tx_desc_flags {
28 RTL8180_TX_DESC_FLAG_NO_ENC = (1 << 15),
29 RTL8180_TX_DESC_FLAG_TX_OK = (1 << 15),
30 RTL8180_TX_DESC_FLAG_SPLCP = (1 << 16),
31 RTL8180_TX_DESC_FLAG_RX_UNDER = (1 << 16),
32 RTL8180_TX_DESC_FLAG_MOREFRAG = (1 << 17),
33 RTL8180_TX_DESC_FLAG_CTS = (1 << 18),
34 RTL8180_TX_DESC_FLAG_RTS = (1 << 23),
35 RTL8180_TX_DESC_FLAG_LS = (1 << 28),
36 RTL8180_TX_DESC_FLAG_FS = (1 << 29),
37 RTL8180_TX_DESC_FLAG_DMA = (1 << 30),
38 RTL8180_TX_DESC_FLAG_OWN = (1 << 31)
39};
40
41struct rtl8180_tx_desc { 27struct rtl8180_tx_desc {
42 __le32 flags; 28 __le32 flags;
43 __le16 rts_duration; 29 __le16 rts_duration;
@@ -52,23 +38,6 @@ struct rtl8180_tx_desc {
52 u32 reserved[2]; 38 u32 reserved[2];
53} __attribute__ ((packed)); 39} __attribute__ ((packed));
54 40
55enum rtl8180_rx_desc_flags {
56 RTL8180_RX_DESC_FLAG_ICV_ERR = (1 << 12),
57 RTL8180_RX_DESC_FLAG_CRC32_ERR = (1 << 13),
58 RTL8180_RX_DESC_FLAG_PM = (1 << 14),
59 RTL8180_RX_DESC_FLAG_RX_ERR = (1 << 15),
60 RTL8180_RX_DESC_FLAG_BCAST = (1 << 16),
61 RTL8180_RX_DESC_FLAG_PAM = (1 << 17),
62 RTL8180_RX_DESC_FLAG_MCAST = (1 << 18),
63 RTL8180_RX_DESC_FLAG_SPLCP = (1 << 25),
64 RTL8180_RX_DESC_FLAG_FOF = (1 << 26),
65 RTL8180_RX_DESC_FLAG_DMA_FAIL = (1 << 27),
66 RTL8180_RX_DESC_FLAG_LS = (1 << 28),
67 RTL8180_RX_DESC_FLAG_FS = (1 << 29),
68 RTL8180_RX_DESC_FLAG_EOR = (1 << 30),
69 RTL8180_RX_DESC_FLAG_OWN = (1 << 31)
70};
71
72struct rtl8180_rx_desc { 41struct rtl8180_rx_desc {
73 __le32 flags; 42 __le32 flags;
74 __le32 flags2; 43 __le32 flags2;
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index b7172a12c057..abcd641c54be 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -110,12 +110,12 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
110 struct sk_buff *skb = priv->rx_buf[priv->rx_idx]; 110 struct sk_buff *skb = priv->rx_buf[priv->rx_idx];
111 u32 flags = le32_to_cpu(entry->flags); 111 u32 flags = le32_to_cpu(entry->flags);
112 112
113 if (flags & RTL8180_RX_DESC_FLAG_OWN) 113 if (flags & RTL818X_RX_DESC_FLAG_OWN)
114 return; 114 return;
115 115
116 if (unlikely(flags & (RTL8180_RX_DESC_FLAG_DMA_FAIL | 116 if (unlikely(flags & (RTL818X_RX_DESC_FLAG_DMA_FAIL |
117 RTL8180_RX_DESC_FLAG_FOF | 117 RTL818X_RX_DESC_FLAG_FOF |
118 RTL8180_RX_DESC_FLAG_RX_ERR))) 118 RTL818X_RX_DESC_FLAG_RX_ERR)))
119 goto done; 119 goto done;
120 else { 120 else {
121 u32 flags2 = le32_to_cpu(entry->flags2); 121 u32 flags2 = le32_to_cpu(entry->flags2);
@@ -140,7 +140,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
140 rx_status.band = dev->conf.channel->band; 140 rx_status.band = dev->conf.channel->band;
141 rx_status.mactime = le64_to_cpu(entry->tsft); 141 rx_status.mactime = le64_to_cpu(entry->tsft);
142 rx_status.flag |= RX_FLAG_TSFT; 142 rx_status.flag |= RX_FLAG_TSFT;
143 if (flags & RTL8180_RX_DESC_FLAG_CRC32_ERR) 143 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
144 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 144 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
145 145
146 ieee80211_rx_irqsafe(dev, skb, &rx_status); 146 ieee80211_rx_irqsafe(dev, skb, &rx_status);
@@ -154,10 +154,10 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
154 154
155 done: 155 done:
156 entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb)); 156 entry->rx_buf = cpu_to_le32(*((dma_addr_t *)skb->cb));
157 entry->flags = cpu_to_le32(RTL8180_RX_DESC_FLAG_OWN | 157 entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
158 MAX_RX_SIZE); 158 MAX_RX_SIZE);
159 if (priv->rx_idx == 31) 159 if (priv->rx_idx == 31)
160 entry->flags |= cpu_to_le32(RTL8180_RX_DESC_FLAG_EOR); 160 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
161 priv->rx_idx = (priv->rx_idx + 1) % 32; 161 priv->rx_idx = (priv->rx_idx + 1) % 32;
162 } 162 }
163} 163}
@@ -173,7 +173,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
173 struct ieee80211_tx_info *info; 173 struct ieee80211_tx_info *info;
174 u32 flags = le32_to_cpu(entry->flags); 174 u32 flags = le32_to_cpu(entry->flags);
175 175
176 if (flags & RTL8180_TX_DESC_FLAG_OWN) 176 if (flags & RTL818X_TX_DESC_FLAG_OWN)
177 return; 177 return;
178 178
179 ring->idx = (ring->idx + 1) % ring->entries; 179 ring->idx = (ring->idx + 1) % ring->entries;
@@ -185,7 +185,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
185 memset(&info->status, 0, sizeof(info->status)); 185 memset(&info->status, 0, sizeof(info->status));
186 186
187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
188 if (flags & RTL8180_TX_DESC_FLAG_TX_OK) 188 if (flags & RTL818X_TX_DESC_FLAG_TX_OK)
189 info->flags |= IEEE80211_TX_STAT_ACK; 189 info->flags |= IEEE80211_TX_STAT_ACK;
190 else 190 else
191 info->status.excessive_retries = 1; 191 info->status.excessive_retries = 1;
@@ -252,20 +252,20 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
252 mapping = pci_map_single(priv->pdev, skb->data, 252 mapping = pci_map_single(priv->pdev, skb->data,
253 skb->len, PCI_DMA_TODEVICE); 253 skb->len, PCI_DMA_TODEVICE);
254 254
255 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS | 255 tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
256 RTL8180_TX_DESC_FLAG_LS | 256 RTL818X_TX_DESC_FLAG_LS |
257 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) | 257 (ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
258 skb->len; 258 skb->len;
259 259
260 if (priv->r8185) 260 if (priv->r8185)
261 tx_flags |= RTL8180_TX_DESC_FLAG_DMA | 261 tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
262 RTL8180_TX_DESC_FLAG_NO_ENC; 262 RTL818X_TX_DESC_FLAG_NO_ENC;
263 263
264 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 264 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
265 tx_flags |= RTL8180_TX_DESC_FLAG_RTS; 265 tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
267 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 267 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
268 tx_flags |= RTL8180_TX_DESC_FLAG_CTS; 268 tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
270 } 270 }
271 271
@@ -446,10 +446,10 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
446 *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb), 446 *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
447 MAX_RX_SIZE, PCI_DMA_FROMDEVICE); 447 MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
448 entry->rx_buf = cpu_to_le32(*mapping); 448 entry->rx_buf = cpu_to_le32(*mapping);
449 entry->flags = cpu_to_le32(RTL8180_RX_DESC_FLAG_OWN | 449 entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
450 MAX_RX_SIZE); 450 MAX_RX_SIZE);
451 } 451 }
452 entry->flags |= cpu_to_le32(RTL8180_RX_DESC_FLAG_EOR); 452 entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
453 return 0; 453 return 0;
454} 454}
455 455
@@ -615,7 +615,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
615 reg |= RTL818X_CMD_TX_ENABLE; 615 reg |= RTL818X_CMD_TX_ENABLE;
616 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 616 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
617 617
618 priv->mode = IEEE80211_IF_TYPE_MNTR; 618 priv->mode = NL80211_IFTYPE_MONITOR;
619 return 0; 619 return 0;
620 620
621 err_free_rings: 621 err_free_rings:
@@ -633,7 +633,7 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
633 u8 reg; 633 u8 reg;
634 int i; 634 int i;
635 635
636 priv->mode = IEEE80211_IF_TYPE_INVALID; 636 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
637 637
638 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 638 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
639 639
@@ -661,11 +661,11 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
661{ 661{
662 struct rtl8180_priv *priv = dev->priv; 662 struct rtl8180_priv *priv = dev->priv;
663 663
664 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 664 if (priv->mode != NL80211_IFTYPE_MONITOR)
665 return -EOPNOTSUPP; 665 return -EOPNOTSUPP;
666 666
667 switch (conf->type) { 667 switch (conf->type) {
668 case IEEE80211_IF_TYPE_STA: 668 case NL80211_IFTYPE_STATION:
669 priv->mode = conf->type; 669 priv->mode = conf->type;
670 break; 670 break;
671 default: 671 default:
@@ -688,7 +688,7 @@ static void rtl8180_remove_interface(struct ieee80211_hw *dev,
688 struct ieee80211_if_init_conf *conf) 688 struct ieee80211_if_init_conf *conf)
689{ 689{
690 struct rtl8180_priv *priv = dev->priv; 690 struct rtl8180_priv *priv = dev->priv;
691 priv->mode = IEEE80211_IF_TYPE_MNTR; 691 priv->mode = NL80211_IFTYPE_MONITOR;
692 priv->vif = NULL; 692 priv->vif = NULL;
693} 693}
694 694
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 5a9515c99960..e82bb4d289e8 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -58,12 +58,6 @@ struct rtl8187b_rx_hdr {
58 58
59/* {rtl8187,rtl8187b}_tx_info is in skb */ 59/* {rtl8187,rtl8187b}_tx_info is in skb */
60 60
61/* Tx flags are common between rtl8187 and rtl8187b */
62#define RTL8187_TX_FLAG_NO_ENCRYPT (1 << 15)
63#define RTL8187_TX_FLAG_MORE_FRAG (1 << 17)
64#define RTL8187_TX_FLAG_CTS (1 << 18)
65#define RTL8187_TX_FLAG_RTS (1 << 23)
66
67struct rtl8187_tx_hdr { 61struct rtl8187_tx_hdr {
68 __le32 flags; 62 __le32 flags;
69 __le16 rts_duration; 63 __le16 rts_duration;
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index ca5deb6244e6..e9902613e2ee 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -187,18 +187,18 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
187 } 187 }
188 188
189 flags = skb->len; 189 flags = skb->len;
190 flags |= RTL8187_TX_FLAG_NO_ENCRYPT; 190 flags |= RTL818X_TX_DESC_FLAG_NO_ENC;
191 191
192 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24; 192 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
193 if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control)) 193 if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control))
194 flags |= RTL8187_TX_FLAG_MORE_FRAG; 194 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
195 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 195 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
196 flags |= RTL8187_TX_FLAG_RTS; 196 flags |= RTL818X_TX_DESC_FLAG_RTS;
197 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 197 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
198 rts_dur = ieee80211_rts_duration(dev, priv->vif, 198 rts_dur = ieee80211_rts_duration(dev, priv->vif,
199 skb->len, info); 199 skb->len, info);
200 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 200 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
201 flags |= RTL8187_TX_FLAG_CTS; 201 flags |= RTL818X_TX_DESC_FLAG_CTS;
202 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 202 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
203 } 203 }
204 204
@@ -354,7 +354,7 @@ static void rtl8187_rx_cb(struct urb *urb)
354 rx_status.freq = dev->conf.channel->center_freq; 354 rx_status.freq = dev->conf.channel->center_freq;
355 rx_status.band = dev->conf.channel->band; 355 rx_status.band = dev->conf.channel->band;
356 rx_status.flag |= RX_FLAG_TSFT; 356 rx_status.flag |= RX_FLAG_TSFT;
357 if (flags & (1 << 13)) 357 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
358 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 358 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
359 ieee80211_rx_irqsafe(dev, skb, &rx_status); 359 ieee80211_rx_irqsafe(dev, skb, &rx_status);
360 360
@@ -836,11 +836,11 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
836 struct rtl8187_priv *priv = dev->priv; 836 struct rtl8187_priv *priv = dev->priv;
837 int i; 837 int i;
838 838
839 if (priv->mode != IEEE80211_IF_TYPE_MNTR) 839 if (priv->mode != NL80211_IFTYPE_MONITOR)
840 return -EOPNOTSUPP; 840 return -EOPNOTSUPP;
841 841
842 switch (conf->type) { 842 switch (conf->type) {
843 case IEEE80211_IF_TYPE_STA: 843 case NL80211_IFTYPE_STATION:
844 priv->mode = conf->type; 844 priv->mode = conf->type;
845 break; 845 break;
846 default: 846 default:
@@ -865,7 +865,7 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
865{ 865{
866 struct rtl8187_priv *priv = dev->priv; 866 struct rtl8187_priv *priv = dev->priv;
867 mutex_lock(&priv->conf_mutex); 867 mutex_lock(&priv->conf_mutex);
868 priv->mode = IEEE80211_IF_TYPE_MNTR; 868 priv->mode = NL80211_IFTYPE_MONITOR;
869 priv->vif = NULL; 869 priv->vif = NULL;
870 mutex_unlock(&priv->conf_mutex); 870 mutex_unlock(&priv->conf_mutex);
871} 871}
@@ -1057,7 +1057,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1057 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band; 1057 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
1058 1058
1059 1059
1060 priv->mode = IEEE80211_IF_TYPE_MNTR; 1060 priv->mode = NL80211_IFTYPE_MONITOR;
1061 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1061 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1062 IEEE80211_HW_RX_INCLUDES_FCS; 1062 IEEE80211_HW_RX_INCLUDES_FCS;
1063 1063
@@ -1184,6 +1184,8 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1184 dev->max_signal = 65; 1184 dev->max_signal = 65;
1185 } 1185 }
1186 1186
1187 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1188
1187 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b) 1189 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
1188 printk(KERN_INFO "rtl8187: inconsistency between id with OEM" 1190 printk(KERN_INFO "rtl8187: inconsistency between id with OEM"
1189 " info!\n"); 1191 " info!\n");
diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h
index 00900fe16fce..3538b15211b1 100644
--- a/drivers/net/wireless/rtl818x.h
+++ b/drivers/net/wireless/rtl818x.h
@@ -193,4 +193,39 @@ struct rtl818x_rf_ops {
193 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); 193 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
194}; 194};
195 195
196/* Tx/Rx flags are common between RTL818X chips */
197
198enum rtl818x_tx_desc_flags {
199 RTL818X_TX_DESC_FLAG_NO_ENC = (1 << 15),
200 RTL818X_TX_DESC_FLAG_TX_OK = (1 << 15),
201 RTL818X_TX_DESC_FLAG_SPLCP = (1 << 16),
202 RTL818X_TX_DESC_FLAG_RX_UNDER = (1 << 16),
203 RTL818X_TX_DESC_FLAG_MOREFRAG = (1 << 17),
204 RTL818X_TX_DESC_FLAG_CTS = (1 << 18),
205 RTL818X_TX_DESC_FLAG_RTS = (1 << 23),
206 RTL818X_TX_DESC_FLAG_LS = (1 << 28),
207 RTL818X_TX_DESC_FLAG_FS = (1 << 29),
208 RTL818X_TX_DESC_FLAG_DMA = (1 << 30),
209 RTL818X_TX_DESC_FLAG_OWN = (1 << 31)
210};
211
212enum rtl818x_rx_desc_flags {
213 RTL818X_RX_DESC_FLAG_ICV_ERR = (1 << 12),
214 RTL818X_RX_DESC_FLAG_CRC32_ERR = (1 << 13),
215 RTL818X_RX_DESC_FLAG_PM = (1 << 14),
216 RTL818X_RX_DESC_FLAG_RX_ERR = (1 << 15),
217 RTL818X_RX_DESC_FLAG_BCAST = (1 << 16),
218 RTL818X_RX_DESC_FLAG_PAM = (1 << 17),
219 RTL818X_RX_DESC_FLAG_MCAST = (1 << 18),
220 RTL818X_RX_DESC_FLAG_QOS = (1 << 19), /* RTL8187(B) only */
221 RTL818X_RX_DESC_FLAG_TRSW = (1 << 24), /* RTL8187(B) only */
222 RTL818X_RX_DESC_FLAG_SPLCP = (1 << 25),
223 RTL818X_RX_DESC_FLAG_FOF = (1 << 26),
224 RTL818X_RX_DESC_FLAG_DMA_FAIL = (1 << 27),
225 RTL818X_RX_DESC_FLAG_LS = (1 << 28),
226 RTL818X_RX_DESC_FLAG_FS = (1 << 29),
227 RTL818X_RX_DESC_FLAG_EOR = (1 << 30),
228 RTL818X_RX_DESC_FLAG_OWN = (1 << 31)
229};
230
196#endif /* RTL818X_H */ 231#endif /* RTL818X_H */
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 98df9bc7836a..67b26d3c3cd5 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -25,7 +25,6 @@
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/firmware.h>
29#include <pcmcia/cs_types.h> 28#include <pcmcia/cs_types.h>
30#include <pcmcia/cs.h> 29#include <pcmcia/cs.h>
31#include <pcmcia/cistpl.h> 30#include <pcmcia/cistpl.h>
@@ -34,9 +33,6 @@
34 33
35#include "orinoco.h" 34#include "orinoco.h"
36 35
37static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
38static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
39
40/********************************************************************/ 36/********************************************************************/
41/* Module stuff */ 37/* Module stuff */
42/********************************************************************/ 38/********************************************************************/
@@ -71,161 +67,11 @@ struct orinoco_pccard {
71static int spectrum_cs_config(struct pcmcia_device *link); 67static int spectrum_cs_config(struct pcmcia_device *link);
72static void spectrum_cs_release(struct pcmcia_device *link); 68static void spectrum_cs_release(struct pcmcia_device *link);
73 69
74/********************************************************************/
75/* Firmware downloader */
76/********************************************************************/
77
78/* Position of PDA in the adapter memory */
79#define EEPROM_ADDR 0x3000
80#define EEPROM_LEN 0x200
81#define PDA_OFFSET 0x100
82
83#define PDA_ADDR (EEPROM_ADDR + PDA_OFFSET)
84#define PDA_WORDS ((EEPROM_LEN - PDA_OFFSET) / 2)
85
86/* Constants for the CISREG_CCSR register */ 70/* Constants for the CISREG_CCSR register */
87#define HCR_RUN 0x07 /* run firmware after reset */ 71#define HCR_RUN 0x07 /* run firmware after reset */
88#define HCR_IDLE 0x0E /* don't run firmware after reset */ 72#define HCR_IDLE 0x0E /* don't run firmware after reset */
89#define HCR_MEM16 0x10 /* memory width bit, should be preserved */ 73#define HCR_MEM16 0x10 /* memory width bit, should be preserved */
90 74
91/*
92 * AUX port access. To unlock the AUX port write the access keys to the
93 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
94 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
95 */
96#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
97#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
98#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
99
100#define HERMES_AUX_PW0 0xFE01
101#define HERMES_AUX_PW1 0xDC23
102#define HERMES_AUX_PW2 0xBA45
103
104/* End markers */
105#define PDI_END 0x00000000 /* End of PDA */
106#define BLOCK_END 0xFFFFFFFF /* Last image block */
107#define TEXT_END 0x1A /* End of text header */
108
109/*
110 * The following structures have little-endian fields denoted by
111 * the leading underscore. Don't access them directly - use inline
112 * functions defined below.
113 */
114
115/*
116 * The binary image to be downloaded consists of series of data blocks.
117 * Each block has the following structure.
118 */
119struct dblock {
120 __le32 addr; /* adapter address where to write the block */
121 __le16 len; /* length of the data only, in bytes */
122 char data[0]; /* data to be written */
123} __attribute__ ((packed));
124
125/*
126 * Plug Data References are located in in the image after the last data
127 * block. They refer to areas in the adapter memory where the plug data
128 * items with matching ID should be written.
129 */
130struct pdr {
131 __le32 id; /* record ID */
132 __le32 addr; /* adapter address where to write the data */
133 __le32 len; /* expected length of the data, in bytes */
134 char next[0]; /* next PDR starts here */
135} __attribute__ ((packed));
136
137
138/*
139 * Plug Data Items are located in the EEPROM read from the adapter by
140 * primary firmware. They refer to the device-specific data that should
141 * be plugged into the secondary firmware.
142 */
143struct pdi {
144 __le16 len; /* length of ID and data, in words */
145 __le16 id; /* record ID */
146 char data[0]; /* plug data */
147} __attribute__ ((packed));
148
149
150/* Functions for access to little-endian data */
151static inline u32
152dblock_addr(const struct dblock *blk)
153{
154 return le32_to_cpu(blk->addr);
155}
156
157static inline u32
158dblock_len(const struct dblock *blk)
159{
160 return le16_to_cpu(blk->len);
161}
162
163static inline u32
164pdr_id(const struct pdr *pdr)
165{
166 return le32_to_cpu(pdr->id);
167}
168
169static inline u32
170pdr_addr(const struct pdr *pdr)
171{
172 return le32_to_cpu(pdr->addr);
173}
174
175static inline u32
176pdr_len(const struct pdr *pdr)
177{
178 return le32_to_cpu(pdr->len);
179}
180
181static inline u32
182pdi_id(const struct pdi *pdi)
183{
184 return le16_to_cpu(pdi->id);
185}
186
187/* Return length of the data only, in bytes */
188static inline u32
189pdi_len(const struct pdi *pdi)
190{
191 return 2 * (le16_to_cpu(pdi->len) - 1);
192}
193
194
195/* Set address of the auxiliary port */
196static inline void
197spectrum_aux_setaddr(hermes_t *hw, u32 addr)
198{
199 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
200 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
201}
202
203
204/* Open access to the auxiliary port */
205static int
206spectrum_aux_open(hermes_t *hw)
207{
208 int i;
209
210 /* Already open? */
211 if (hermes_read_reg(hw, HERMES_CONTROL) == HERMES_AUX_ENABLED)
212 return 0;
213
214 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
215 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
216 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
217 hermes_write_reg(hw, HERMES_CONTROL, HERMES_AUX_ENABLE);
218
219 for (i = 0; i < 20; i++) {
220 udelay(10);
221 if (hermes_read_reg(hw, HERMES_CONTROL) ==
222 HERMES_AUX_ENABLED)
223 return 0;
224 }
225
226 return -EBUSY;
227}
228
229 75
230#define CS_CHECK(fn, ret) \ 76#define CS_CHECK(fn, ret) \
231 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) 77 do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
@@ -292,275 +138,29 @@ spectrum_reset(struct pcmcia_device *link, int idle)
292 return -ENODEV; 138 return -ENODEV;
293} 139}
294 140
141/********************************************************************/
142/* Device methods */
143/********************************************************************/
295 144
296/*
297 * Scan PDR for the record with the specified RECORD_ID.
298 * If it's not found, return NULL.
299 */
300static struct pdr *
301spectrum_find_pdr(struct pdr *first_pdr, u32 record_id)
302{
303 struct pdr *pdr = first_pdr;
304
305 while (pdr_id(pdr) != PDI_END) {
306 /*
307 * PDR area is currently not terminated by PDI_END.
308 * It's followed by CRC records, which have the type
309 * field where PDR has length. The type can be 0 or 1.
310 */
311 if (pdr_len(pdr) < 2)
312 return NULL;
313
314 /* If the record ID matches, we are done */
315 if (pdr_id(pdr) == record_id)
316 return pdr;
317
318 pdr = (struct pdr *) pdr->next;
319 }
320 return NULL;
321}
322
323
324/* Process one Plug Data Item - find corresponding PDR and plug it */
325static int
326spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
327{
328 struct pdr *pdr;
329
330 /* Find the PDI corresponding to this PDR */
331 pdr = spectrum_find_pdr(first_pdr, pdi_id(pdi));
332
333 /* No match is found, safe to ignore */
334 if (!pdr)
335 return 0;
336
337 /* Lengths of the data in PDI and PDR must match */
338 if (pdi_len(pdi) != pdr_len(pdr))
339 return -EINVAL;
340
341 /* do the actual plugging */
342 spectrum_aux_setaddr(hw, pdr_addr(pdr));
343 hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
344
345 return 0;
346}
347
348
349/* Read PDA from the adapter */
350static int
351spectrum_read_pda(hermes_t *hw, __le16 *pda, int pda_len)
352{
353 int ret;
354 int pda_size;
355
356 /* Issue command to read EEPROM */
357 ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
358 if (ret)
359 return ret;
360
361 /* Open auxiliary port */
362 ret = spectrum_aux_open(hw);
363 if (ret)
364 return ret;
365
366 /* read PDA from EEPROM */
367 spectrum_aux_setaddr(hw, PDA_ADDR);
368 hermes_read_words(hw, HERMES_AUXDATA, pda, pda_len / 2);
369
370 /* Check PDA length */
371 pda_size = le16_to_cpu(pda[0]);
372 if (pda_size > pda_len)
373 return -EINVAL;
374
375 return 0;
376}
377
378
379/* Parse PDA and write the records into the adapter */
380static int
381spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block,
382 __le16 *pda)
383{
384 int ret;
385 struct pdi *pdi;
386 struct pdr *first_pdr;
387 const struct dblock *blk = first_block;
388
389 /* Skip all blocks to locate Plug Data References */
390 while (dblock_addr(blk) != BLOCK_END)
391 blk = (struct dblock *) &blk->data[dblock_len(blk)];
392
393 first_pdr = (struct pdr *) blk;
394
395 /* Go through every PDI and plug them into the adapter */
396 pdi = (struct pdi *) (pda + 2);
397 while (pdi_id(pdi) != PDI_END) {
398 ret = spectrum_plug_pdi(hw, first_pdr, pdi);
399 if (ret)
400 return ret;
401
402 /* Increment to the next PDI */
403 pdi = (struct pdi *) &pdi->data[pdi_len(pdi)];
404 }
405 return 0;
406}
407
408
409/* Load firmware blocks into the adapter */
410static int
411spectrum_load_blocks(hermes_t *hw, const struct dblock *first_block)
412{
413 const struct dblock *blk;
414 u32 blkaddr;
415 u32 blklen;
416
417 blk = first_block;
418 blkaddr = dblock_addr(blk);
419 blklen = dblock_len(blk);
420
421 while (dblock_addr(blk) != BLOCK_END) {
422 spectrum_aux_setaddr(hw, blkaddr);
423 hermes_write_bytes(hw, HERMES_AUXDATA, blk->data,
424 blklen);
425
426 blk = (struct dblock *) &blk->data[blklen];
427 blkaddr = dblock_addr(blk);
428 blklen = dblock_len(blk);
429 }
430 return 0;
431}
432
433
434/*
435 * Process a firmware image - stop the card, load the firmware, reset
436 * the card and make sure it responds. For the secondary firmware take
437 * care of the PDA - read it and then write it on top of the firmware.
438 */
439static int 145static int
440spectrum_dl_image(hermes_t *hw, struct pcmcia_device *link, 146spectrum_cs_hard_reset(struct orinoco_private *priv)
441 const unsigned char *image, int secondary)
442{ 147{
443 int ret; 148 struct orinoco_pccard *card = priv->card;
444 const unsigned char *ptr; 149 struct pcmcia_device *link = card->p_dev;
445 const struct dblock *first_block;
446
447 /* Plug Data Area (PDA) */
448 __le16 pda[PDA_WORDS];
449
450 /* Binary block begins after the 0x1A marker */
451 ptr = image;
452 while (*ptr++ != TEXT_END);
453 first_block = (const struct dblock *) ptr;
454
455 /* Read the PDA */
456 if (secondary) {
457 ret = spectrum_read_pda(hw, pda, sizeof(pda));
458 if (ret)
459 return ret;
460 }
461
462 /* Stop the firmware, so that it can be safely rewritten */
463 ret = spectrum_reset(link, 1);
464 if (ret)
465 return ret;
466
467 /* Program the adapter with new firmware */
468 ret = spectrum_load_blocks(hw, first_block);
469 if (ret)
470 return ret;
471
472 /* Write the PDA to the adapter */
473 if (secondary) {
474 ret = spectrum_apply_pda(hw, first_block, pda);
475 if (ret)
476 return ret;
477 }
478
479 /* Run the firmware */
480 ret = spectrum_reset(link, 0);
481 if (ret)
482 return ret;
483
484 /* Reset hermes chip and make sure it responds */
485 ret = hermes_init(hw);
486
487 /* hermes_reset() should return 0 with the secondary firmware */
488 if (secondary && ret != 0)
489 return -ENODEV;
490 150
491 /* And this should work with any firmware */ 151 /* Soft reset using COR and HCR */
492 if (!hermes_present(hw)) 152 spectrum_reset(link, 0);
493 return -ENODEV;
494 153
495 return 0; 154 return 0;
496} 155}
497 156
498
499/*
500 * Download the firmware into the card, this also does a PCMCIA soft
501 * reset on the card, to make sure it's in a sane state.
502 */
503static int 157static int
504spectrum_dl_firmware(hermes_t *hw, struct pcmcia_device *link) 158spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle)
505{
506 int ret;
507 const struct firmware *fw_entry;
508
509 if (request_firmware(&fw_entry, primary_fw_name,
510 &handle_to_dev(link)) != 0) {
511 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
512 primary_fw_name);
513 return -ENOENT;
514 }
515
516 /* Load primary firmware */
517 ret = spectrum_dl_image(hw, link, fw_entry->data, 0);
518 release_firmware(fw_entry);
519 if (ret) {
520 printk(KERN_ERR PFX "Primary firmware download failed\n");
521 return ret;
522 }
523
524 if (request_firmware(&fw_entry, secondary_fw_name,
525 &handle_to_dev(link)) != 0) {
526 printk(KERN_ERR PFX "Cannot find firmware: %s\n",
527 secondary_fw_name);
528 return -ENOENT;
529 }
530
531 /* Load secondary firmware */
532 ret = spectrum_dl_image(hw, link, fw_entry->data, 1);
533 release_firmware(fw_entry);
534 if (ret) {
535 printk(KERN_ERR PFX "Secondary firmware download failed\n");
536 }
537
538 return ret;
539}
540
541/********************************************************************/
542/* Device methods */
543/********************************************************************/
544
545static int
546spectrum_cs_hard_reset(struct orinoco_private *priv)
547{ 159{
548 struct orinoco_pccard *card = priv->card; 160 struct orinoco_pccard *card = priv->card;
549 struct pcmcia_device *link = card->p_dev; 161 struct pcmcia_device *link = card->p_dev;
550 int err;
551 162
552 if (!hermes_present(&priv->hw)) { 163 return spectrum_reset(link, idle);
553 /* The firmware needs to be reloaded */
554 if (spectrum_dl_firmware(&priv->hw, link) != 0) {
555 printk(KERN_ERR PFX "Firmware download failed\n");
556 err = -ENODEV;
557 }
558 } else {
559 /* Soft reset using COR and HCR */
560 spectrum_reset(link, 0);
561 }
562
563 return 0;
564} 164}
565 165
566/********************************************************************/ 166/********************************************************************/
@@ -582,7 +182,9 @@ spectrum_cs_probe(struct pcmcia_device *link)
582 struct orinoco_private *priv; 182 struct orinoco_private *priv;
583 struct orinoco_pccard *card; 183 struct orinoco_pccard *card;
584 184
585 dev = alloc_orinocodev(sizeof(*card), spectrum_cs_hard_reset); 185 dev = alloc_orinocodev(sizeof(*card), &handle_to_dev(link),
186 spectrum_cs_hard_reset,
187 spectrum_cs_stop_firmware);
586 if (! dev) 188 if (! dev)
587 return -ENOMEM; 189 return -ENOMEM;
588 priv = netdev_priv(dev); 190 priv = netdev_priv(dev);
@@ -593,7 +195,7 @@ spectrum_cs_probe(struct pcmcia_device *link)
593 link->priv = dev; 195 link->priv = dev;
594 196
595 /* Interrupt setup */ 197 /* Interrupt setup */
596 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 198 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
597 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 199 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
598 link->irq.Handler = orinoco_interrupt; 200 link->irq.Handler = orinoco_interrupt;
599 link->irq.Instance = dev; 201 link->irq.Instance = dev;
@@ -784,7 +386,7 @@ spectrum_cs_config(struct pcmcia_device *link)
784 dev->irq = link->irq.AssignedIRQ; 386 dev->irq = link->irq.AssignedIRQ;
785 card->node.major = card->node.minor = 0; 387 card->node.major = card->node.minor = 0;
786 388
787 /* Reset card and download firmware */ 389 /* Reset card */
788 if (spectrum_cs_hard_reset(priv) != 0) { 390 if (spectrum_cs_hard_reset(priv) != 0) {
789 goto failed; 391 goto failed;
790 } 392 }
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 00a3559e5aa4..b5de38a9b791 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -4496,7 +4496,7 @@ wavelan_probe(struct pcmcia_device *p_dev)
4496 p_dev->io.IOAddrLines = 3; 4496 p_dev->io.IOAddrLines = 3;
4497 4497
4498 /* Interrupt setup */ 4498 /* Interrupt setup */
4499 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 4499 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
4500 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; 4500 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
4501 p_dev->irq.Handler = wavelan_interrupt; 4501 p_dev->irq.Handler = wavelan_interrupt;
4502 4502
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 377141995e36..74a5ad2f1223 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -79,7 +79,7 @@ static int pc_debug = PCMCIA_DEBUG;
79module_param(pc_debug, int, 0); 79module_param(pc_debug, int, 0);
80#define dprintk(n, format, args...) \ 80#define dprintk(n, format, args...) \
81 { if (pc_debug > (n)) \ 81 { if (pc_debug > (n)) \
82 printk(KERN_INFO "%s: " format "\n", __FUNCTION__ , ##args); } 82 printk(KERN_INFO "%s: " format "\n", __func__ , ##args); }
83#else 83#else
84#define dprintk(n, format, args...) 84#define dprintk(n, format, args...)
85#endif 85#endif
@@ -470,7 +470,7 @@ static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend)
470 spin_unlock_irqrestore(&this->lock, flags); 470 spin_unlock_irqrestore(&this->lock, flags);
471 rc = wait_event_interruptible(this->wait, 471 rc = wait_event_interruptible(this->wait,
472 this->sig_pwr_mgmt_confirm.status != 255); 472 this->sig_pwr_mgmt_confirm.status != 255);
473 printk(KERN_INFO "%s: %s status=%d\n", __FUNCTION__, 473 printk(KERN_INFO "%s: %s status=%d\n", __func__,
474 suspend ? "suspend" : "resume", 474 suspend ? "suspend" : "resume",
475 this->sig_pwr_mgmt_confirm.status); 475 this->sig_pwr_mgmt_confirm.status);
476 goto out; 476 goto out;
@@ -1199,7 +1199,7 @@ static int wl3501_reset_board(struct wl3501_card *this)
1199 } 1199 }
1200 WL3501_NOPLOOP(10); 1200 WL3501_NOPLOOP(10);
1201 } 1201 }
1202 printk(KERN_WARNING "%s: failed to reset the board!\n", __FUNCTION__); 1202 printk(KERN_WARNING "%s: failed to reset the board!\n", __func__);
1203 rc = -ENODEV; 1203 rc = -ENODEV;
1204out: 1204out:
1205 return rc; 1205 return rc;
@@ -1250,7 +1250,7 @@ static int wl3501_init_firmware(struct wl3501_card *this)
1250out: 1250out:
1251 return rc; 1251 return rc;
1252fail: 1252fail:
1253 printk(KERN_WARNING "%s: failed!\n", __FUNCTION__); 1253 printk(KERN_WARNING "%s: failed!\n", __func__);
1254 goto out; 1254 goto out;
1255} 1255}
1256 1256
@@ -1917,7 +1917,7 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1917 p_dev->io.IOAddrLines = 5; 1917 p_dev->io.IOAddrLines = 5;
1918 1918
1919 /* Interrupt setup */ 1919 /* Interrupt setup */
1920 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 1920 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
1921 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID; 1921 p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
1922 p_dev->irq.Handler = wl3501_interrupt; 1922 p_dev->irq.Handler = wl3501_interrupt;
1923 1923
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index cc36126cee88..1907eafb9b16 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -1,6 +1,6 @@
1obj-$(CONFIG_ZD1211RW) += zd1211rw.o 1obj-$(CONFIG_ZD1211RW) += zd1211rw.o
2 2
3zd1211rw-objs := zd_chip.o zd_ieee80211.o zd_mac.o \ 3zd1211rw-objs := zd_chip.o zd_mac.o \
4 zd_rf_al2230.o zd_rf_rf2959.o \ 4 zd_rf_al2230.o zd_rf_rf2959.o \
5 zd_rf_al7230b.o zd_rf_uw2453.o \ 5 zd_rf_al7230b.o zd_rf_uw2453.o \
6 zd_rf.o zd_usb.o 6 zd_rf.o zd_usb.o
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 0acb5c345734..e0ac58b8ff1f 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -28,7 +28,6 @@
28 28
29#include "zd_def.h" 29#include "zd_def.h"
30#include "zd_chip.h" 30#include "zd_chip.h"
31#include "zd_ieee80211.h"
32#include "zd_mac.h" 31#include "zd_mac.h"
33#include "zd_rf.h" 32#include "zd_rf.h"
34 33
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
deleted file mode 100644
index d8dc41ec0e5d..000000000000
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.c
+++ /dev/null
@@ -1,100 +0,0 @@
1/* ZD1211 USB-WLAN driver for Linux
2 *
3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * In the long term, we'll probably find a better way of handling regulatory
23 * requirements outside of the driver.
24 */
25
26#include <linux/kernel.h>
27#include <net/mac80211.h>
28
29#include "zd_ieee80211.h"
30#include "zd_mac.h"
31
32struct channel_range {
33 u8 regdomain;
34 u8 start;
35 u8 end; /* exclusive (channel must be less than end) */
36};
37
38static const struct channel_range channel_ranges[] = {
39 { ZD_REGDOMAIN_FCC, 1, 12 },
40 { ZD_REGDOMAIN_IC, 1, 12 },
41 { ZD_REGDOMAIN_ETSI, 1, 14 },
42 { ZD_REGDOMAIN_JAPAN, 1, 14 },
43 { ZD_REGDOMAIN_SPAIN, 1, 14 },
44 { ZD_REGDOMAIN_FRANCE, 1, 14 },
45
46 /* Japan originally only had channel 14 available (see CHNL_ID 0x40 in
47 * 802.11). However, in 2001 the range was extended to include channels
48 * 1-13. The ZyDAS devices still use the old region code but are
49 * designed to allow the extra channel access in Japan. */
50 { ZD_REGDOMAIN_JAPAN_ADD, 1, 15 },
51};
52
53static const struct channel_range *zd_channel_range(u8 regdomain)
54{
55 int i;
56 for (i = 0; i < ARRAY_SIZE(channel_ranges); i++) {
57 const struct channel_range *range = &channel_ranges[i];
58 if (range->regdomain == regdomain)
59 return range;
60 }
61 return NULL;
62}
63
64#define CHAN_TO_IDX(chan) ((chan) - 1)
65
66static void unmask_bg_channels(struct ieee80211_hw *hw,
67 const struct channel_range *range,
68 struct ieee80211_supported_band *sband)
69{
70 u8 channel;
71
72 for (channel = range->start; channel < range->end; channel++) {
73 struct ieee80211_channel *chan =
74 &sband->channels[CHAN_TO_IDX(channel)];
75 chan->flags = 0;
76 }
77}
78
79void zd_geo_init(struct ieee80211_hw *hw, u8 regdomain)
80{
81 struct zd_mac *mac = zd_hw_mac(hw);
82 const struct channel_range *range;
83
84 dev_dbg(zd_mac_dev(mac), "regdomain %#02x\n", regdomain);
85
86 range = zd_channel_range(regdomain);
87 if (!range) {
88 /* The vendor driver overrides the regulatory domain and
89 * allowed channel registers and unconditionally restricts
90 * available channels to 1-11 everywhere. Match their
91 * questionable behaviour only for regdomains which we don't
92 * recognise. */
93 dev_warn(zd_mac_dev(mac), "Unrecognised regulatory domain: "
94 "%#02x. Defaulting to FCC.\n", regdomain);
95 range = zd_channel_range(ZD_REGDOMAIN_FCC);
96 }
97
98 unmask_bg_channels(hw, range, &mac->band);
99}
100
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
deleted file mode 100644
index 26b79f197587..000000000000
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ /dev/null
@@ -1,95 +0,0 @@
1/* ZD1211 USB-WLAN driver for Linux
2 *
3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef _ZD_IEEE80211_H
22#define _ZD_IEEE80211_H
23
24#include <net/mac80211.h>
25
26/* Additional definitions from the standards.
27 */
28
29#define ZD_REGDOMAIN_FCC 0x10
30#define ZD_REGDOMAIN_IC 0x20
31#define ZD_REGDOMAIN_ETSI 0x30
32#define ZD_REGDOMAIN_SPAIN 0x31
33#define ZD_REGDOMAIN_FRANCE 0x32
34#define ZD_REGDOMAIN_JAPAN_ADD 0x40
35#define ZD_REGDOMAIN_JAPAN 0x41
36
37enum {
38 MIN_CHANNEL24 = 1,
39 MAX_CHANNEL24 = 14,
40};
41
42void zd_geo_init(struct ieee80211_hw *hw, u8 regdomain);
43
44#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
45
46struct ofdm_plcp_header {
47 u8 prefix[3];
48 __le16 service;
49} __attribute__((packed));
50
51static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
52{
53 return header->prefix[0] & 0xf;
54}
55
56/* The following defines give the encoding of the 4-bit rate field in the
57 * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to
58 * define the zd-rate values for OFDM.
59 *
60 * See the struct zd_ctrlset definition in zd_mac.h.
61 */
62#define ZD_OFDM_PLCP_RATE_6M 0xb
63#define ZD_OFDM_PLCP_RATE_9M 0xf
64#define ZD_OFDM_PLCP_RATE_12M 0xa
65#define ZD_OFDM_PLCP_RATE_18M 0xe
66#define ZD_OFDM_PLCP_RATE_24M 0x9
67#define ZD_OFDM_PLCP_RATE_36M 0xd
68#define ZD_OFDM_PLCP_RATE_48M 0x8
69#define ZD_OFDM_PLCP_RATE_54M 0xc
70
71struct cck_plcp_header {
72 u8 signal;
73 u8 service;
74 __le16 length;
75 __le16 crc16;
76} __attribute__((packed));
77
78static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
79{
80 return header->signal;
81}
82
83/* These defines give the encodings of the signal field in the 802.11b PLCP
84 * header. The signal field gives the bit rate of the following packet. Even
85 * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s
86 * rate to stay consistent with Zydas and our use of the term.
87 *
88 * Notify that these values are *not* used in the zd-rates.
89 */
90#define ZD_CCK_PLCP_SIGNAL_1M 0x0a
91#define ZD_CCK_PLCP_SIGNAL_2M 0x14
92#define ZD_CCK_PLCP_SIGNAL_5M5 0x37
93#define ZD_CCK_PLCP_SIGNAL_11M 0x6e
94
95#endif /* _ZD_IEEE80211_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 4d7b98b05030..fe1867b25ff7 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -3,7 +3,7 @@
3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de> 3 * Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org> 4 * Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
5 * Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net> 5 * Copyright (C) 2006-2007 Michael Wu <flamingice@sourmilk.net>
6 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> 6 * Copyright (C) 2007-2008 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
@@ -29,9 +29,23 @@
29#include "zd_def.h" 29#include "zd_def.h"
30#include "zd_chip.h" 30#include "zd_chip.h"
31#include "zd_mac.h" 31#include "zd_mac.h"
32#include "zd_ieee80211.h"
33#include "zd_rf.h" 32#include "zd_rf.h"
34 33
34struct zd_reg_alpha2_map {
35 u32 reg;
36 char alpha2[2];
37};
38
39static struct zd_reg_alpha2_map reg_alpha2_map[] = {
40 { ZD_REGDOMAIN_FCC, "US" },
41 { ZD_REGDOMAIN_IC, "CA" },
42 { ZD_REGDOMAIN_ETSI, "DE" }, /* Generic ETSI, use most restrictive */
43 { ZD_REGDOMAIN_JAPAN, "JP" },
44 { ZD_REGDOMAIN_JAPAN_ADD, "JP" },
45 { ZD_REGDOMAIN_SPAIN, "ES" },
46 { ZD_REGDOMAIN_FRANCE, "FR" },
47};
48
35/* This table contains the hardware specific values for the modulation rates. */ 49/* This table contains the hardware specific values for the modulation rates. */
36static const struct ieee80211_rate zd_rates[] = { 50static const struct ieee80211_rate zd_rates[] = {
37 { .bitrate = 10, 51 { .bitrate = 10,
@@ -95,6 +109,21 @@ static void housekeeping_init(struct zd_mac *mac);
95static void housekeeping_enable(struct zd_mac *mac); 109static void housekeeping_enable(struct zd_mac *mac);
96static void housekeeping_disable(struct zd_mac *mac); 110static void housekeeping_disable(struct zd_mac *mac);
97 111
112static int zd_reg2alpha2(u8 regdomain, char *alpha2)
113{
114 unsigned int i;
115 struct zd_reg_alpha2_map *reg_map;
116 for (i = 0; i < ARRAY_SIZE(reg_alpha2_map); i++) {
117 reg_map = &reg_alpha2_map[i];
118 if (regdomain == reg_map->reg) {
119 alpha2[0] = reg_map->alpha2[0];
120 alpha2[1] = reg_map->alpha2[1];
121 return 0;
122 }
123 }
124 return 1;
125}
126
98int zd_mac_preinit_hw(struct ieee80211_hw *hw) 127int zd_mac_preinit_hw(struct ieee80211_hw *hw)
99{ 128{
100 int r; 129 int r;
@@ -115,6 +144,7 @@ int zd_mac_init_hw(struct ieee80211_hw *hw)
115 int r; 144 int r;
116 struct zd_mac *mac = zd_hw_mac(hw); 145 struct zd_mac *mac = zd_hw_mac(hw);
117 struct zd_chip *chip = &mac->chip; 146 struct zd_chip *chip = &mac->chip;
147 char alpha2[2];
118 u8 default_regdomain; 148 u8 default_regdomain;
119 149
120 r = zd_chip_enable_int(chip); 150 r = zd_chip_enable_int(chip);
@@ -139,7 +169,9 @@ int zd_mac_init_hw(struct ieee80211_hw *hw)
139 if (r) 169 if (r)
140 goto disable_int; 170 goto disable_int;
141 171
142 zd_geo_init(hw, mac->regdomain); 172 r = zd_reg2alpha2(mac->regdomain, alpha2);
173 if (!r)
174 regulatory_hint(hw->wiphy, alpha2, NULL);
143 175
144 r = 0; 176 r = 0;
145disable_int: 177disable_int:
@@ -579,7 +611,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
579 611
580 q = &zd_hw_mac(hw)->ack_wait_queue; 612 q = &zd_hw_mac(hw)->ack_wait_queue;
581 spin_lock_irqsave(&q->lock, flags); 613 spin_lock_irqsave(&q->lock, flags);
582 for (skb = q->next; skb != (struct sk_buff *)q; skb = skb->next) { 614 skb_queue_walk(q, skb) {
583 struct ieee80211_hdr *tx_hdr; 615 struct ieee80211_hdr *tx_hdr;
584 616
585 tx_hdr = (struct ieee80211_hdr *)skb->data; 617 tx_hdr = (struct ieee80211_hdr *)skb->data;
@@ -684,15 +716,15 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
684{ 716{
685 struct zd_mac *mac = zd_hw_mac(hw); 717 struct zd_mac *mac = zd_hw_mac(hw);
686 718
687 /* using IEEE80211_IF_TYPE_INVALID to indicate no mode selected */ 719 /* using NL80211_IFTYPE_UNSPECIFIED to indicate no mode selected */
688 if (mac->type != IEEE80211_IF_TYPE_INVALID) 720 if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
689 return -EOPNOTSUPP; 721 return -EOPNOTSUPP;
690 722
691 switch (conf->type) { 723 switch (conf->type) {
692 case IEEE80211_IF_TYPE_MNTR: 724 case NL80211_IFTYPE_MONITOR:
693 case IEEE80211_IF_TYPE_MESH_POINT: 725 case NL80211_IFTYPE_MESH_POINT:
694 case IEEE80211_IF_TYPE_STA: 726 case NL80211_IFTYPE_STATION:
695 case IEEE80211_IF_TYPE_IBSS: 727 case NL80211_IFTYPE_ADHOC:
696 mac->type = conf->type; 728 mac->type = conf->type;
697 break; 729 break;
698 default: 730 default:
@@ -706,7 +738,7 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
706 struct ieee80211_if_init_conf *conf) 738 struct ieee80211_if_init_conf *conf)
707{ 739{
708 struct zd_mac *mac = zd_hw_mac(hw); 740 struct zd_mac *mac = zd_hw_mac(hw);
709 mac->type = IEEE80211_IF_TYPE_INVALID; 741 mac->type = NL80211_IFTYPE_UNSPECIFIED;
710 zd_set_beacon_interval(&mac->chip, 0); 742 zd_set_beacon_interval(&mac->chip, 0);
711 zd_write_mac_addr(&mac->chip, NULL); 743 zd_write_mac_addr(&mac->chip, NULL);
712} 744}
@@ -725,8 +757,8 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
725 int associated; 757 int associated;
726 int r; 758 int r;
727 759
728 if (mac->type == IEEE80211_IF_TYPE_MESH_POINT || 760 if (mac->type == NL80211_IFTYPE_MESH_POINT ||
729 mac->type == IEEE80211_IF_TYPE_IBSS) { 761 mac->type == NL80211_IFTYPE_ADHOC) {
730 associated = true; 762 associated = true;
731 if (conf->changed & IEEE80211_IFCC_BEACON) { 763 if (conf->changed & IEEE80211_IFCC_BEACON) {
732 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 764 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
@@ -753,7 +785,7 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
753 return 0; 785 return 0;
754} 786}
755 787
756void zd_process_intr(struct work_struct *work) 788static void zd_process_intr(struct work_struct *work)
757{ 789{
758 u16 int_status; 790 u16 int_status;
759 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); 791 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
@@ -923,7 +955,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
923 spin_lock_init(&mac->lock); 955 spin_lock_init(&mac->lock);
924 mac->hw = hw; 956 mac->hw = hw;
925 957
926 mac->type = IEEE80211_IF_TYPE_INVALID; 958 mac->type = NL80211_IFTYPE_UNSPECIFIED;
927 959
928 memcpy(mac->channels, zd_channels, sizeof(zd_channels)); 960 memcpy(mac->channels, zd_channels, sizeof(zd_channels));
929 memcpy(mac->rates, zd_rates, sizeof(zd_rates)); 961 memcpy(mac->rates, zd_rates, sizeof(zd_rates));
@@ -937,6 +969,11 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
937 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 969 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
938 IEEE80211_HW_SIGNAL_DB; 970 IEEE80211_HW_SIGNAL_DB;
939 971
972 hw->wiphy->interface_modes =
973 BIT(NL80211_IFTYPE_MESH_POINT) |
974 BIT(NL80211_IFTYPE_STATION) |
975 BIT(NL80211_IFTYPE_ADHOC);
976
940 hw->max_signal = 100; 977 hw->max_signal = 100;
941 hw->queues = 1; 978 hw->queues = 1;
942 hw->extra_tx_headroom = sizeof(struct zd_ctrlset); 979 hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 18c1d56d3dd7..4c05d3ee4c37 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -25,7 +25,6 @@
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26 26
27#include "zd_chip.h" 27#include "zd_chip.h"
28#include "zd_ieee80211.h"
29 28
30struct zd_ctrlset { 29struct zd_ctrlset {
31 u8 modulation; 30 u8 modulation;
@@ -187,6 +186,70 @@ struct zd_mac {
187 unsigned int pass_ctrl:1; 186 unsigned int pass_ctrl:1;
188}; 187};
189 188
189#define ZD_REGDOMAIN_FCC 0x10
190#define ZD_REGDOMAIN_IC 0x20
191#define ZD_REGDOMAIN_ETSI 0x30
192#define ZD_REGDOMAIN_SPAIN 0x31
193#define ZD_REGDOMAIN_FRANCE 0x32
194#define ZD_REGDOMAIN_JAPAN_ADD 0x40
195#define ZD_REGDOMAIN_JAPAN 0x41
196
197enum {
198 MIN_CHANNEL24 = 1,
199 MAX_CHANNEL24 = 14,
200};
201
202#define ZD_PLCP_SERVICE_LENGTH_EXTENSION 0x80
203
204struct ofdm_plcp_header {
205 u8 prefix[3];
206 __le16 service;
207} __attribute__((packed));
208
209static inline u8 zd_ofdm_plcp_header_rate(const struct ofdm_plcp_header *header)
210{
211 return header->prefix[0] & 0xf;
212}
213
214/* The following defines give the encoding of the 4-bit rate field in the
215 * OFDM (802.11a/802.11g) PLCP header. Notify that these values are used to
216 * define the zd-rate values for OFDM.
217 *
218 * See the struct zd_ctrlset definition in zd_mac.h.
219 */
220#define ZD_OFDM_PLCP_RATE_6M 0xb
221#define ZD_OFDM_PLCP_RATE_9M 0xf
222#define ZD_OFDM_PLCP_RATE_12M 0xa
223#define ZD_OFDM_PLCP_RATE_18M 0xe
224#define ZD_OFDM_PLCP_RATE_24M 0x9
225#define ZD_OFDM_PLCP_RATE_36M 0xd
226#define ZD_OFDM_PLCP_RATE_48M 0x8
227#define ZD_OFDM_PLCP_RATE_54M 0xc
228
229struct cck_plcp_header {
230 u8 signal;
231 u8 service;
232 __le16 length;
233 __le16 crc16;
234} __attribute__((packed));
235
236static inline u8 zd_cck_plcp_header_signal(const struct cck_plcp_header *header)
237{
238 return header->signal;
239}
240
241/* These defines give the encodings of the signal field in the 802.11b PLCP
242 * header. The signal field gives the bit rate of the following packet. Even
243 * if technically wrong we use CCK here also for the 1 MBit/s and 2 MBit/s
244 * rate to stay consistent with Zydas and our use of the term.
245 *
246 * Notify that these values are *not* used in the zd-rates.
247 */
248#define ZD_CCK_PLCP_SIGNAL_1M 0x0a
249#define ZD_CCK_PLCP_SIGNAL_2M 0x14
250#define ZD_CCK_PLCP_SIGNAL_5M5 0x37
251#define ZD_CCK_PLCP_SIGNAL_11M 0x6e
252
190static inline struct zd_mac *zd_hw_mac(struct ieee80211_hw *hw) 253static inline struct zd_mac *zd_hw_mac(struct ieee80211_hw *hw)
191{ 254{
192 return hw->priv; 255 return hw->priv;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index ec4129312813..7207bfd2e6cd 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -23,7 +23,7 @@
23 23
24#include "zd_def.h" 24#include "zd_def.h"
25#include "zd_rf.h" 25#include "zd_rf.h"
26#include "zd_ieee80211.h" 26#include "zd_mac.h"
27#include "zd_chip.h" 27#include "zd_chip.h"
28 28
29static const char * const rfs[] = { 29static const char * const rfs[] = {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c7ab1b864516..908f50b17e26 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -760,7 +760,7 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
760 if (sense[SENSE_COMMAND_REJECT_BYTE] & 760 if (sense[SENSE_COMMAND_REJECT_BYTE] &
761 SENSE_COMMAND_REJECT_FLAG) { 761 SENSE_COMMAND_REJECT_FLAG) {
762 QETH_DBF_TEXT(TRACE, 2, "CMDREJi"); 762 QETH_DBF_TEXT(TRACE, 2, "CMDREJi");
763 return 0; 763 return 1;
764 } 764 }
765 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { 765 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
766 QETH_DBF_TEXT(TRACE, 2, "AFFE"); 766 QETH_DBF_TEXT(TRACE, 2, "AFFE");
@@ -884,6 +884,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
884 } 884 }
885 rc = qeth_get_problem(cdev, irb); 885 rc = qeth_get_problem(cdev, irb);
886 if (rc) { 886 if (rc) {
887 qeth_clear_ipacmd_list(card);
887 qeth_schedule_recovery(card); 888 qeth_schedule_recovery(card);
888 goto out; 889 goto out;
889 } 890 }
@@ -4147,6 +4148,7 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
4147 unsigned long flags; 4148 unsigned long flags;
4148 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 4149 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
4149 4150
4151 QETH_DBF_TEXT(SETUP, 2, "removedv");
4150 if (card->discipline.ccwgdriver) { 4152 if (card->discipline.ccwgdriver) {
4151 card->discipline.ccwgdriver->remove(gdev); 4153 card->discipline.ccwgdriver->remove(gdev);
4152 qeth_core_free_discipline(card); 4154 qeth_core_free_discipline(card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 3ac3cc1e03cc..955ba7a31b90 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -395,7 +395,8 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
395 } 395 }
396 if (card->state == CARD_STATE_SOFTSETUP) { 396 if (card->state == CARD_STATE_SOFTSETUP) {
397 qeth_l2_process_vlans(card, 1); 397 qeth_l2_process_vlans(card, 1);
398 qeth_l2_del_all_mc(card); 398 if (!card->use_hard_stop)
399 qeth_l2_del_all_mc(card);
399 qeth_clear_ipacmd_list(card); 400 qeth_clear_ipacmd_list(card);
400 card->state = CARD_STATE_HARDSETUP; 401 card->state = CARD_STATE_HARDSETUP;
401 } 402 }
@@ -559,7 +560,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
559 "device %s: x%x\n", CARD_BUS_ID(card), rc); 560 "device %s: x%x\n", CARD_BUS_ID(card), rc);
560 } 561 }
561 562
562 if (card->info.guestlan) { 563 if ((card->info.type == QETH_CARD_TYPE_IQD) ||
564 (card->info.guestlan)) {
563 rc = qeth_setadpparms_change_macaddr(card); 565 rc = qeth_setadpparms_change_macaddr(card);
564 if (rc) { 566 if (rc) {
565 QETH_DBF_MESSAGE(2, "couldn't get MAC address on " 567 QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
@@ -825,7 +827,6 @@ static int qeth_l2_open(struct net_device *dev)
825 } 827 }
826 card->data.state = CH_STATE_UP; 828 card->data.state = CH_STATE_UP;
827 card->state = CARD_STATE_UP; 829 card->state = CARD_STATE_UP;
828 card->dev->flags |= IFF_UP;
829 netif_start_queue(dev); 830 netif_start_queue(dev);
830 831
831 if (!card->lan_online && netif_carrier_ok(dev)) 832 if (!card->lan_online && netif_carrier_ok(dev))
@@ -840,7 +841,6 @@ static int qeth_l2_stop(struct net_device *dev)
840 841
841 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 842 QETH_DBF_TEXT(TRACE, 4, "qethstop");
842 netif_tx_disable(dev); 843 netif_tx_disable(dev);
843 card->dev->flags &= ~IFF_UP;
844 if (card->state == CARD_STATE_UP) 844 if (card->state == CARD_STATE_UP)
845 card->state = CARD_STATE_SOFTSETUP; 845 card->state = CARD_STATE_SOFTSETUP;
846 return 0; 846 return 0;
@@ -1137,9 +1137,13 @@ static int qeth_l2_recover(void *ptr)
1137 if (!rc) 1137 if (!rc)
1138 PRINT_INFO("Device %s successfully recovered!\n", 1138 PRINT_INFO("Device %s successfully recovered!\n",
1139 CARD_BUS_ID(card)); 1139 CARD_BUS_ID(card));
1140 else 1140 else {
1141 rtnl_lock();
1142 dev_close(card->dev);
1143 rtnl_unlock();
1141 PRINT_INFO("Device %s could not be recovered!\n", 1144 PRINT_INFO("Device %s could not be recovered!\n",
1142 CARD_BUS_ID(card)); 1145 CARD_BUS_ID(card));
1146 }
1143 return 0; 1147 return 0;
1144} 1148}
1145 1149
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index dd72c3c20165..99547dea44de 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2795,7 +2795,6 @@ static int qeth_l3_open(struct net_device *dev)
2795 return -ENODEV; 2795 return -ENODEV;
2796 card->data.state = CH_STATE_UP; 2796 card->data.state = CH_STATE_UP;
2797 card->state = CARD_STATE_UP; 2797 card->state = CARD_STATE_UP;
2798 card->dev->flags |= IFF_UP;
2799 netif_start_queue(dev); 2798 netif_start_queue(dev);
2800 2799
2801 if (!card->lan_online && netif_carrier_ok(dev)) 2800 if (!card->lan_online && netif_carrier_ok(dev))
@@ -2809,7 +2808,6 @@ static int qeth_l3_stop(struct net_device *dev)
2809 2808
2810 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 2809 QETH_DBF_TEXT(TRACE, 4, "qethstop");
2811 netif_tx_disable(dev); 2810 netif_tx_disable(dev);
2812 card->dev->flags &= ~IFF_UP;
2813 if (card->state == CARD_STATE_UP) 2811 if (card->state == CARD_STATE_UP)
2814 card->state = CARD_STATE_SOFTSETUP; 2812 card->state = CARD_STATE_SOFTSETUP;
2815 return 0; 2813 return 0;
@@ -3218,9 +3216,13 @@ static int qeth_l3_recover(void *ptr)
3218 if (!rc) 3216 if (!rc)
3219 PRINT_INFO("Device %s successfully recovered!\n", 3217 PRINT_INFO("Device %s successfully recovered!\n",
3220 CARD_BUS_ID(card)); 3218 CARD_BUS_ID(card));
3221 else 3219 else {
3220 rtnl_lock();
3221 dev_close(card->dev);
3222 rtnl_unlock();
3222 PRINT_INFO("Device %s could not be recovered!\n", 3223 PRINT_INFO("Device %s could not be recovered!\n",
3223 CARD_BUS_ID(card)); 3224 CARD_BUS_ID(card));
3225 }
3224 return 0; 3226 return 0;
3225} 3227}
3226 3228
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index f883dcfffe06..d5cde051806b 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -327,11 +327,9 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
327 s8 gain; 327 s8 gain;
328 u16 loc[3]; 328 u16 loc[3];
329 329
330 if (out->revision == 3) { /* rev 3 moved MAC */ 330 if (out->revision == 3) /* rev 3 moved MAC */
331 loc[0] = SSB_SPROM3_IL0MAC; 331 loc[0] = SSB_SPROM3_IL0MAC;
332 loc[1] = SSB_SPROM3_ET0MAC; 332 else {
333 loc[2] = SSB_SPROM3_ET1MAC;
334 } else {
335 loc[0] = SSB_SPROM1_IL0MAC; 333 loc[0] = SSB_SPROM1_IL0MAC;
336 loc[1] = SSB_SPROM1_ET0MAC; 334 loc[1] = SSB_SPROM1_ET0MAC;
337 loc[2] = SSB_SPROM1_ET1MAC; 335 loc[2] = SSB_SPROM1_ET1MAC;
@@ -340,13 +338,15 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
340 v = in[SPOFF(loc[0]) + i]; 338 v = in[SPOFF(loc[0]) + i];
341 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); 339 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
342 } 340 }
343 for (i = 0; i < 3; i++) { 341 if (out->revision < 3) { /* only rev 1-2 have et0, et1 */
344 v = in[SPOFF(loc[1]) + i]; 342 for (i = 0; i < 3; i++) {
345 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v); 343 v = in[SPOFF(loc[1]) + i];
346 } 344 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
347 for (i = 0; i < 3; i++) { 345 }
348 v = in[SPOFF(loc[2]) + i]; 346 for (i = 0; i < 3; i++) {
349 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v); 347 v = in[SPOFF(loc[2]) + i];
348 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
349 }
350 } 350 }
351 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0); 351 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0);
352 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A, 352 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A,
@@ -399,30 +399,33 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
399 out->antenna_gain.ghz5.a3 = gain; 399 out->antenna_gain.ghz5.a3 = gain;
400} 400}
401 401
402static void sprom_extract_r4(struct ssb_sprom *out, const u16 *in) 402static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
403{ 403{
404 int i; 404 int i;
405 u16 v; 405 u16 v;
406 u16 il0mac_offset;
406 407
407 /* extract the equivalent of the r1 variables */ 408 if (out->revision == 4)
409 il0mac_offset = SSB_SPROM4_IL0MAC;
410 else
411 il0mac_offset = SSB_SPROM5_IL0MAC;
412 /* extract the MAC address */
408 for (i = 0; i < 3; i++) { 413 for (i = 0; i < 3; i++) {
409 v = in[SPOFF(SSB_SPROM4_IL0MAC) + i]; 414 v = in[SPOFF(il0mac_offset) + i];
410 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v); 415 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
411 } 416 }
412 for (i = 0; i < 3; i++) {
413 v = in[SPOFF(SSB_SPROM4_ET0MAC) + i];
414 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
415 }
416 for (i = 0; i < 3; i++) {
417 v = in[SPOFF(SSB_SPROM4_ET1MAC) + i];
418 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
419 }
420 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0); 417 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
421 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A, 418 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
422 SSB_SPROM4_ETHPHY_ET1A_SHIFT); 419 SSB_SPROM4_ETHPHY_ET1A_SHIFT);
423 SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0); 420 if (out->revision == 4) {
424 SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0); 421 SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0);
425 SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0); 422 SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0);
423 SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0);
424 } else {
425 SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0);
426 SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0);
427 SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0);
428 }
426 SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A, 429 SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A,
427 SSB_SPROM4_ANTAVAIL_A_SHIFT); 430 SSB_SPROM4_ANTAVAIL_A_SHIFT);
428 SPEX(ant_available_bg, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_BG, 431 SPEX(ant_available_bg, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_BG,
@@ -433,12 +436,21 @@ static void sprom_extract_r4(struct ssb_sprom *out, const u16 *in)
433 SPEX(maxpwr_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_MAXP_A_MASK, 0); 436 SPEX(maxpwr_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_MAXP_A_MASK, 0);
434 SPEX(itssi_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_ITSSI_A, 437 SPEX(itssi_a, SSB_SPROM4_MAXP_A, SSB_SPROM4_ITSSI_A,
435 SSB_SPROM4_ITSSI_A_SHIFT); 438 SSB_SPROM4_ITSSI_A_SHIFT);
436 SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0); 439 if (out->revision == 4) {
437 SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1, 440 SPEX(gpio0, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P0, 0);
438 SSB_SPROM4_GPIOA_P1_SHIFT); 441 SPEX(gpio1, SSB_SPROM4_GPIOA, SSB_SPROM4_GPIOA_P1,
439 SPEX(gpio2, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P2, 0); 442 SSB_SPROM4_GPIOA_P1_SHIFT);
440 SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3, 443 SPEX(gpio2, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P2, 0);
441 SSB_SPROM4_GPIOB_P3_SHIFT); 444 SPEX(gpio3, SSB_SPROM4_GPIOB, SSB_SPROM4_GPIOB_P3,
445 SSB_SPROM4_GPIOB_P3_SHIFT);
446 } else {
447 SPEX(gpio0, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P0, 0);
448 SPEX(gpio1, SSB_SPROM5_GPIOA, SSB_SPROM5_GPIOA_P1,
449 SSB_SPROM5_GPIOA_P1_SHIFT);
450 SPEX(gpio2, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P2, 0);
451 SPEX(gpio3, SSB_SPROM5_GPIOB, SSB_SPROM5_GPIOB_P3,
452 SSB_SPROM5_GPIOB_P3_SHIFT);
453 }
442 454
443 /* Extract the antenna gain values. */ 455 /* Extract the antenna gain values. */
444 SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01, 456 SPEX(antenna_gain.ghz24.a0, SSB_SPROM4_AGAIN01,
@@ -462,6 +474,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
462 474
463 out->revision = in[size - 1] & 0x00FF; 475 out->revision = in[size - 1] & 0x00FF;
464 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision); 476 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision);
477 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
478 memset(out->et1mac, 0xFF, 6);
465 if ((bus->chip_id & 0xFF00) == 0x4400) { 479 if ((bus->chip_id & 0xFF00) == 0x4400) {
466 /* Workaround: The BCM44XX chip has a stupid revision 480 /* Workaround: The BCM44XX chip has a stupid revision
467 * number stored in the SPROM. 481 * number stored in the SPROM.
@@ -471,16 +485,16 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
471 } else if (bus->chip_id == 0x4321) { 485 } else if (bus->chip_id == 0x4321) {
472 /* the BCM4328 has a chipid == 0x4321 and a rev 4 SPROM */ 486 /* the BCM4328 has a chipid == 0x4321 and a rev 4 SPROM */
473 out->revision = 4; 487 out->revision = 4;
474 sprom_extract_r4(out, in); 488 sprom_extract_r45(out, in);
475 } else { 489 } else {
476 if (out->revision == 0) 490 if (out->revision == 0)
477 goto unsupported; 491 goto unsupported;
478 if (out->revision >= 1 && out->revision <= 3) { 492 if (out->revision >= 1 && out->revision <= 3) {
479 sprom_extract_r123(out, in); 493 sprom_extract_r123(out, in);
480 } 494 }
481 if (out->revision == 4) 495 if (out->revision == 4 || out->revision == 5)
482 sprom_extract_r4(out, in); 496 sprom_extract_r45(out, in);
483 if (out->revision >= 5) 497 if (out->revision > 5)
484 goto unsupported; 498 goto unsupported;
485 } 499 }
486 500
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 07228721cafe..0da2c25bab3b 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -640,14 +640,13 @@ static void usbatm_cancel_send(struct usbatm_data *instance,
640 640
641 atm_dbg(instance, "%s entered\n", __func__); 641 atm_dbg(instance, "%s entered\n", __func__);
642 spin_lock_irq(&instance->sndqueue.lock); 642 spin_lock_irq(&instance->sndqueue.lock);
643 for (skb = instance->sndqueue.next, n = skb->next; 643 skb_queue_walk_safe(&instance->sndqueue, skb, n) {
644 skb != (struct sk_buff *)&instance->sndqueue;
645 skb = n, n = skb->next)
646 if (UDSL_SKB(skb)->atm.vcc == vcc) { 644 if (UDSL_SKB(skb)->atm.vcc == vcc) {
647 atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb); 645 atm_dbg(instance, "%s: popping skb 0x%p\n", __func__, skb);
648 __skb_unlink(skb, &instance->sndqueue); 646 __skb_unlink(skb, &instance->sndqueue);
649 usbatm_pop(vcc, skb); 647 usbatm_pop(vcc, skb);
650 } 648 }
649 }
651 spin_unlock_irq(&instance->sndqueue.lock); 650 spin_unlock_irq(&instance->sndqueue.lock);
652 651
653 tasklet_disable(&instance->tx_channel.tasklet); 652 tasklet_disable(&instance->tx_channel.tasklet);
diff --git a/firmware/Makefile b/firmware/Makefile
index da75a6fbc6ba..ca8cd305ff93 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -22,6 +22,7 @@ fw-external-y := $(subst ",,$(CONFIG_EXTRA_FIRMWARE))
22 22
23fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin 23fw-shipped-$(CONFIG_ATARI_DSP56K) += dsp56k/bootstrap.bin
24fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw 24fw-shipped-$(CONFIG_ATM_AMBASSADOR) += atmsar11.fw
25fw-shipped-$(CONFIG_CASSINI) += sun/cassini.bin
25fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin 26fw-shipped-$(CONFIG_COMPUTONE) += intelliport2.bin
26fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin 27fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
27fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin 28fw-shipped-$(CONFIG_SMCTR) += tr_smctr.bin
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 66c51b275e9e..57002cdecd42 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -339,3 +339,13 @@ Licence: Allegedly GPLv2+, but no source visible. Marked:
339Found in hex form in kernel source. 339Found in hex form in kernel source.
340 340
341-------------------------------------------------------------------------- 341--------------------------------------------------------------------------
342
343Driver: CASSINI - Sun Cassini
344
345File: sun/cassini.bin
346
347Licence: Unknown
348
349Found in hex form in kernel source.
350
351--------------------------------------------------------------------------
diff --git a/firmware/sun/cassini.bin.ihex b/firmware/sun/cassini.bin.ihex
new file mode 100644
index 000000000000..5cd7ae70e71f
--- /dev/null
+++ b/firmware/sun/cassini.bin.ihex
@@ -0,0 +1,143 @@
1:1000000000827E82090000000000008E8EFFCE82FA
2:1000100025FF010FCE8426FF0111CE853DDFE58649
3:1000200039B78FF87EC3C2964784F38A009747CECC
4:100030008233FF010F9646840C8104270B96468479
5:100040000C810827577E8425964784F38A049747B6
6:10005000CE8254FF010F9646840C81042638B612D6
7:1000600020842026037E8425967BD67CFE8F56BD79
8:10007000F7B6FE8F4EBDEC8EBDFAF7BDF728CE82E7
9:1000800082FF010F9646840C8104260AB612208452
10:100090002027B57E8425BDF71F7E841F964784F3F5
11:1000A0008A089747DEE1AD00CE82AFFF010F7E8464
12:1000B00025964185102606962385402706BDED002E
13:1000C0007E83A2DE42BDEB8E9624840827037E83C6
14:1000D000DF967BD67CFE8F56BDF7B6FE8F50BDEC0B
15:1000E0008EBDFAF78611C649BDE412CE82EFFF013C
16:1000F0000F9646840C81002717C649BDE491240D54
17:10010000B612208520260CCE82C1FF010F7E8425E9
18:100110007E8416FE8F52BDEC8EBDFAF7866AC64904
19:10012000BDE412CE8327FF010F9646840C81002781
20:100130000AC649BDE49125067E84257E8416B6183C
21:1001400070BB19702A0481AF2E19967BF62007FA2E
22:100150002027C4388138270BF62007FA2027CB0840
23:100160007E82D3BDF7668674C649BDE412CE837124
24:10017000FF010F9646840C8108260AC649BDE4910A
25:1001800025067E84257E8416BDF73E260EBDE50934
26:100190002606CE82C1FF010F7E8425FE8F54BDEC62
27:1001A0008EBDFAF7BDF733860FC651BDE412CE837C
28:1001B000B2FF010F9646840C8108265CB61220849B
29:1001C0003F813A271C9623854027037E8425C6510C
30:1001D000BDE49125037E8425CE82C1FF010F7E847C
31:1001E00025BDF8377C007ACE83EEFF010F7E842593
32:1001F0009646840C81082620962484082629B61861
33:1002000082BB1982B1013B2209B6122084378132A8
34:100210002715BDF8447E82C1BDF71FBDF844BDFC63
35:1002200029CE8225FF010F39964784FC8A00974723
36:10023000CE8434FF011196468403810227037E8514
37:100240001E964784FC8A029747DEE1AD008601B71F
38:100250001251BDF714B6103184FDB71031BDF81E30
39:100260009681D682FE8F5ABDF7B6FE8F5CBDEC8EAE
40:10027000BDFAF78608D600C51026028B20C651BDF0
41:10028000E412CE8486FF011196468403810227037F
42:100290007E850FC651BDE49125037E851E9644855B
43:1002A00010260AB61250BA013C851027A8BDF76681
44:1002B000CE84B7FF01117E851E96468403810226F7
45:1002C00050B612308403810127037E851E96448533
46:1002D000102613B61250BA013C85102609CE84535D
47:1002E000FF01117E851EB610318A02B71031BD851F
48:1002F0001FBDF8377C0080CE84FEFF01117E851E75
49:100300009646840381022609B612308403810127B0
50:100310000FBDF844BDF70BBDFC29CE8426FF0111AB
51:1003200039D622C40FB61230BA12328404270D9681
52:100330002285042705CA107E853ACA20D72239862D
53:1003400000978318CE1C00BDEB4696578501270207
54:100350004F3985022701397F8F7D8604B7120486C5
55:1003600008B712078610B7120C8607B71206B68FA9
56:100370007DB712708601BA1204B71204010101019F
57:100380000101B6120484FE8A02B7120401010101C0
58:10039000010186FDB41204B71204B612008408816C
59:1003A000082716B68F7D810C27088B04B78F7D7EBA
60:1003B000856C860397407E896E8607B712065FF7C5
61:1003C0008F825FF78F7FF78F70F78F71F78F72F7DC
62:1003D0008F73F78F74F78F75F78F76F78F77F78FA7
63:1003E00078F78F79F78F7AF78F7BB612048A10B778
64:1003F000120486E4B71270B71207F71205F7120954
65:100400008608BA1204B7120486F7B41204B71204AD
66:10041000010101010101B61208277F8180260B86A8
67:1004200008CE8F79BD897B7E868E8140260B86041F
68:10043000CE8F76BD897B7E868E8120260B8602CE6E
69:100440008F73BD897B7E868E8110260B8601CE8FB1
70:1004500070BD897B7E868E8108260B8608CE8F79BB
71:10046000BD897F7E868E8104260B8604CE8F76BD65
72:10047000897F7E868E8102260B8A02CE8F73BD898C
73:100480007F7E868E810126088601CE8F70BD897F92
74:10049000B68F7F810F26037E8747B61209840381BA
75:1004A0000327067C12097E85FEB6120684078107A3
76:1004B00027088B01B712067E86D5B68F82260A7C66
77:1004C0008F824FB712067E85C0B61206843F813FE9
78:1004D00027108B08B71206B6120984FCB712097EE2
79:1004E00085FECE8F7018CE8F84C60CBD896FCE8FDF
80:1004F0008418CE8F70C60CBD896FD683C14F2D0373
81:100500007E8740B68F7F8107270F810B2715810DCE
82:10051000271B810E27217E8740F78F7B8602B78FAE
83:100520007A201CF78F788602B78F772012F78F75A5
84:100530008602B78F742008F78F728602B78F717E9C
85:100540008747860497407E896ECE8F72BD89F7CE2D
86:100550008F75BD89F7CE8F78BD89F7CE8F7BBD892A
87:10056000F74FB78F7DB78F81B68F7227477C8F7D0E
88:10057000B68F75273F7C8F7DB68F7827377C8F7D30
89:10058000B68F7B272F7F8F7D7C8F817A8F72271B81
90:100590007C8F7D7A8F7527167C8F7D7A8F782711D7
91:1005A0007C8F7D7A8F7B270C7E87837A8F757A8FFD
92:1005B000787A8F7BCEC1FCF68F7D3AA600B7127099
93:1005C000B68F7226037E87FAB68F75260A18CE8FED
94:1005D00073BD89D57E87FAB68F78260A18CE8F76B6
95:1005E000BD89D57E87FAB68F7B260A18CE8F79BD56
96:1005F00089D57E87FA860597407E8900B68F7581FA
97:10060000072EF2F61206C4F81BB71206B68F7881D1
98:10061000072EE2484848F61206C4C71BB71206B6B2
99:100620008F7B81072ECFF61205C4F81BB712058603
100:1006300000F68F71BD89948601F68F74BD8994860A
101:1006400002F68F77BD89948603F68F7ABD8994CEA2
102:100650008F70A60181012707810327037E8866A684
103:1006600000B88F818401260B8C8F792C0E08080826
104:100670007E8850B612048A40B71204B6120484FB76
105:1006800084EFB71204B6120736B68F7C4848B7120B
106:10069000078601BA1204B7120401010101010186A3
107:1006A000FEB41204B712048602BA1204B71204860A
108:1006B000FDB41204B7120432B71207B61200840850
109:1006C0008108270F7C82082607867697407E896EF0
110:1006D0007E86ECB68F7F810F273CBDE6C7B7120D33
111:1006E000BDE6CBB612048A20B71204CEFFFFB612C5
112:1006F00000810C26050926F6271CB6120484DFB7F4
113:100700001204968381072C057C0083200696838B38
114:100710000897837E85417F8F7E8680B7120C860185
115:10072000B78F7DB6120C847FB7120C8A80B7120C7B
116:10073000860ABD8A06B6120A2A09B6120CBA8F7D3D
117:10074000B7120CB68F7E8160271A8B20B78F7EB6CA
118:10075000120C849FBA8F7EB7120CB68F7D48B78F6C
119:100760007D7E8921B612048A20B71204BD8A0A4F01
120:1007700039A60018A7000818085A26F539366C0063
121:1007800032BA8F7FB78F7FB612098403A701B612E2
122:1007900006843FA70239368603B78F8032C1002610
123:1007A00006B78F7C7E89C9C1012718C102270CC1F9
124:1007B000032700F68F800505F78F80F68F800505EB
125:1007C000F78F80F68F800505F78F80F68F8053F4C2
126:1007D00012071BB7120739CE8F70A60018E6001853
127:1007E000A700E700A60118E60118A701E701A60285
128:1007F00018E60218A702E70239A6008407E600C43B
129:10080000385454541BA700394A26FD399622840FC8
130:1008100097228601B78F70B61207B78F71F6120C48
131:10082000C40FC80FF78F72F68F72B68F71840327CB
132:10083000148101271C81022724F48F70272A962215
133:100840008A807E8A64F48F70271E96228A107E8AA0
134:1008500064F48F70271296228A207E8A64F48F7047
135:10086000270696228A409722748F71748F71788F31
136:1008700070B68F70851027AFD622C41058B612708C
137:1008800081E4273681E1260C96228420441BD6225F
138:10089000C4CF20235881C6260D9622844044441B91
139:1008A000D622C4AF2011588127260F962284804477
140:1008B00044441BD622C46F1B972239270C7C820626
141:0D08C000BDD9EDB682077E8AB97F82063968
142:00000001FF
143/* firmware patch for NS_DP83065 */
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index f0ee4fb55911..90fc708b320e 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -118,4 +118,11 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
118 BUG(); 118 BUG();
119} 119}
120 120
121static inline int
122dma_mapping_error(struct device *dev, dma_addr_t dma_handle)
123{
124 BUG();
125 return 0;
126}
127
121#endif 128#endif
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b68ec09399be..f431e40725d6 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -126,6 +126,7 @@ header-y += pci_regs.h
126header-y += pfkeyv2.h 126header-y += pfkeyv2.h
127header-y += pg.h 127header-y += pg.h
128header-y += phantom.h 128header-y += phantom.h
129header-y += phonet.h
129header-y += pkt_cls.h 130header-y += pkt_cls.h
130header-y += pkt_sched.h 131header-y += pkt_sched.h
131header-y += posix_types.h 132header-y += posix_types.h
@@ -232,6 +233,7 @@ unifdef-y += if_fddi.h
232unifdef-y += if_frad.h 233unifdef-y += if_frad.h
233unifdef-y += if_ltalk.h 234unifdef-y += if_ltalk.h
234unifdef-y += if_link.h 235unifdef-y += if_link.h
236unifdef-y += if_phonet.h
235unifdef-y += if_pppol2tp.h 237unifdef-y += if_pppol2tp.h
236unifdef-y += if_pppox.h 238unifdef-y += if_pppox.h
237unifdef-y += if_tr.h 239unifdef-y += if_tr.h
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 7f4df7c7659d..14126bc36641 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -471,6 +471,11 @@ struct ieee80211s_hdr {
471 u8 eaddr3[6]; 471 u8 eaddr3[6];
472} __attribute__ ((packed)); 472} __attribute__ ((packed));
473 473
474/* Mesh flags */
475#define MESH_FLAGS_AE_A4 0x1
476#define MESH_FLAGS_AE_A5_A6 0x2
477#define MESH_FLAGS_PS_DEEP 0x4
478
474/** 479/**
475 * struct ieee80211_quiet_ie 480 * struct ieee80211_quiet_ie
476 * 481 *
@@ -643,6 +648,9 @@ struct ieee80211_mgmt {
643 } u; 648 } u;
644} __attribute__ ((packed)); 649} __attribute__ ((packed));
645 650
651/* mgmt header + 1 byte category code */
652#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
653
646 654
647/* Control frames */ 655/* Control frames */
648struct ieee80211_rts { 656struct ieee80211_rts {
@@ -708,12 +716,13 @@ struct ieee80211_ht_addt_info {
708 716
709/* 802.11n HT capabilities masks */ 717/* 802.11n HT capabilities masks */
710#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002 718#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
711#define IEEE80211_HT_CAP_MIMO_PS 0x000C 719#define IEEE80211_HT_CAP_SM_PS 0x000C
712#define IEEE80211_HT_CAP_GRN_FLD 0x0010 720#define IEEE80211_HT_CAP_GRN_FLD 0x0010
713#define IEEE80211_HT_CAP_SGI_20 0x0020 721#define IEEE80211_HT_CAP_SGI_20 0x0020
714#define IEEE80211_HT_CAP_SGI_40 0x0040 722#define IEEE80211_HT_CAP_SGI_40 0x0040
715#define IEEE80211_HT_CAP_DELAY_BA 0x0400 723#define IEEE80211_HT_CAP_DELAY_BA 0x0400
716#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 724#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
725#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
717/* 802.11n HT capability AMPDU settings */ 726/* 802.11n HT capability AMPDU settings */
718#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 727#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
719#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C 728#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
@@ -736,11 +745,26 @@ struct ieee80211_ht_addt_info {
736#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004 745#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
737#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010 746#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
738 747
739/* MIMO Power Save Modes */ 748/* block-ack parameters */
740#define WLAN_HT_CAP_MIMO_PS_STATIC 0 749#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
741#define WLAN_HT_CAP_MIMO_PS_DYNAMIC 1 750#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
742#define WLAN_HT_CAP_MIMO_PS_INVALID 2 751#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
743#define WLAN_HT_CAP_MIMO_PS_DISABLED 3 752#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
753#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
754
755/*
756 * A-PMDU buffer sizes
757 * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
758 */
759#define IEEE80211_MIN_AMPDU_BUF 0x8
760#define IEEE80211_MAX_AMPDU_BUF 0x40
761
762
763/* Spatial Multiplexing Power Save Modes */
764#define WLAN_HT_CAP_SM_PS_STATIC 0
765#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
766#define WLAN_HT_CAP_SM_PS_INVALID 2
767#define WLAN_HT_CAP_SM_PS_DISABLED 3
744 768
745/* Authentication algorithms */ 769/* Authentication algorithms */
746#define WLAN_AUTH_OPEN 0 770#define WLAN_AUTH_OPEN 0
diff --git a/include/linux/if.h b/include/linux/if.h
index 5c9d1fa93fef..65246846c844 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -24,6 +24,7 @@
24#include <linux/compiler.h> /* for "__user" et al */ 24#include <linux/compiler.h> /* for "__user" et al */
25 25
26#define IFNAMSIZ 16 26#define IFNAMSIZ 16
27#define IFALIASZ 256
27#include <linux/hdlc/ioctl.h> 28#include <linux/hdlc/ioctl.h>
28 29
29/* Standard interface flags (netdevice->flags). */ 30/* Standard interface flags (netdevice->flags). */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index e157c1399b61..723a1c5fbc6c 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -74,6 +74,7 @@
74#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport 74#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
75 * over Ethernet 75 * over Ethernet
76 */ 76 */
77#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
77#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ 78#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
78#define ETH_P_TIPC 0x88CA /* TIPC */ 79#define ETH_P_TIPC 0x88CA /* TIPC */
79 80
@@ -99,6 +100,7 @@
99#define ETH_P_ECONET 0x0018 /* Acorn Econet */ 100#define ETH_P_ECONET 0x0018 /* Acorn Econet */
100#define ETH_P_HDLC 0x0019 /* HDLC frames */ 101#define ETH_P_HDLC 0x0019 /* HDLC frames */
101#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */ 102#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */
103#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
102 104
103/* 105/*
104 * This is an Ethernet frame header. 106 * This is an Ethernet frame header.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 84c3492ae5cb..f9032c88716a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -79,6 +79,7 @@ enum
79 IFLA_LINKINFO, 79 IFLA_LINKINFO,
80#define IFLA_LINKINFO IFLA_LINKINFO 80#define IFLA_LINKINFO IFLA_LINKINFO
81 IFLA_NET_NS_PID, 81 IFLA_NET_NS_PID,
82 IFLA_IFALIAS,
82 __IFLA_MAX 83 __IFLA_MAX
83}; 84};
84 85
diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h
new file mode 100644
index 000000000000..d70034bcec05
--- /dev/null
+++ b/include/linux/if_phonet.h
@@ -0,0 +1,19 @@
1/*
2 * File: if_phonet.h
3 *
4 * Phonet interface kernel definitions
5 *
6 * Copyright (C) 2008 Nokia Corporation. All rights reserved.
7 */
8#ifndef LINUX_IF_PHONET_H
9#define LINUX_IF_PHONET_H
10
11#define PHONET_MIN_MTU 6 /* pn_length = 0 */
12#define PHONET_MAX_MTU 65541 /* pn_length = 0xffff */
13#define PHONET_DEV_MTU PHONET_MAX_MTU
14
15#ifdef __KERNEL__
16extern struct header_ops phonet_header_ops;
17#endif
18
19#endif
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index ec6eb49af2d8..0f434a28fb58 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -242,4 +242,164 @@ struct ip_vs_daemon_user {
242 int syncid; 242 int syncid;
243}; 243};
244 244
245/*
246 *
247 * IPVS Generic Netlink interface definitions
248 *
249 */
250
251/* Generic Netlink family info */
252
253#define IPVS_GENL_NAME "IPVS"
254#define IPVS_GENL_VERSION 0x1
255
256struct ip_vs_flags {
257 __be32 flags;
258 __be32 mask;
259};
260
261/* Generic Netlink command attributes */
262enum {
263 IPVS_CMD_UNSPEC = 0,
264
265 IPVS_CMD_NEW_SERVICE, /* add service */
266 IPVS_CMD_SET_SERVICE, /* modify service */
267 IPVS_CMD_DEL_SERVICE, /* delete service */
268 IPVS_CMD_GET_SERVICE, /* get service info */
269
270 IPVS_CMD_NEW_DEST, /* add destination */
271 IPVS_CMD_SET_DEST, /* modify destination */
272 IPVS_CMD_DEL_DEST, /* delete destination */
273 IPVS_CMD_GET_DEST, /* get destination info */
274
275 IPVS_CMD_NEW_DAEMON, /* start sync daemon */
276 IPVS_CMD_DEL_DAEMON, /* stop sync daemon */
277 IPVS_CMD_GET_DAEMON, /* get sync daemon status */
278
279 IPVS_CMD_SET_CONFIG, /* set config settings */
280 IPVS_CMD_GET_CONFIG, /* get config settings */
281
282 IPVS_CMD_SET_INFO, /* only used in GET_INFO reply */
283 IPVS_CMD_GET_INFO, /* get general IPVS info */
284
285 IPVS_CMD_ZERO, /* zero all counters and stats */
286 IPVS_CMD_FLUSH, /* flush services and dests */
287
288 __IPVS_CMD_MAX,
289};
290
291#define IPVS_CMD_MAX (__IPVS_CMD_MAX - 1)
292
293/* Attributes used in the first level of commands */
294enum {
295 IPVS_CMD_ATTR_UNSPEC = 0,
296 IPVS_CMD_ATTR_SERVICE, /* nested service attribute */
297 IPVS_CMD_ATTR_DEST, /* nested destination attribute */
298 IPVS_CMD_ATTR_DAEMON, /* nested sync daemon attribute */
299 IPVS_CMD_ATTR_TIMEOUT_TCP, /* TCP connection timeout */
300 IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, /* TCP FIN wait timeout */
301 IPVS_CMD_ATTR_TIMEOUT_UDP, /* UDP timeout */
302 __IPVS_CMD_ATTR_MAX,
303};
304
305#define IPVS_CMD_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1)
306
307/*
308 * Attributes used to describe a service
309 *
310 * Used inside nested attribute IPVS_CMD_ATTR_SERVICE
311 */
312enum {
313 IPVS_SVC_ATTR_UNSPEC = 0,
314 IPVS_SVC_ATTR_AF, /* address family */
315 IPVS_SVC_ATTR_PROTOCOL, /* virtual service protocol */
316 IPVS_SVC_ATTR_ADDR, /* virtual service address */
317 IPVS_SVC_ATTR_PORT, /* virtual service port */
318 IPVS_SVC_ATTR_FWMARK, /* firewall mark of service */
319
320 IPVS_SVC_ATTR_SCHED_NAME, /* name of scheduler */
321 IPVS_SVC_ATTR_FLAGS, /* virtual service flags */
322 IPVS_SVC_ATTR_TIMEOUT, /* persistent timeout */
323 IPVS_SVC_ATTR_NETMASK, /* persistent netmask */
324
325 IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */
326 __IPVS_SVC_ATTR_MAX,
327};
328
329#define IPVS_SVC_ATTR_MAX (__IPVS_SVC_ATTR_MAX - 1)
330
331/*
332 * Attributes used to describe a destination (real server)
333 *
334 * Used inside nested attribute IPVS_CMD_ATTR_DEST
335 */
336enum {
337 IPVS_DEST_ATTR_UNSPEC = 0,
338 IPVS_DEST_ATTR_ADDR, /* real server address */
339 IPVS_DEST_ATTR_PORT, /* real server port */
340
341 IPVS_DEST_ATTR_FWD_METHOD, /* forwarding method */
342 IPVS_DEST_ATTR_WEIGHT, /* destination weight */
343
344 IPVS_DEST_ATTR_U_THRESH, /* upper threshold */
345 IPVS_DEST_ATTR_L_THRESH, /* lower threshold */
346
347 IPVS_DEST_ATTR_ACTIVE_CONNS, /* active connections */
348 IPVS_DEST_ATTR_INACT_CONNS, /* inactive connections */
349 IPVS_DEST_ATTR_PERSIST_CONNS, /* persistent connections */
350
351 IPVS_DEST_ATTR_STATS, /* nested attribute for dest stats */
352 __IPVS_DEST_ATTR_MAX,
353};
354
355#define IPVS_DEST_ATTR_MAX (__IPVS_DEST_ATTR_MAX - 1)
356
357/*
358 * Attributes describing a sync daemon
359 *
360 * Used inside nested attribute IPVS_CMD_ATTR_DAEMON
361 */
362enum {
363 IPVS_DAEMON_ATTR_UNSPEC = 0,
364 IPVS_DAEMON_ATTR_STATE, /* sync daemon state (master/backup) */
365 IPVS_DAEMON_ATTR_MCAST_IFN, /* multicast interface name */
366 IPVS_DAEMON_ATTR_SYNC_ID, /* SyncID we belong to */
367 __IPVS_DAEMON_ATTR_MAX,
368};
369
370#define IPVS_DAEMON_ATTR_MAX (__IPVS_DAEMON_ATTR_MAX - 1)
371
372/*
373 * Attributes used to describe service or destination entry statistics
374 *
375 * Used inside nested attributes IPVS_SVC_ATTR_STATS and IPVS_DEST_ATTR_STATS
376 */
377enum {
378 IPVS_STATS_ATTR_UNSPEC = 0,
379 IPVS_STATS_ATTR_CONNS, /* connections scheduled */
380 IPVS_STATS_ATTR_INPKTS, /* incoming packets */
381 IPVS_STATS_ATTR_OUTPKTS, /* outgoing packets */
382 IPVS_STATS_ATTR_INBYTES, /* incoming bytes */
383 IPVS_STATS_ATTR_OUTBYTES, /* outgoing bytes */
384
385 IPVS_STATS_ATTR_CPS, /* current connection rate */
386 IPVS_STATS_ATTR_INPPS, /* current in packet rate */
387 IPVS_STATS_ATTR_OUTPPS, /* current out packet rate */
388 IPVS_STATS_ATTR_INBPS, /* current in byte rate */
389 IPVS_STATS_ATTR_OUTBPS, /* current out byte rate */
390 __IPVS_STATS_ATTR_MAX,
391};
392
393#define IPVS_STATS_ATTR_MAX (__IPVS_STATS_ATTR_MAX - 1)
394
395/* Attributes used in response to IPVS_CMD_GET_INFO command */
396enum {
397 IPVS_INFO_ATTR_UNSPEC = 0,
398 IPVS_INFO_ATTR_VERSION, /* IPVS version number */
399 IPVS_INFO_ATTR_CONN_TAB_SIZE, /* size of connection hash table */
400 __IPVS_INFO_ATTR_MAX,
401};
402
403#define IPVS_INFO_ATTR_MAX (__IPVS_INFO_ATTR_MAX - 1)
404
245#endif /* _IP_VS_H */ 405#endif /* _IP_VS_H */
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
index 8687a7dc0632..4c218ee7587a 100644
--- a/include/linux/isdn_ppp.h
+++ b/include/linux/isdn_ppp.h
@@ -157,7 +157,7 @@ typedef struct {
157 157
158typedef struct { 158typedef struct {
159 int mp_mrru; /* unused */ 159 int mp_mrru; /* unused */
160 struct sk_buff * frags; /* fragments sl list -- use skb->next */ 160 struct sk_buff_head frags; /* fragments sl list */
161 long frames; /* number of frames in the frame list */ 161 long frames; /* number of frames in the frame list */
162 unsigned int seq; /* last processed packet seq #: any packets 162 unsigned int seq; /* last processed packet seq #: any packets
163 * with smaller seq # will be dropped 163 * with smaller seq # will be dropped
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 12078577aef6..cbbbe9bfecad 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -17,9 +17,14 @@
17 17
18struct mv643xx_eth_shared_platform_data { 18struct mv643xx_eth_shared_platform_data {
19 struct mbus_dram_target_info *dram; 19 struct mbus_dram_target_info *dram;
20 struct platform_device *shared_smi;
20 unsigned int t_clk; 21 unsigned int t_clk;
21}; 22};
22 23
24#define MV643XX_ETH_PHY_ADDR_DEFAULT 0
25#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x))
26#define MV643XX_ETH_PHY_NONE 0xff
27
23struct mv643xx_eth_platform_data { 28struct mv643xx_eth_platform_data {
24 /* 29 /*
25 * Pointer back to our parent instance, and our port number. 30 * Pointer back to our parent instance, and our port number.
@@ -30,8 +35,6 @@ struct mv643xx_eth_platform_data {
30 /* 35 /*
31 * Whether a PHY is present, and if yes, at which address. 36 * Whether a PHY is present, and if yes, at which address.
32 */ 37 */
33 struct platform_device *shared_smi;
34 int force_phy_addr;
35 int phy_addr; 38 int phy_addr;
36 39
37 /* 40 /*
@@ -49,10 +52,10 @@ struct mv643xx_eth_platform_data {
49 int duplex; 52 int duplex;
50 53
51 /* 54 /*
52 * Which RX/TX queues to use. 55 * How many RX/TX queues to use.
53 */ 56 */
54 int rx_queue_mask; 57 int rx_queue_count;
55 int tx_queue_mask; 58 int tx_queue_count;
56 59
57 /* 60 /*
58 * Override default RX/TX queue sizes if nonzero. 61 * Override default RX/TX queue sizes if nonzero.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 488c56e649b5..9cfd20be8b7f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -471,6 +471,8 @@ struct net_device
471 char name[IFNAMSIZ]; 471 char name[IFNAMSIZ];
472 /* device name hash chain */ 472 /* device name hash chain */
473 struct hlist_node name_hlist; 473 struct hlist_node name_hlist;
474 /* snmp alias */
475 char *ifalias;
474 476
475 /* 477 /*
476 * I/O specific fields 478 * I/O specific fields
@@ -1223,7 +1225,8 @@ extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
1223extern int dev_ethtool(struct net *net, struct ifreq *); 1225extern int dev_ethtool(struct net *net, struct ifreq *);
1224extern unsigned dev_get_flags(const struct net_device *); 1226extern unsigned dev_get_flags(const struct net_device *);
1225extern int dev_change_flags(struct net_device *, unsigned); 1227extern int dev_change_flags(struct net_device *, unsigned);
1226extern int dev_change_name(struct net_device *, char *); 1228extern int dev_change_name(struct net_device *, const char *);
1229extern int dev_set_alias(struct net_device *, const char *, size_t);
1227extern int dev_change_net_namespace(struct net_device *, 1230extern int dev_change_net_namespace(struct net_device *,
1228 struct net *, const char *); 1231 struct net *, const char *);
1229extern int dev_set_mtu(struct net_device *, int); 1232extern int dev_set_mtu(struct net_device *, int);
@@ -1667,7 +1670,7 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
1667extern int netdev_class_create_file(struct class_attribute *class_attr); 1670extern int netdev_class_create_file(struct class_attribute *class_attr);
1668extern void netdev_class_remove_file(struct class_attribute *class_attr); 1671extern void netdev_class_remove_file(struct class_attribute *class_attr);
1669 1672
1670extern char *netdev_drivername(struct net_device *dev, char *buffer, int len); 1673extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
1671 1674
1672extern void linkwatch_run_queue(void); 1675extern void linkwatch_run_queue(void);
1673 1676
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9ff1b54908f3..cbba7760545b 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -220,7 +220,7 @@ struct netlink_callback
220 int (*dump)(struct sk_buff * skb, struct netlink_callback *cb); 220 int (*dump)(struct sk_buff * skb, struct netlink_callback *cb);
221 int (*done)(struct netlink_callback *cb); 221 int (*done)(struct netlink_callback *cb);
222 int family; 222 int family;
223 long args[6]; 223 long args[7];
224}; 224};
225 225
226struct netlink_notify 226struct netlink_notify
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 2be7c63bc0f2..9bad65400fba 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -89,6 +89,22 @@
89 * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC 89 * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC
90 * or, if no MAC address given, all mesh paths, on the interface identified 90 * or, if no MAC address given, all mesh paths, on the interface identified
91 * by %NL80211_ATTR_IFINDEX. 91 * by %NL80211_ATTR_IFINDEX.
92 * @NL80211_CMD_SET_BSS: Set BSS attributes for BSS identified by
93 * %NL80211_ATTR_IFINDEX.
94 *
95 * @NL80211_CMD_SET_REG: Set current regulatory domain. CRDA sends this command
96 * after being queried by the kernel. CRDA replies by sending a regulatory
97 * domain structure which consists of %NL80211_ATTR_REG_ALPHA set to our
98 * current alpha2 if it found a match. It also provides
99 * NL80211_ATTR_REG_RULE_FLAGS, and a set of regulatory rules. Each
100 * regulatory rule is a nested set of attributes given by
101 * %NL80211_ATTR_REG_RULE_FREQ_[START|END] and
102 * %NL80211_ATTR_FREQ_RANGE_MAX_BW with an attached power rule given by
103 * %NL80211_ATTR_REG_RULE_POWER_MAX_ANT_GAIN and
104 * %NL80211_ATTR_REG_RULE_POWER_MAX_EIRP.
105 * @NL80211_CMD_REQ_SET_REG: ask the wireless core to set the regulatory domain
106 * to the the specified ISO/IEC 3166-1 alpha2 country code. The core will
107 * store this as a valid request and then query userspace for it.
92 * 108 *
93 * @NL80211_CMD_MAX: highest used command number 109 * @NL80211_CMD_MAX: highest used command number
94 * @__NL80211_CMD_AFTER_LAST: internal use 110 * @__NL80211_CMD_AFTER_LAST: internal use
@@ -127,13 +143,23 @@ enum nl80211_commands {
127 NL80211_CMD_NEW_MPATH, 143 NL80211_CMD_NEW_MPATH,
128 NL80211_CMD_DEL_MPATH, 144 NL80211_CMD_DEL_MPATH,
129 145
130 /* add commands here */ 146 NL80211_CMD_SET_BSS,
147
148 NL80211_CMD_SET_REG,
149 NL80211_CMD_REQ_SET_REG,
150
151 /* add new commands above here */
131 152
132 /* used to define NL80211_CMD_MAX below */ 153 /* used to define NL80211_CMD_MAX below */
133 __NL80211_CMD_AFTER_LAST, 154 __NL80211_CMD_AFTER_LAST,
134 NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1 155 NL80211_CMD_MAX = __NL80211_CMD_AFTER_LAST - 1
135}; 156};
136 157
158/*
159 * Allow user space programs to use #ifdef on new commands by defining them
160 * here
161 */
162#define NL80211_CMD_SET_BSS NL80211_CMD_SET_BSS
137 163
138/** 164/**
139 * enum nl80211_attrs - nl80211 netlink attributes 165 * enum nl80211_attrs - nl80211 netlink attributes
@@ -188,10 +214,34 @@ enum nl80211_commands {
188 * info given for %NL80211_CMD_GET_MPATH, nested attribute described at 214 * info given for %NL80211_CMD_GET_MPATH, nested attribute described at
189 * &enum nl80211_mpath_info. 215 * &enum nl80211_mpath_info.
190 * 216 *
191 *
192 * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of 217 * @NL80211_ATTR_MNTR_FLAGS: flags, nested element with NLA_FLAG attributes of
193 * &enum nl80211_mntr_flags. 218 * &enum nl80211_mntr_flags.
194 * 219 *
220 * @NL80211_ATTR_REG_ALPHA2: an ISO-3166-alpha2 country code for which the
221 * current regulatory domain should be set to or is already set to.
222 * For example, 'CR', for Costa Rica. This attribute is used by the kernel
223 * to query the CRDA to retrieve one regulatory domain. This attribute can
224 * also be used by userspace to query the kernel for the currently set
225 * regulatory domain. We chose an alpha2 as that is also used by the
226 * IEEE-802.11d country information element to identify a country.
227 * Users can also simply ask the wireless core to set regulatory domain
228 * to a specific alpha2.
229 * @NL80211_ATTR_REG_RULES: a nested array of regulatory domain regulatory
230 * rules.
231 *
232 * @NL80211_ATTR_BSS_CTS_PROT: whether CTS protection is enabled (u8, 0 or 1)
233 * @NL80211_ATTR_BSS_SHORT_PREAMBLE: whether short preamble is enabled
234 * (u8, 0 or 1)
235 * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled
236 * (u8, 0 or 1)
237 *
238 * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from
239 * association request when used with NL80211_CMD_NEW_STATION)
240 *
241 * @NL80211_ATTR_SUPPORTED_IFTYPES: nested attribute containing all
242 * supported interface types, each a flag attribute with the number
243 * of the interface mode.
244 *
195 * @NL80211_ATTR_MAX: highest attribute number currently defined 245 * @NL80211_ATTR_MAX: highest attribute number currently defined
196 * @__NL80211_ATTR_AFTER_LAST: internal use 246 * @__NL80211_ATTR_AFTER_LAST: internal use
197 */ 247 */
@@ -235,16 +285,35 @@ enum nl80211_attrs {
235 NL80211_ATTR_MPATH_NEXT_HOP, 285 NL80211_ATTR_MPATH_NEXT_HOP,
236 NL80211_ATTR_MPATH_INFO, 286 NL80211_ATTR_MPATH_INFO,
237 287
288 NL80211_ATTR_BSS_CTS_PROT,
289 NL80211_ATTR_BSS_SHORT_PREAMBLE,
290 NL80211_ATTR_BSS_SHORT_SLOT_TIME,
291
292 NL80211_ATTR_HT_CAPABILITY,
293
294 NL80211_ATTR_SUPPORTED_IFTYPES,
295
296 NL80211_ATTR_REG_ALPHA2,
297 NL80211_ATTR_REG_RULES,
298
238 /* add attributes here, update the policy in nl80211.c */ 299 /* add attributes here, update the policy in nl80211.c */
239 300
240 __NL80211_ATTR_AFTER_LAST, 301 __NL80211_ATTR_AFTER_LAST,
241 NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1 302 NL80211_ATTR_MAX = __NL80211_ATTR_AFTER_LAST - 1
242}; 303};
243 304
305/*
306 * Allow user space programs to use #ifdef on new attributes by defining them
307 * here
308 */
309#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY
310
244#define NL80211_MAX_SUPP_RATES 32 311#define NL80211_MAX_SUPP_RATES 32
312#define NL80211_MAX_SUPP_REG_RULES 32
245#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0 313#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0
246#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16 314#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
247#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24 315#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
316#define NL80211_HT_CAPABILITY_LEN 26
248 317
249/** 318/**
250 * enum nl80211_iftype - (virtual) interface types 319 * enum nl80211_iftype - (virtual) interface types
@@ -436,6 +505,66 @@ enum nl80211_bitrate_attr {
436}; 505};
437 506
438/** 507/**
508 * enum nl80211_reg_rule_attr - regulatory rule attributes
509 * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional
510 * considerations for a given frequency range. These are the
511 * &enum nl80211_reg_rule_flags.
512 * @NL80211_ATTR_FREQ_RANGE_START: starting frequencry for the regulatory
513 * rule in KHz. This is not a center of frequency but an actual regulatory
514 * band edge.
515 * @NL80211_ATTR_FREQ_RANGE_END: ending frequency for the regulatory rule
516 * in KHz. This is not a center a frequency but an actual regulatory
517 * band edge.
518 * @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
519 * frequency range, in KHz.
520 * @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
521 * for a given frequency range. The value is in mBi (100 * dBi).
522 * If you don't have one then don't send this.
523 * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for
524 * a given frequency range. The value is in mBm (100 * dBm).
525 */
526enum nl80211_reg_rule_attr {
527 __NL80211_REG_RULE_ATTR_INVALID,
528 NL80211_ATTR_REG_RULE_FLAGS,
529
530 NL80211_ATTR_FREQ_RANGE_START,
531 NL80211_ATTR_FREQ_RANGE_END,
532 NL80211_ATTR_FREQ_RANGE_MAX_BW,
533
534 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
535 NL80211_ATTR_POWER_RULE_MAX_EIRP,
536
537 /* keep last */
538 __NL80211_REG_RULE_ATTR_AFTER_LAST,
539 NL80211_REG_RULE_ATTR_MAX = __NL80211_REG_RULE_ATTR_AFTER_LAST - 1
540};
541
542/**
543 * enum nl80211_reg_rule_flags - regulatory rule flags
544 *
545 * @NL80211_RRF_NO_OFDM: OFDM modulation not allowed
546 * @NL80211_RRF_NO_CCK: CCK modulation not allowed
547 * @NL80211_RRF_NO_INDOOR: indoor operation not allowed
548 * @NL80211_RRF_NO_OUTDOOR: outdoor operation not allowed
549 * @NL80211_RRF_DFS: DFS support is required to be used
550 * @NL80211_RRF_PTP_ONLY: this is only for Point To Point links
551 * @NL80211_RRF_PTMP_ONLY: this is only for Point To Multi Point links
552 * @NL80211_RRF_PASSIVE_SCAN: passive scan is required
553 * @NL80211_RRF_NO_IBSS: no IBSS is allowed
554 */
555enum nl80211_reg_rule_flags {
556 NL80211_RRF_NO_OFDM = 1<<0,
557 NL80211_RRF_NO_CCK = 1<<1,
558 NL80211_RRF_NO_INDOOR = 1<<2,
559 NL80211_RRF_NO_OUTDOOR = 1<<3,
560 NL80211_RRF_DFS = 1<<4,
561 NL80211_RRF_PTP_ONLY = 1<<5,
562 NL80211_RRF_PTMP_ONLY = 1<<6,
563 NL80211_RRF_PASSIVE_SCAN = 1<<7,
564 NL80211_RRF_NO_IBSS = 1<<8,
565};
566
567/**
439 * enum nl80211_mntr_flags - monitor configuration flags 568 * enum nl80211_mntr_flags - monitor configuration flags
440 * 569 *
441 * Monitor configuration flags. 570 * Monitor configuration flags.
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index f1624b396754..a65b082a888a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1411,6 +1411,8 @@
1411#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013 1411#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
1412#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014 1412#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
1413 1413
1414#define PCI_VENDOR_ID_CISCO 0x1137
1415
1414#define PCI_VENDOR_ID_ZIATECH 0x1138 1416#define PCI_VENDOR_ID_ZIATECH 0x1138
1415#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 1417#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
1416 1418
@@ -2213,6 +2215,7 @@
2213 2215
2214#define PCI_VENDOR_ID_ATTANSIC 0x1969 2216#define PCI_VENDOR_ID_ATTANSIC 0x1969
2215#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048 2217#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048
2218#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048
2216 2219
2217#define PCI_VENDOR_ID_JMICRON 0x197B 2220#define PCI_VENDOR_ID_JMICRON 0x197B
2218#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 2221#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
@@ -2244,6 +2247,16 @@
2244#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 2247#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007
2245#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 2248#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009
2246 2249
2250#define PCI_VENDOR_ID_NETXEN 0x4040
2251#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001
2252#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002
2253#define PCI_DEVICE_ID_NX2031_4GCU 0x0003
2254#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004
2255#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005
2256#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024
2257#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025
2258#define PCI_DEVICE_ID_NX3031 0x0100
2259
2247#define PCI_VENDOR_ID_AKS 0x416c 2260#define PCI_VENDOR_ID_AKS 0x416c
2248#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 2261#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
2249 2262
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
new file mode 100644
index 000000000000..3a027f588a4a
--- /dev/null
+++ b/include/linux/phonet.h
@@ -0,0 +1,160 @@
1/**
2 * file phonet.h
3 *
4 * Phonet sockets kernel interface
5 *
6 * Copyright (C) 2008 Nokia Corporation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef LINUX_PHONET_H
24#define LINUX_PHONET_H
25
26/* Automatic protocol selection */
27#define PN_PROTO_TRANSPORT 0
28/* Phonet datagram socket */
29#define PN_PROTO_PHONET 1
30#define PHONET_NPROTO 2
31
32#define PNADDR_ANY 0
33#define PNPORT_RESOURCE_ROUTING 0
34
35/* ioctls */
36#define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0)
37
38/* Phonet protocol header */
39struct phonethdr {
40 __u8 pn_rdev;
41 __u8 pn_sdev;
42 __u8 pn_res;
43 __be16 pn_length;
44 __u8 pn_robj;
45 __u8 pn_sobj;
46} __attribute__((packed));
47
48/* Common Phonet payload header */
49struct phonetmsg {
50 __u8 pn_trans_id; /* transaction ID */
51 __u8 pn_msg_id; /* message type */
52 union {
53 struct {
54 __u8 pn_submsg_id; /* message subtype */
55 __u8 pn_data[5];
56 } base;
57 struct {
58 __u16 pn_e_res_id; /* extended resource ID */
59 __u8 pn_e_submsg_id; /* message subtype */
60 __u8 pn_e_data[3];
61 } ext;
62 } pn_msg_u;
63};
64#define PN_COMMON_MESSAGE 0xF0
65#define PN_PREFIX 0xE0 /* resource for extended messages */
66#define pn_submsg_id pn_msg_u.base.pn_submsg_id
67#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id
68#define pn_e_res_id pn_msg_u.ext.pn_e_res_id
69#define pn_data pn_msg_u.base.pn_data
70#define pn_e_data pn_msg_u.ext.pn_e_data
71
72/* data for unreachable errors */
73#define PN_COMM_SERVICE_NOT_IDENTIFIED_RESP 0x01
74#define PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP 0x14
75#define pn_orig_msg_id pn_data[0]
76#define pn_status pn_data[1]
77#define pn_e_orig_msg_id pn_e_data[0]
78#define pn_e_status pn_e_data[1]
79
80/* Phonet socket address structure */
81struct sockaddr_pn {
82 sa_family_t spn_family;
83 __u8 spn_obj;
84 __u8 spn_dev;
85 __u8 spn_resource;
86 __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3];
87} __attribute__ ((packed));
88
89static inline __u16 pn_object(__u8 addr, __u16 port)
90{
91 return (addr << 8) | (port & 0x3ff);
92}
93
94static inline __u8 pn_obj(__u16 handle)
95{
96 return handle & 0xff;
97}
98
99static inline __u8 pn_dev(__u16 handle)
100{
101 return handle >> 8;
102}
103
104static inline __u16 pn_port(__u16 handle)
105{
106 return handle & 0x3ff;
107}
108
109static inline __u8 pn_addr(__u16 handle)
110{
111 return (handle >> 8) & 0xfc;
112}
113
114static inline void pn_sockaddr_set_addr(struct sockaddr_pn *spn, __u8 addr)
115{
116 spn->spn_dev &= 0x03;
117 spn->spn_dev |= addr & 0xfc;
118}
119
120static inline void pn_sockaddr_set_port(struct sockaddr_pn *spn, __u16 port)
121{
122 spn->spn_dev &= 0xfc;
123 spn->spn_dev |= (port >> 8) & 0x03;
124 spn->spn_obj = port & 0xff;
125}
126
127static inline void pn_sockaddr_set_object(struct sockaddr_pn *spn,
128 __u16 handle)
129{
130 spn->spn_dev = pn_dev(handle);
131 spn->spn_obj = pn_obj(handle);
132}
133
134static inline void pn_sockaddr_set_resource(struct sockaddr_pn *spn,
135 __u8 resource)
136{
137 spn->spn_resource = resource;
138}
139
140static inline __u8 pn_sockaddr_get_addr(const struct sockaddr_pn *spn)
141{
142 return spn->spn_dev & 0xfc;
143}
144
145static inline __u16 pn_sockaddr_get_port(const struct sockaddr_pn *spn)
146{
147 return ((spn->spn_dev & 0x03) << 8) | spn->spn_obj;
148}
149
150static inline __u16 pn_sockaddr_get_object(const struct sockaddr_pn *spn)
151{
152 return pn_object(spn->spn_dev, spn->spn_obj);
153}
154
155static inline __u8 pn_sockaddr_get_resource(const struct sockaddr_pn *spn)
156{
157 return spn->spn_resource;
158}
159
160#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 7224c4099a28..5f170f5b1a30 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -410,6 +410,8 @@ int phy_start_aneg(struct phy_device *phydev);
410 410
411int mdiobus_register(struct mii_bus *bus); 411int mdiobus_register(struct mii_bus *bus);
412void mdiobus_unregister(struct mii_bus *bus); 412void mdiobus_unregister(struct mii_bus *bus);
413struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
414
413void phy_sanitize_settings(struct phy_device *phydev); 415void phy_sanitize_settings(struct phy_device *phydev);
414int phy_stop_interrupts(struct phy_device *phydev); 416int phy_stop_interrupts(struct phy_device *phydev);
415int phy_enable_interrupts(struct phy_device *phydev); 417int phy_enable_interrupts(struct phy_device *phydev);
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index e5de421ac7b4..5d921fa91a5b 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -123,6 +123,13 @@ struct tc_prio_qopt
123 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ 123 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
124}; 124};
125 125
126/* MULTIQ section */
127
128struct tc_multiq_qopt {
129 __u16 bands; /* Number of bands */
130 __u16 max_bands; /* Maximum number of queues */
131};
132
126/* TBF section */ 133/* TBF section */
127 134
128struct tc_tbf_qopt 135struct tc_tbf_qopt
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 741d1a62cc3f..4cd64b0d9825 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -49,6 +49,7 @@ enum rfkill_state {
49 RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */ 49 RFKILL_STATE_SOFT_BLOCKED = 0, /* Radio output blocked */
50 RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */ 50 RFKILL_STATE_UNBLOCKED = 1, /* Radio output allowed */
51 RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */ 51 RFKILL_STATE_HARD_BLOCKED = 2, /* Output blocked, non-overrideable */
52 RFKILL_STATE_MAX, /* marker for last valid state */
52}; 53};
53 54
54/* 55/*
@@ -110,12 +111,14 @@ struct rfkill {
110}; 111};
111#define to_rfkill(d) container_of(d, struct rfkill, dev) 112#define to_rfkill(d) container_of(d, struct rfkill, dev)
112 113
113struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type); 114struct rfkill * __must_check rfkill_allocate(struct device *parent,
115 enum rfkill_type type);
114void rfkill_free(struct rfkill *rfkill); 116void rfkill_free(struct rfkill *rfkill);
115int rfkill_register(struct rfkill *rfkill); 117int __must_check rfkill_register(struct rfkill *rfkill);
116void rfkill_unregister(struct rfkill *rfkill); 118void rfkill_unregister(struct rfkill *rfkill);
117 119
118int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state); 120int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state);
121int rfkill_set_default(enum rfkill_type type, enum rfkill_state state);
119 122
120/** 123/**
121 * rfkill_state_complement - return complementar state 124 * rfkill_state_complement - return complementar state
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index ca643b13b026..2b3d51c6ec9c 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -582,6 +582,10 @@ enum rtnetlink_groups {
582#define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE 582#define RTNLGRP_IPV6_RULE RTNLGRP_IPV6_RULE
583 RTNLGRP_ND_USEROPT, 583 RTNLGRP_ND_USEROPT,
584#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT 584#define RTNLGRP_ND_USEROPT RTNLGRP_ND_USEROPT
585 RTNLGRP_PHONET_IFADDR,
586#define RTNLGRP_PHONET_IFADDR RTNLGRP_PHONET_IFADDR
587 RTNLGRP_PHONET_ROUTE,
588#define RTNLGRP_PHONET_ROUTE RTNLGRP_PHONET_ROUTE
585 __RTNLGRP_MAX 589 __RTNLGRP_MAX
586}; 590};
587#define RTNLGRP_MAX (__RTNLGRP_MAX - 1) 591#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 909923717830..720b688c22b6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -146,8 +146,14 @@ struct skb_shared_info {
146 unsigned short gso_segs; 146 unsigned short gso_segs;
147 unsigned short gso_type; 147 unsigned short gso_type;
148 __be32 ip6_frag_id; 148 __be32 ip6_frag_id;
149#ifdef CONFIG_HAS_DMA
150 unsigned int num_dma_maps;
151#endif
149 struct sk_buff *frag_list; 152 struct sk_buff *frag_list;
150 skb_frag_t frags[MAX_SKB_FRAGS]; 153 skb_frag_t frags[MAX_SKB_FRAGS];
154#ifdef CONFIG_HAS_DMA
155 dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];
156#endif
151}; 157};
152 158
153/* We divide dataref into two halves. The higher 16 bits hold references 159/* We divide dataref into two halves. The higher 16 bits hold references
@@ -353,6 +359,14 @@ struct sk_buff {
353 359
354#include <asm/system.h> 360#include <asm/system.h>
355 361
362#ifdef CONFIG_HAS_DMA
363#include <linux/dma-mapping.h>
364extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
365 enum dma_data_direction dir);
366extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
367 enum dma_data_direction dir);
368#endif
369
356extern void kfree_skb(struct sk_buff *skb); 370extern void kfree_skb(struct sk_buff *skb);
357extern void __kfree_skb(struct sk_buff *skb); 371extern void __kfree_skb(struct sk_buff *skb);
358extern struct sk_buff *__alloc_skb(unsigned int size, 372extern struct sk_buff *__alloc_skb(unsigned int size,
@@ -369,6 +383,8 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
369 return __alloc_skb(size, priority, 1, -1); 383 return __alloc_skb(size, priority, 1, -1);
370} 384}
371 385
386extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
387
372extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 388extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
373extern struct sk_buff *skb_clone(struct sk_buff *skb, 389extern struct sk_buff *skb_clone(struct sk_buff *skb,
374 gfp_t priority); 390 gfp_t priority);
@@ -459,6 +475,37 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
459} 475}
460 476
461/** 477/**
478 * skb_queue_is_last - check if skb is the last entry in the queue
479 * @list: queue head
480 * @skb: buffer
481 *
482 * Returns true if @skb is the last buffer on the list.
483 */
484static inline bool skb_queue_is_last(const struct sk_buff_head *list,
485 const struct sk_buff *skb)
486{
487 return (skb->next == (struct sk_buff *) list);
488}
489
490/**
491 * skb_queue_next - return the next packet in the queue
492 * @list: queue head
493 * @skb: current buffer
494 *
495 * Return the next packet in @list after @skb. It is only valid to
496 * call this if skb_queue_is_last() evaluates to false.
497 */
498static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
499 const struct sk_buff *skb)
500{
501 /* This BUG_ON may seem severe, but if we just return then we
502 * are going to dereference garbage.
503 */
504 BUG_ON(skb_queue_is_last(list, skb));
505 return skb->next;
506}
507
508/**
462 * skb_get - reference buffer 509 * skb_get - reference buffer
463 * @skb: buffer to reference 510 * @skb: buffer to reference
464 * 511 *
@@ -646,6 +693,22 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
646 return list_->qlen; 693 return list_->qlen;
647} 694}
648 695
696/**
697 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
698 * @list: queue to initialize
699 *
700 * This initializes only the list and queue length aspects of
701 * an sk_buff_head object. This allows to initialize the list
702 * aspects of an sk_buff_head without reinitializing things like
703 * the spinlock. It can also be used for on-stack sk_buff_head
704 * objects where the spinlock is known to not be used.
705 */
706static inline void __skb_queue_head_init(struct sk_buff_head *list)
707{
708 list->prev = list->next = (struct sk_buff *)list;
709 list->qlen = 0;
710}
711
649/* 712/*
650 * This function creates a split out lock class for each invocation; 713 * This function creates a split out lock class for each invocation;
651 * this is needed for now since a whole lot of users of the skb-queue 714 * this is needed for now since a whole lot of users of the skb-queue
@@ -657,8 +720,7 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
657static inline void skb_queue_head_init(struct sk_buff_head *list) 720static inline void skb_queue_head_init(struct sk_buff_head *list)
658{ 721{
659 spin_lock_init(&list->lock); 722 spin_lock_init(&list->lock);
660 list->prev = list->next = (struct sk_buff *)list; 723 __skb_queue_head_init(list);
661 list->qlen = 0;
662} 724}
663 725
664static inline void skb_queue_head_init_class(struct sk_buff_head *list, 726static inline void skb_queue_head_init_class(struct sk_buff_head *list,
@@ -685,6 +747,83 @@ static inline void __skb_insert(struct sk_buff *newsk,
685 list->qlen++; 747 list->qlen++;
686} 748}
687 749
750static inline void __skb_queue_splice(const struct sk_buff_head *list,
751 struct sk_buff *prev,
752 struct sk_buff *next)
753{
754 struct sk_buff *first = list->next;
755 struct sk_buff *last = list->prev;
756
757 first->prev = prev;
758 prev->next = first;
759
760 last->next = next;
761 next->prev = last;
762}
763
764/**
765 * skb_queue_splice - join two skb lists, this is designed for stacks
766 * @list: the new list to add
767 * @head: the place to add it in the first list
768 */
769static inline void skb_queue_splice(const struct sk_buff_head *list,
770 struct sk_buff_head *head)
771{
772 if (!skb_queue_empty(list)) {
773 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
774 head->qlen += list->qlen;
775 }
776}
777
778/**
779 * skb_queue_splice - join two skb lists and reinitialise the emptied list
780 * @list: the new list to add
781 * @head: the place to add it in the first list
782 *
783 * The list at @list is reinitialised
784 */
785static inline void skb_queue_splice_init(struct sk_buff_head *list,
786 struct sk_buff_head *head)
787{
788 if (!skb_queue_empty(list)) {
789 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
790 head->qlen += list->qlen;
791 __skb_queue_head_init(list);
792 }
793}
794
795/**
796 * skb_queue_splice_tail - join two skb lists, each list being a queue
797 * @list: the new list to add
798 * @head: the place to add it in the first list
799 */
800static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
801 struct sk_buff_head *head)
802{
803 if (!skb_queue_empty(list)) {
804 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
805 head->qlen += list->qlen;
806 }
807}
808
809/**
810 * skb_queue_splice_tail - join two skb lists and reinitialise the emptied list
811 * @list: the new list to add
812 * @head: the place to add it in the first list
813 *
814 * Each of the lists is a queue.
815 * The list at @list is reinitialised
816 */
817static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
818 struct sk_buff_head *head)
819{
820 if (!skb_queue_empty(list)) {
821 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
822 head->qlen += list->qlen;
823 __skb_queue_head_init(list);
824 }
825}
826
688/** 827/**
689 * __skb_queue_after - queue a buffer at the list head 828 * __skb_queue_after - queue a buffer at the list head
690 * @list: list to use 829 * @list: list to use
@@ -1434,6 +1573,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1434 skb != (struct sk_buff *)(queue); \ 1573 skb != (struct sk_buff *)(queue); \
1435 skb = tmp, tmp = skb->next) 1574 skb = tmp, tmp = skb->next)
1436 1575
1576#define skb_queue_walk_from(queue, skb) \
1577 for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1578 skb = skb->next)
1579
1580#define skb_queue_walk_from_safe(queue, skb, tmp) \
1581 for (tmp = skb->next; \
1582 skb != (struct sk_buff *)(queue); \
1583 skb = tmp, tmp = skb->next)
1584
1437#define skb_queue_reverse_walk(queue, skb) \ 1585#define skb_queue_reverse_walk(queue, skb) \
1438 for (skb = (queue)->prev; \ 1586 for (skb = (queue)->prev; \
1439 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \ 1587 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
diff --git a/include/linux/socket.h b/include/linux/socket.h
index dc5086fe7736..818ca33bf79f 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -190,7 +190,8 @@ struct ucred {
190#define AF_IUCV 32 /* IUCV sockets */ 190#define AF_IUCV 32 /* IUCV sockets */
191#define AF_RXRPC 33 /* RxRPC sockets */ 191#define AF_RXRPC 33 /* RxRPC sockets */
192#define AF_ISDN 34 /* mISDN sockets */ 192#define AF_ISDN 34 /* mISDN sockets */
193#define AF_MAX 35 /* For now.. */ 193#define AF_PHONET 35 /* Phonet sockets */
194#define AF_MAX 36 /* For now.. */
194 195
195/* Protocol families, same as address families. */ 196/* Protocol families, same as address families. */
196#define PF_UNSPEC AF_UNSPEC 197#define PF_UNSPEC AF_UNSPEC
@@ -227,6 +228,7 @@ struct ucred {
227#define PF_IUCV AF_IUCV 228#define PF_IUCV AF_IUCV
228#define PF_RXRPC AF_RXRPC 229#define PF_RXRPC AF_RXRPC
229#define PF_ISDN AF_ISDN 230#define PF_ISDN AF_ISDN
231#define PF_PHONET AF_PHONET
230#define PF_MAX AF_MAX 232#define PF_MAX AF_MAX
231 233
232/* Maximum queue length specifiable by listen. */ 234/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
index ebad0bac9801..99a0f991e850 100644
--- a/include/linux/ssb/ssb_regs.h
+++ b/include/linux/ssb/ssb_regs.h
@@ -245,8 +245,6 @@
245 245
246/* SPROM Revision 3 (inherits most data from rev 2) */ 246/* SPROM Revision 3 (inherits most data from rev 2) */
247#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */ 247#define SSB_SPROM3_IL0MAC 0x104A /* 6 bytes MAC address for 802.11b/g */
248#define SSB_SPROM3_ET0MAC 0x1050 /* 6 bytes MAC address for Ethernet ?? */
249#define SSB_SPROM3_ET1MAC 0x1050 /* 6 bytes MAC address for 802.11a ?? */
250#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */ 248#define SSB_SPROM3_OFDMAPO 0x102C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */
251#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */ 249#define SSB_SPROM3_OFDMALPO 0x1030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */
252#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */ 250#define SSB_SPROM3_OFDMAHPO 0x1034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */
@@ -267,8 +265,6 @@
267 265
268/* SPROM Revision 4 */ 266/* SPROM Revision 4 */
269#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */ 267#define SSB_SPROM4_IL0MAC 0x104C /* 6 byte MAC address for a/b/g/n */
270#define SSB_SPROM4_ET0MAC 0x1018 /* 6 bytes MAC address for Ethernet ?? */
271#define SSB_SPROM4_ET1MAC 0x1018 /* 6 bytes MAC address for 802.11a ?? */
272#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */ 268#define SSB_SPROM4_ETHPHY 0x105A /* Ethernet PHY settings ?? */
273#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */ 269#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
274#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */ 270#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
@@ -316,6 +312,21 @@
316#define SSB_SPROM4_PA1B1 0x1090 312#define SSB_SPROM4_PA1B1 0x1090
317#define SSB_SPROM4_PA1B2 0x1092 313#define SSB_SPROM4_PA1B2 0x1092
318 314
315/* SPROM Revision 5 (inherits most data from rev 4) */
316#define SSB_SPROM5_BFLLO 0x104A /* Boardflags (low 16 bits) */
317#define SSB_SPROM5_BFLHI 0x104C /* Board Flags Hi */
318#define SSB_SPROM5_IL0MAC 0x1052 /* 6 byte MAC address for a/b/g/n */
319#define SSB_SPROM5_CCODE 0x1044 /* Country Code (2 bytes) */
320#define SSB_SPROM5_GPIOA 0x1076 /* Gen. Purpose IO # 0 and 1 */
321#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */
322#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */
323#define SSB_SPROM5_GPIOA_P1_SHIFT 8
324#define SSB_SPROM5_GPIOB 0x1078 /* Gen. Purpose IO # 2 and 3 */
325#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */
326#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */
327#define SSB_SPROM5_GPIOB_P3_SHIFT 8
328
329
319/* Values for SSB_SPROM1_BINF_CCODE */ 330/* Values for SSB_SPROM1_BINF_CCODE */
320enum { 331enum {
321 SSB_SPROM1CCODE_WORLD = 0, 332 SSB_SPROM1CCODE_WORLD = 0,
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild
index 6dac0d7365cc..76990937f4c9 100644
--- a/include/linux/tc_act/Kbuild
+++ b/include/linux/tc_act/Kbuild
@@ -3,3 +3,4 @@ header-y += tc_ipt.h
3header-y += tc_mirred.h 3header-y += tc_mirred.h
4header-y += tc_pedit.h 4header-y += tc_pedit.h
5header-y += tc_nat.h 5header-y += tc_nat.h
6header-y += tc_skbedit.h
diff --git a/include/linux/tc_act/tc_skbedit.h b/include/linux/tc_act/tc_skbedit.h
new file mode 100644
index 000000000000..a14e461a7af7
--- /dev/null
+++ b/include/linux/tc_act/tc_skbedit.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#ifndef __LINUX_TC_SKBEDIT_H
21#define __LINUX_TC_SKBEDIT_H
22
23#include <linux/pkt_cls.h>
24
25#define TCA_ACT_SKBEDIT 11
26
27#define SKBEDIT_F_PRIORITY 0x1
28#define SKBEDIT_F_QUEUE_MAPPING 0x2
29
30struct tc_skbedit {
31 tc_gen;
32};
33
34enum {
35 TCA_SKBEDIT_UNSPEC,
36 TCA_SKBEDIT_TM,
37 TCA_SKBEDIT_PARMS,
38 TCA_SKBEDIT_PRIORITY,
39 TCA_SKBEDIT_QUEUE_MAPPING,
40 __TCA_SKBEDIT_MAX
41};
42#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
43
44#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 2e2557388e36..767290628292 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -342,7 +342,6 @@ struct tcp_sock {
342 struct sk_buff* lost_skb_hint; 342 struct sk_buff* lost_skb_hint;
343 struct sk_buff *scoreboard_skb_hint; 343 struct sk_buff *scoreboard_skb_hint;
344 struct sk_buff *retransmit_skb_hint; 344 struct sk_buff *retransmit_skb_hint;
345 struct sk_buff *forward_skb_hint;
346 345
347 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ 346 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
348 347
@@ -358,7 +357,7 @@ struct tcp_sock {
358 */ 357 */
359 358
360 int lost_cnt_hint; 359 int lost_cnt_hint;
361 int retransmit_cnt_hint; 360 u32 retransmit_high; /* L-bits may be on up to this seqno */
362 361
363 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */ 362 u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
364 363
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e00750836ba5..0e85ec39b638 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -152,6 +152,7 @@ struct station_parameters {
152 u16 aid; 152 u16 aid;
153 u8 supported_rates_len; 153 u8 supported_rates_len;
154 u8 plink_action; 154 u8 plink_action;
155 struct ieee80211_ht_cap *ht_capa;
155}; 156};
156 157
157/** 158/**
@@ -268,6 +269,83 @@ struct mpath_info {
268 u8 flags; 269 u8 flags;
269}; 270};
270 271
272/**
273 * struct bss_parameters - BSS parameters
274 *
275 * Used to change BSS parameters (mainly for AP mode).
276 *
277 * @use_cts_prot: Whether to use CTS protection
278 * (0 = no, 1 = yes, -1 = do not change)
279 * @use_short_preamble: Whether the use of short preambles is allowed
280 * (0 = no, 1 = yes, -1 = do not change)
281 * @use_short_slot_time: Whether the use of short slot time is allowed
282 * (0 = no, 1 = yes, -1 = do not change)
283 */
284struct bss_parameters {
285 int use_cts_prot;
286 int use_short_preamble;
287 int use_short_slot_time;
288};
289
290/**
291 * enum reg_set_by - Indicates who is trying to set the regulatory domain
292 * @REGDOM_SET_BY_INIT: regulatory domain was set by initialization. We will be
293 * using a static world regulatory domain by default.
294 * @REGDOM_SET_BY_CORE: Core queried CRDA for a dynamic world regulatory domain.
295 * @REGDOM_SET_BY_USER: User asked the wireless core to set the
296 * regulatory domain.
297 * @REGDOM_SET_BY_DRIVER: a wireless drivers has hinted to the wireless core
298 * it thinks its knows the regulatory domain we should be in.
299 * @REGDOM_SET_BY_COUNTRY_IE: the wireless core has received an 802.11 country
300 * information element with regulatory information it thinks we
301 * should consider.
302 */
303enum reg_set_by {
304 REGDOM_SET_BY_INIT,
305 REGDOM_SET_BY_CORE,
306 REGDOM_SET_BY_USER,
307 REGDOM_SET_BY_DRIVER,
308 REGDOM_SET_BY_COUNTRY_IE,
309};
310
311struct ieee80211_freq_range {
312 u32 start_freq_khz;
313 u32 end_freq_khz;
314 u32 max_bandwidth_khz;
315};
316
317struct ieee80211_power_rule {
318 u32 max_antenna_gain;
319 u32 max_eirp;
320};
321
322struct ieee80211_reg_rule {
323 struct ieee80211_freq_range freq_range;
324 struct ieee80211_power_rule power_rule;
325 u32 flags;
326};
327
328struct ieee80211_regdomain {
329 u32 n_reg_rules;
330 char alpha2[2];
331 struct ieee80211_reg_rule reg_rules[];
332};
333
334#define MHZ_TO_KHZ(freq) (freq * 1000)
335#define KHZ_TO_MHZ(freq) (freq / 1000)
336#define DBI_TO_MBI(gain) (gain * 100)
337#define MBI_TO_DBI(gain) (gain / 100)
338#define DBM_TO_MBM(gain) (gain * 100)
339#define MBM_TO_DBM(gain) (gain / 100)
340
341#define REG_RULE(start, end, bw, gain, eirp, reg_flags) { \
342 .freq_range.start_freq_khz = (start) * 1000, \
343 .freq_range.end_freq_khz = (end) * 1000, \
344 .freq_range.max_bandwidth_khz = (bw) * 1000, \
345 .power_rule.max_antenna_gain = (gain) * 100, \
346 .power_rule.max_eirp = (eirp) * 100, \
347 .flags = reg_flags, \
348 }
271 349
272/* from net/wireless.h */ 350/* from net/wireless.h */
273struct wiphy; 351struct wiphy;
@@ -285,11 +363,13 @@ struct wiphy;
285 * wireless extensions but this is subject to reevaluation as soon as this 363 * wireless extensions but this is subject to reevaluation as soon as this
286 * code is used more widely and we have a first user without wext. 364 * code is used more widely and we have a first user without wext.
287 * 365 *
288 * @add_virtual_intf: create a new virtual interface with the given name 366 * @add_virtual_intf: create a new virtual interface with the given name,
367 * must set the struct wireless_dev's iftype.
289 * 368 *
290 * @del_virtual_intf: remove the virtual interface determined by ifindex. 369 * @del_virtual_intf: remove the virtual interface determined by ifindex.
291 * 370 *
292 * @change_virtual_intf: change type of virtual interface 371 * @change_virtual_intf: change type/configuration of virtual interface,
372 * keep the struct wireless_dev's iftype updated.
293 * 373 *
294 * @add_key: add a key with the given parameters. @mac_addr will be %NULL 374 * @add_key: add a key with the given parameters. @mac_addr will be %NULL
295 * when adding a group key. 375 * when adding a group key.
@@ -318,6 +398,8 @@ struct wiphy;
318 * @change_station: Modify a given station. 398 * @change_station: Modify a given station.
319 * 399 *
320 * @set_mesh_cfg: set mesh parameters (by now, just mesh id) 400 * @set_mesh_cfg: set mesh parameters (by now, just mesh id)
401 *
402 * @change_bss: Modify parameters for a given BSS.
321 */ 403 */
322struct cfg80211_ops { 404struct cfg80211_ops {
323 int (*add_virtual_intf)(struct wiphy *wiphy, char *name, 405 int (*add_virtual_intf)(struct wiphy *wiphy, char *name,
@@ -370,6 +452,9 @@ struct cfg80211_ops {
370 int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev, 452 int (*dump_mpath)(struct wiphy *wiphy, struct net_device *dev,
371 int idx, u8 *dst, u8 *next_hop, 453 int idx, u8 *dst, u8 *next_hop,
372 struct mpath_info *pinfo); 454 struct mpath_info *pinfo);
455
456 int (*change_bss)(struct wiphy *wiphy, struct net_device *dev,
457 struct bss_parameters *params);
373}; 458};
374 459
375#endif /* __NET_CFG80211_H */ 460#endif /* __NET_CFG80211_H */
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index b31399e1fd83..6048579d0b24 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -190,10 +190,6 @@ const char *escape_essid(const char *essid, u8 essid_len);
190#endif 190#endif
191#include <net/iw_handler.h> /* new driver API */ 191#include <net/iw_handler.h> /* new driver API */
192 192
193#ifndef ETH_P_PAE
194#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
195#endif /* ETH_P_PAE */
196
197#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */ 193#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
198 194
199#ifndef ETH_P_80211_RAW 195#ifndef ETH_P_80211_RAW
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 2ff545a56fb5..03cffd9f64e3 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -51,12 +51,14 @@ struct inet_connection_sock_af_ops {
51 char __user *optval, int optlen); 51 char __user *optval, int optlen);
52 int (*getsockopt)(struct sock *sk, int level, int optname, 52 int (*getsockopt)(struct sock *sk, int level, int optname,
53 char __user *optval, int __user *optlen); 53 char __user *optval, int __user *optlen);
54#ifdef CONFIG_COMPAT
54 int (*compat_setsockopt)(struct sock *sk, 55 int (*compat_setsockopt)(struct sock *sk,
55 int level, int optname, 56 int level, int optname,
56 char __user *optval, int optlen); 57 char __user *optval, int optlen);
57 int (*compat_getsockopt)(struct sock *sk, 58 int (*compat_getsockopt)(struct sock *sk,
58 int level, int optname, 59 int level, int optname,
59 char __user *optval, int __user *optlen); 60 char __user *optval, int __user *optlen);
61#endif
60 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); 62 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
61 int (*bind_conflict)(const struct sock *sk, 63 int (*bind_conflict)(const struct sock *sk,
62 const struct inet_bind_bucket *tb); 64 const struct inet_bind_bucket *tb);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7312c3dd309f..33e2ac6ceb3e 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -21,11 +21,103 @@
21#include <linux/timer.h> 21#include <linux/timer.h>
22 22
23#include <net/checksum.h> 23#include <net/checksum.h>
24#include <linux/netfilter.h> /* for union nf_inet_addr */
25#include <linux/ipv6.h> /* for struct ipv6hdr */
26#include <net/ipv6.h> /* for ipv6_addr_copy */
27
28struct ip_vs_iphdr {
29 int len;
30 __u8 protocol;
31 union nf_inet_addr saddr;
32 union nf_inet_addr daddr;
33};
34
35static inline void
36ip_vs_fill_iphdr(int af, const void *nh, struct ip_vs_iphdr *iphdr)
37{
38#ifdef CONFIG_IP_VS_IPV6
39 if (af == AF_INET6) {
40 const struct ipv6hdr *iph = nh;
41 iphdr->len = sizeof(struct ipv6hdr);
42 iphdr->protocol = iph->nexthdr;
43 ipv6_addr_copy(&iphdr->saddr.in6, &iph->saddr);
44 ipv6_addr_copy(&iphdr->daddr.in6, &iph->daddr);
45 } else
46#endif
47 {
48 const struct iphdr *iph = nh;
49 iphdr->len = iph->ihl * 4;
50 iphdr->protocol = iph->protocol;
51 iphdr->saddr.ip = iph->saddr;
52 iphdr->daddr.ip = iph->daddr;
53 }
54}
55
56static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
57 const union nf_inet_addr *src)
58{
59#ifdef CONFIG_IP_VS_IPV6
60 if (af == AF_INET6)
61 ipv6_addr_copy(&dst->in6, &src->in6);
62 else
63#endif
64 dst->ip = src->ip;
65}
66
67static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
68 const union nf_inet_addr *b)
69{
70#ifdef CONFIG_IP_VS_IPV6
71 if (af == AF_INET6)
72 return ipv6_addr_equal(&a->in6, &b->in6);
73#endif
74 return a->ip == b->ip;
75}
24 76
25#ifdef CONFIG_IP_VS_DEBUG 77#ifdef CONFIG_IP_VS_DEBUG
26#include <linux/net.h> 78#include <linux/net.h>
27 79
28extern int ip_vs_get_debug_level(void); 80extern int ip_vs_get_debug_level(void);
81
82static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
83 const union nf_inet_addr *addr,
84 int *idx)
85{
86 int len;
87#ifdef CONFIG_IP_VS_IPV6
88 if (af == AF_INET6)
89 len = snprintf(&buf[*idx], buf_len - *idx, "[" NIP6_FMT "]",
90 NIP6(addr->in6)) + 1;
91 else
92#endif
93 len = snprintf(&buf[*idx], buf_len - *idx, NIPQUAD_FMT,
94 NIPQUAD(addr->ip)) + 1;
95
96 *idx += len;
97 BUG_ON(*idx > buf_len + 1);
98 return &buf[*idx - len];
99}
100
101#define IP_VS_DBG_BUF(level, msg...) \
102 do { \
103 char ip_vs_dbg_buf[160]; \
104 int ip_vs_dbg_idx = 0; \
105 if (level <= ip_vs_get_debug_level()) \
106 printk(KERN_DEBUG "IPVS: " msg); \
107 } while (0)
108#define IP_VS_ERR_BUF(msg...) \
109 do { \
110 char ip_vs_dbg_buf[160]; \
111 int ip_vs_dbg_idx = 0; \
112 printk(KERN_ERR "IPVS: " msg); \
113 } while (0)
114
115/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
116#define IP_VS_DBG_ADDR(af, addr) \
117 ip_vs_dbg_addr(af, ip_vs_dbg_buf, \
118 sizeof(ip_vs_dbg_buf), addr, \
119 &ip_vs_dbg_idx)
120
29#define IP_VS_DBG(level, msg...) \ 121#define IP_VS_DBG(level, msg...) \
30 do { \ 122 do { \
31 if (level <= ip_vs_get_debug_level()) \ 123 if (level <= ip_vs_get_debug_level()) \
@@ -48,6 +140,8 @@ extern int ip_vs_get_debug_level(void);
48 pp->debug_packet(pp, skb, ofs, msg); \ 140 pp->debug_packet(pp, skb, ofs, msg); \
49 } while (0) 141 } while (0)
50#else /* NO DEBUGGING at ALL */ 142#else /* NO DEBUGGING at ALL */
143#define IP_VS_DBG_BUF(level, msg...) do {} while (0)
144#define IP_VS_ERR_BUF(msg...) do {} while (0)
51#define IP_VS_DBG(level, msg...) do {} while (0) 145#define IP_VS_DBG(level, msg...) do {} while (0)
52#define IP_VS_DBG_RL(msg...) do {} while (0) 146#define IP_VS_DBG_RL(msg...) do {} while (0)
53#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) do {} while (0) 147#define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) do {} while (0)
@@ -160,27 +254,10 @@ struct ip_vs_estimator {
160 254
161struct ip_vs_stats 255struct ip_vs_stats
162{ 256{
163 __u32 conns; /* connections scheduled */ 257 struct ip_vs_stats_user ustats; /* statistics */
164 __u32 inpkts; /* incoming packets */ 258 struct ip_vs_estimator est; /* estimator */
165 __u32 outpkts; /* outgoing packets */
166 __u64 inbytes; /* incoming bytes */
167 __u64 outbytes; /* outgoing bytes */
168
169 __u32 cps; /* current connection rate */
170 __u32 inpps; /* current in packet rate */
171 __u32 outpps; /* current out packet rate */
172 __u32 inbps; /* current in byte rate */
173 __u32 outbps; /* current out byte rate */
174
175 /*
176 * Don't add anything before the lock, because we use memcpy() to copy
177 * the members before the lock to struct ip_vs_stats_user in
178 * ip_vs_ctl.c.
179 */
180 259
181 spinlock_t lock; /* spin lock */ 260 spinlock_t lock; /* spin lock */
182
183 struct ip_vs_estimator est; /* estimator */
184}; 261};
185 262
186struct dst_entry; 263struct dst_entry;
@@ -202,21 +279,23 @@ struct ip_vs_protocol {
202 279
203 void (*exit)(struct ip_vs_protocol *pp); 280 void (*exit)(struct ip_vs_protocol *pp);
204 281
205 int (*conn_schedule)(struct sk_buff *skb, 282 int (*conn_schedule)(int af, struct sk_buff *skb,
206 struct ip_vs_protocol *pp, 283 struct ip_vs_protocol *pp,
207 int *verdict, struct ip_vs_conn **cpp); 284 int *verdict, struct ip_vs_conn **cpp);
208 285
209 struct ip_vs_conn * 286 struct ip_vs_conn *
210 (*conn_in_get)(const struct sk_buff *skb, 287 (*conn_in_get)(int af,
288 const struct sk_buff *skb,
211 struct ip_vs_protocol *pp, 289 struct ip_vs_protocol *pp,
212 const struct iphdr *iph, 290 const struct ip_vs_iphdr *iph,
213 unsigned int proto_off, 291 unsigned int proto_off,
214 int inverse); 292 int inverse);
215 293
216 struct ip_vs_conn * 294 struct ip_vs_conn *
217 (*conn_out_get)(const struct sk_buff *skb, 295 (*conn_out_get)(int af,
296 const struct sk_buff *skb,
218 struct ip_vs_protocol *pp, 297 struct ip_vs_protocol *pp,
219 const struct iphdr *iph, 298 const struct ip_vs_iphdr *iph,
220 unsigned int proto_off, 299 unsigned int proto_off,
221 int inverse); 300 int inverse);
222 301
@@ -226,7 +305,8 @@ struct ip_vs_protocol {
226 int (*dnat_handler)(struct sk_buff *skb, 305 int (*dnat_handler)(struct sk_buff *skb,
227 struct ip_vs_protocol *pp, struct ip_vs_conn *cp); 306 struct ip_vs_protocol *pp, struct ip_vs_conn *cp);
228 307
229 int (*csum_check)(struct sk_buff *skb, struct ip_vs_protocol *pp); 308 int (*csum_check)(int af, struct sk_buff *skb,
309 struct ip_vs_protocol *pp);
230 310
231 const char *(*state_name)(int state); 311 const char *(*state_name)(int state);
232 312
@@ -259,9 +339,10 @@ struct ip_vs_conn {
259 struct list_head c_list; /* hashed list heads */ 339 struct list_head c_list; /* hashed list heads */
260 340
261 /* Protocol, addresses and port numbers */ 341 /* Protocol, addresses and port numbers */
262 __be32 caddr; /* client address */ 342 u16 af; /* address family */
263 __be32 vaddr; /* virtual address */ 343 union nf_inet_addr caddr; /* client address */
264 __be32 daddr; /* destination address */ 344 union nf_inet_addr vaddr; /* virtual address */
345 union nf_inet_addr daddr; /* destination address */
265 __be16 cport; 346 __be16 cport;
266 __be16 vport; 347 __be16 vport;
267 __be16 dport; 348 __be16 dport;
@@ -305,6 +386,45 @@ struct ip_vs_conn {
305 386
306 387
307/* 388/*
389 * Extended internal versions of struct ip_vs_service_user and
390 * ip_vs_dest_user for IPv6 support.
391 *
392 * We need these to conveniently pass around service and destination
393 * options, but unfortunately, we also need to keep the old definitions to
394 * maintain userspace backwards compatibility for the setsockopt interface.
395 */
396struct ip_vs_service_user_kern {
397 /* virtual service addresses */
398 u16 af;
399 u16 protocol;
400 union nf_inet_addr addr; /* virtual ip address */
401 u16 port;
402 u32 fwmark; /* firwall mark of service */
403
404 /* virtual service options */
405 char *sched_name;
406 unsigned flags; /* virtual service flags */
407 unsigned timeout; /* persistent timeout in sec */
408 u32 netmask; /* persistent netmask */
409};
410
411
412struct ip_vs_dest_user_kern {
413 /* destination server address */
414 union nf_inet_addr addr;
415 u16 port;
416
417 /* real server options */
418 unsigned conn_flags; /* connection flags */
419 int weight; /* destination weight */
420
421 /* thresholds for active connections */
422 u32 u_threshold; /* upper threshold */
423 u32 l_threshold; /* lower threshold */
424};
425
426
427/*
308 * The information about the virtual service offered to the net 428 * The information about the virtual service offered to the net
309 * and the forwarding entries 429 * and the forwarding entries
310 */ 430 */
@@ -314,8 +434,9 @@ struct ip_vs_service {
314 atomic_t refcnt; /* reference counter */ 434 atomic_t refcnt; /* reference counter */
315 atomic_t usecnt; /* use counter */ 435 atomic_t usecnt; /* use counter */
316 436
437 u16 af; /* address family */
317 __u16 protocol; /* which protocol (TCP/UDP) */ 438 __u16 protocol; /* which protocol (TCP/UDP) */
318 __be32 addr; /* IP address for virtual service */ 439 union nf_inet_addr addr; /* IP address for virtual service */
319 __be16 port; /* port number for the service */ 440 __be16 port; /* port number for the service */
320 __u32 fwmark; /* firewall mark of the service */ 441 __u32 fwmark; /* firewall mark of the service */
321 unsigned flags; /* service status flags */ 442 unsigned flags; /* service status flags */
@@ -342,7 +463,8 @@ struct ip_vs_dest {
342 struct list_head n_list; /* for the dests in the service */ 463 struct list_head n_list; /* for the dests in the service */
343 struct list_head d_list; /* for table with all the dests */ 464 struct list_head d_list; /* for table with all the dests */
344 465
345 __be32 addr; /* IP address of the server */ 466 u16 af; /* address family */
467 union nf_inet_addr addr; /* IP address of the server */
346 __be16 port; /* port number of the server */ 468 __be16 port; /* port number of the server */
347 volatile unsigned flags; /* dest status flags */ 469 volatile unsigned flags; /* dest status flags */
348 atomic_t conn_flags; /* flags to copy to conn */ 470 atomic_t conn_flags; /* flags to copy to conn */
@@ -366,7 +488,7 @@ struct ip_vs_dest {
366 /* for virtual service */ 488 /* for virtual service */
367 struct ip_vs_service *svc; /* service it belongs to */ 489 struct ip_vs_service *svc; /* service it belongs to */
368 __u16 protocol; /* which protocol (TCP/UDP) */ 490 __u16 protocol; /* which protocol (TCP/UDP) */
369 __be32 vaddr; /* virtual IP address */ 491 union nf_inet_addr vaddr; /* virtual IP address */
370 __be16 vport; /* virtual port number */ 492 __be16 vport; /* virtual port number */
371 __u32 vfwmark; /* firewall mark of service */ 493 __u32 vfwmark; /* firewall mark of service */
372}; 494};
@@ -380,6 +502,9 @@ struct ip_vs_scheduler {
380 char *name; /* scheduler name */ 502 char *name; /* scheduler name */
381 atomic_t refcnt; /* reference counter */ 503 atomic_t refcnt; /* reference counter */
382 struct module *module; /* THIS_MODULE/NULL */ 504 struct module *module; /* THIS_MODULE/NULL */
505#ifdef CONFIG_IP_VS_IPV6
506 int supports_ipv6; /* scheduler has IPv6 support */
507#endif
383 508
384 /* scheduler initializing service */ 509 /* scheduler initializing service */
385 int (*init_service)(struct ip_vs_service *svc); 510 int (*init_service)(struct ip_vs_service *svc);
@@ -479,16 +604,8 @@ extern void ip_vs_init_hash_table(struct list_head *table, int rows);
479#ifndef CONFIG_IP_VS_TAB_BITS 604#ifndef CONFIG_IP_VS_TAB_BITS
480#define CONFIG_IP_VS_TAB_BITS 12 605#define CONFIG_IP_VS_TAB_BITS 12
481#endif 606#endif
482/* make sure that IP_VS_CONN_TAB_BITS is located in [8, 20] */ 607
483#if CONFIG_IP_VS_TAB_BITS < 8
484#define IP_VS_CONN_TAB_BITS 8
485#endif
486#if CONFIG_IP_VS_TAB_BITS > 20
487#define IP_VS_CONN_TAB_BITS 20
488#endif
489#if 8 <= CONFIG_IP_VS_TAB_BITS && CONFIG_IP_VS_TAB_BITS <= 20
490#define IP_VS_CONN_TAB_BITS CONFIG_IP_VS_TAB_BITS 608#define IP_VS_CONN_TAB_BITS CONFIG_IP_VS_TAB_BITS
491#endif
492#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS) 609#define IP_VS_CONN_TAB_SIZE (1 << IP_VS_CONN_TAB_BITS)
493#define IP_VS_CONN_TAB_MASK (IP_VS_CONN_TAB_SIZE - 1) 610#define IP_VS_CONN_TAB_MASK (IP_VS_CONN_TAB_SIZE - 1)
494 611
@@ -500,11 +617,16 @@ enum {
500}; 617};
501 618
502extern struct ip_vs_conn *ip_vs_conn_in_get 619extern struct ip_vs_conn *ip_vs_conn_in_get
503(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); 620(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
621 const union nf_inet_addr *d_addr, __be16 d_port);
622
504extern struct ip_vs_conn *ip_vs_ct_in_get 623extern struct ip_vs_conn *ip_vs_ct_in_get
505(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); 624(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
625 const union nf_inet_addr *d_addr, __be16 d_port);
626
506extern struct ip_vs_conn *ip_vs_conn_out_get 627extern struct ip_vs_conn *ip_vs_conn_out_get
507(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port); 628(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
629 const union nf_inet_addr *d_addr, __be16 d_port);
508 630
509/* put back the conn without restarting its timer */ 631/* put back the conn without restarting its timer */
510static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) 632static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
@@ -515,8 +637,9 @@ extern void ip_vs_conn_put(struct ip_vs_conn *cp);
515extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); 637extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
516 638
517extern struct ip_vs_conn * 639extern struct ip_vs_conn *
518ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport, 640ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
519 __be32 daddr, __be16 dport, unsigned flags, 641 const union nf_inet_addr *vaddr, __be16 vport,
642 const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
520 struct ip_vs_dest *dest); 643 struct ip_vs_dest *dest);
521extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); 644extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
522 645
@@ -532,24 +655,32 @@ static inline void ip_vs_control_del(struct ip_vs_conn *cp)
532{ 655{
533 struct ip_vs_conn *ctl_cp = cp->control; 656 struct ip_vs_conn *ctl_cp = cp->control;
534 if (!ctl_cp) { 657 if (!ctl_cp) {
535 IP_VS_ERR("request control DEL for uncontrolled: " 658 IP_VS_ERR_BUF("request control DEL for uncontrolled: "
536 "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", 659 "%s:%d to %s:%d\n",
537 NIPQUAD(cp->caddr),ntohs(cp->cport), 660 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
538 NIPQUAD(cp->vaddr),ntohs(cp->vport)); 661 ntohs(cp->cport),
662 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
663 ntohs(cp->vport));
664
539 return; 665 return;
540 } 666 }
541 667
542 IP_VS_DBG(7, "DELeting control for: " 668 IP_VS_DBG_BUF(7, "DELeting control for: "
543 "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n", 669 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
544 NIPQUAD(cp->caddr),ntohs(cp->cport), 670 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
545 NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport)); 671 ntohs(cp->cport),
672 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
673 ntohs(ctl_cp->cport));
546 674
547 cp->control = NULL; 675 cp->control = NULL;
548 if (atomic_read(&ctl_cp->n_control) == 0) { 676 if (atomic_read(&ctl_cp->n_control) == 0) {
549 IP_VS_ERR("BUG control DEL with n=0 : " 677 IP_VS_ERR_BUF("BUG control DEL with n=0 : "
550 "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", 678 "%s:%d to %s:%d\n",
551 NIPQUAD(cp->caddr),ntohs(cp->cport), 679 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
552 NIPQUAD(cp->vaddr),ntohs(cp->vport)); 680 ntohs(cp->cport),
681 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
682 ntohs(cp->vport));
683
553 return; 684 return;
554 } 685 }
555 atomic_dec(&ctl_cp->n_control); 686 atomic_dec(&ctl_cp->n_control);
@@ -559,17 +690,22 @@ static inline void
559ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) 690ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
560{ 691{
561 if (cp->control) { 692 if (cp->control) {
562 IP_VS_ERR("request control ADD for already controlled: " 693 IP_VS_ERR_BUF("request control ADD for already controlled: "
563 "%d.%d.%d.%d:%d to %d.%d.%d.%d:%d\n", 694 "%s:%d to %s:%d\n",
564 NIPQUAD(cp->caddr),ntohs(cp->cport), 695 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
565 NIPQUAD(cp->vaddr),ntohs(cp->vport)); 696 ntohs(cp->cport),
697 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
698 ntohs(cp->vport));
699
566 ip_vs_control_del(cp); 700 ip_vs_control_del(cp);
567 } 701 }
568 702
569 IP_VS_DBG(7, "ADDing control for: " 703 IP_VS_DBG_BUF(7, "ADDing control for: "
570 "cp.dst=%d.%d.%d.%d:%d ctl_cp.dst=%d.%d.%d.%d:%d\n", 704 "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
571 NIPQUAD(cp->caddr),ntohs(cp->cport), 705 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
572 NIPQUAD(ctl_cp->caddr),ntohs(ctl_cp->cport)); 706 ntohs(cp->cport),
707 IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
708 ntohs(ctl_cp->cport));
573 709
574 cp->control = ctl_cp; 710 cp->control = ctl_cp;
575 atomic_inc(&ctl_cp->n_control); 711 atomic_inc(&ctl_cp->n_control);
@@ -647,7 +783,8 @@ extern struct ip_vs_stats ip_vs_stats;
647extern const struct ctl_path net_vs_ctl_path[]; 783extern const struct ctl_path net_vs_ctl_path[];
648 784
649extern struct ip_vs_service * 785extern struct ip_vs_service *
650ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport); 786ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
787 const union nf_inet_addr *vaddr, __be16 vport);
651 788
652static inline void ip_vs_service_put(struct ip_vs_service *svc) 789static inline void ip_vs_service_put(struct ip_vs_service *svc)
653{ 790{
@@ -655,14 +792,16 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc)
655} 792}
656 793
657extern struct ip_vs_dest * 794extern struct ip_vs_dest *
658ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport); 795ip_vs_lookup_real_service(int af, __u16 protocol,
796 const union nf_inet_addr *daddr, __be16 dport);
797
659extern int ip_vs_use_count_inc(void); 798extern int ip_vs_use_count_inc(void);
660extern void ip_vs_use_count_dec(void); 799extern void ip_vs_use_count_dec(void);
661extern int ip_vs_control_init(void); 800extern int ip_vs_control_init(void);
662extern void ip_vs_control_cleanup(void); 801extern void ip_vs_control_cleanup(void);
663extern struct ip_vs_dest * 802extern struct ip_vs_dest *
664ip_vs_find_dest(__be32 daddr, __be16 dport, 803ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
665 __be32 vaddr, __be16 vport, __u16 protocol); 804 const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
666extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); 805extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
667 806
668 807
@@ -683,6 +822,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
683/* 822/*
684 * IPVS rate estimator prototypes (from ip_vs_est.c) 823 * IPVS rate estimator prototypes (from ip_vs_est.c)
685 */ 824 */
825extern int ip_vs_estimator_init(void);
826extern void ip_vs_estimator_cleanup(void);
686extern void ip_vs_new_estimator(struct ip_vs_stats *stats); 827extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
687extern void ip_vs_kill_estimator(struct ip_vs_stats *stats); 828extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
688extern void ip_vs_zero_estimator(struct ip_vs_stats *stats); 829extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
@@ -704,6 +845,19 @@ extern int ip_vs_icmp_xmit
704(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset); 845(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset);
705extern void ip_vs_dst_reset(struct ip_vs_dest *dest); 846extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
706 847
848#ifdef CONFIG_IP_VS_IPV6
849extern int ip_vs_bypass_xmit_v6
850(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
851extern int ip_vs_nat_xmit_v6
852(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
853extern int ip_vs_tunnel_xmit_v6
854(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
855extern int ip_vs_dr_xmit_v6
856(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
857extern int ip_vs_icmp_xmit_v6
858(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
859 int offset);
860#endif
707 861
708/* 862/*
709 * This is a simple mechanism to ignore packets when 863 * This is a simple mechanism to ignore packets when
@@ -748,7 +902,12 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
748} 902}
749 903
750extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, 904extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
751 struct ip_vs_conn *cp, int dir); 905 struct ip_vs_conn *cp, int dir);
906
907#ifdef CONFIG_IP_VS_IPV6
908extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
909 struct ip_vs_conn *cp, int dir);
910#endif
752 911
753extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset); 912extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
754 913
@@ -759,6 +918,17 @@ static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
759 return csum_partial((char *) diff, sizeof(diff), oldsum); 918 return csum_partial((char *) diff, sizeof(diff), oldsum);
760} 919}
761 920
921#ifdef CONFIG_IP_VS_IPV6
922static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
923 __wsum oldsum)
924{
925 __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
926 new[3], new[2], new[1], new[0] };
927
928 return csum_partial((char *) diff, sizeof(diff), oldsum);
929}
930#endif
931
762static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) 932static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
763{ 933{
764 __be16 diff[2] = { ~old, new }; 934 __be16 diff[2] = { ~old, new };
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 113028fb8f66..dfa7ae3c5607 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -576,6 +576,8 @@ extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
576extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, 576extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
577 struct group_filter __user *optval, 577 struct group_filter __user *optval,
578 int __user *optlen); 578 int __user *optlen);
579extern unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
580 const struct in6_addr *daddr, u32 rnd);
579 581
580#ifdef CONFIG_PROC_FS 582#ifdef CONFIG_PROC_FS
581extern int ac6_proc_init(struct net *net); 583extern int ac6_proc_init(struct net *net);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ff137fd7714f..f5f5b1ff1584 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -158,13 +158,17 @@ struct ieee80211_low_level_stats {
158 * also implies a change in the AID. 158 * also implies a change in the AID.
159 * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed 159 * @BSS_CHANGED_ERP_CTS_PROT: CTS protection changed
160 * @BSS_CHANGED_ERP_PREAMBLE: preamble changed 160 * @BSS_CHANGED_ERP_PREAMBLE: preamble changed
161 * @BSS_CHANGED_ERP_SLOT: slot timing changed
161 * @BSS_CHANGED_HT: 802.11n parameters changed 162 * @BSS_CHANGED_HT: 802.11n parameters changed
163 * @BSS_CHANGED_BASIC_RATES: Basic rateset changed
162 */ 164 */
163enum ieee80211_bss_change { 165enum ieee80211_bss_change {
164 BSS_CHANGED_ASSOC = 1<<0, 166 BSS_CHANGED_ASSOC = 1<<0,
165 BSS_CHANGED_ERP_CTS_PROT = 1<<1, 167 BSS_CHANGED_ERP_CTS_PROT = 1<<1,
166 BSS_CHANGED_ERP_PREAMBLE = 1<<2, 168 BSS_CHANGED_ERP_PREAMBLE = 1<<2,
169 BSS_CHANGED_ERP_SLOT = 1<<3,
167 BSS_CHANGED_HT = 1<<4, 170 BSS_CHANGED_HT = 1<<4,
171 BSS_CHANGED_BASIC_RATES = 1<<5,
168}; 172};
169 173
170/** 174/**
@@ -177,6 +181,7 @@ enum ieee80211_bss_change {
177 * @aid: association ID number, valid only when @assoc is true 181 * @aid: association ID number, valid only when @assoc is true
178 * @use_cts_prot: use CTS protection 182 * @use_cts_prot: use CTS protection
179 * @use_short_preamble: use 802.11b short preamble 183 * @use_short_preamble: use 802.11b short preamble
184 * @use_short_slot: use short slot time (only relevant for ERP)
180 * @dtim_period: num of beacons before the next DTIM, for PSM 185 * @dtim_period: num of beacons before the next DTIM, for PSM
181 * @timestamp: beacon timestamp 186 * @timestamp: beacon timestamp
182 * @beacon_int: beacon interval 187 * @beacon_int: beacon interval
@@ -184,6 +189,9 @@ enum ieee80211_bss_change {
184 * @assoc_ht: association in HT mode 189 * @assoc_ht: association in HT mode
185 * @ht_conf: ht capabilities 190 * @ht_conf: ht capabilities
186 * @ht_bss_conf: ht extended capabilities 191 * @ht_bss_conf: ht extended capabilities
192 * @basic_rates: bitmap of basic rates, each bit stands for an
193 * index into the rate table configured by the driver in
194 * the current band.
187 */ 195 */
188struct ieee80211_bss_conf { 196struct ieee80211_bss_conf {
189 /* association related data */ 197 /* association related data */
@@ -192,10 +200,12 @@ struct ieee80211_bss_conf {
192 /* erp related data */ 200 /* erp related data */
193 bool use_cts_prot; 201 bool use_cts_prot;
194 bool use_short_preamble; 202 bool use_short_preamble;
203 bool use_short_slot;
195 u8 dtim_period; 204 u8 dtim_period;
196 u16 beacon_int; 205 u16 beacon_int;
197 u16 assoc_capability; 206 u16 assoc_capability;
198 u64 timestamp; 207 u64 timestamp;
208 u64 basic_rates;
199 /* ht related data */ 209 /* ht related data */
200 bool assoc_ht; 210 bool assoc_ht;
201 struct ieee80211_ht_info *ht_conf; 211 struct ieee80211_ht_info *ht_conf;
@@ -290,6 +300,9 @@ enum mac80211_tx_control_flags {
290 * (2) driver internal use (if applicable) 300 * (2) driver internal use (if applicable)
291 * (3) TX status information - driver tells mac80211 what happened 301 * (3) TX status information - driver tells mac80211 what happened
292 * 302 *
303 * The TX control's sta pointer is only valid during the ->tx call,
304 * it may be NULL.
305 *
293 * @flags: transmit info flags, defined above 306 * @flags: transmit info flags, defined above
294 * @band: TBD 307 * @band: TBD
295 * @tx_rate_idx: TBD 308 * @tx_rate_idx: TBD
@@ -317,10 +330,11 @@ struct ieee80211_tx_info {
317 330
318 union { 331 union {
319 struct { 332 struct {
333 /* NB: vif can be NULL for injected frames */
320 struct ieee80211_vif *vif; 334 struct ieee80211_vif *vif;
321 struct ieee80211_key_conf *hw_key; 335 struct ieee80211_key_conf *hw_key;
336 struct ieee80211_sta *sta;
322 unsigned long jiffies; 337 unsigned long jiffies;
323 u16 aid;
324 s8 rts_cts_rate_idx, alt_retry_rate_idx; 338 s8 rts_cts_rate_idx, alt_retry_rate_idx;
325 u8 retry_limit; 339 u8 retry_limit;
326 u8 icv_len; 340 u8 icv_len;
@@ -363,6 +377,7 @@ static inline struct ieee80211_tx_info *IEEE80211_SKB_CB(struct sk_buff *skb)
363 * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field) 377 * @RX_FLAG_TSFT: The timestamp passed in the RX status (@mactime field)
364 * is valid. This is useful in monitor mode and necessary for beacon frames 378 * is valid. This is useful in monitor mode and necessary for beacon frames
365 * to enable IBSS merging. 379 * to enable IBSS merging.
380 * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
366 */ 381 */
367enum mac80211_rx_flags { 382enum mac80211_rx_flags {
368 RX_FLAG_MMIC_ERROR = 1<<0, 383 RX_FLAG_MMIC_ERROR = 1<<0,
@@ -373,6 +388,7 @@ enum mac80211_rx_flags {
373 RX_FLAG_FAILED_FCS_CRC = 1<<5, 388 RX_FLAG_FAILED_FCS_CRC = 1<<5,
374 RX_FLAG_FAILED_PLCP_CRC = 1<<6, 389 RX_FLAG_FAILED_PLCP_CRC = 1<<6,
375 RX_FLAG_TSFT = 1<<7, 390 RX_FLAG_TSFT = 1<<7,
391 RX_FLAG_SHORTPRE = 1<<8
376}; 392};
377 393
378/** 394/**
@@ -418,6 +434,11 @@ struct ieee80211_rx_status {
418 * @IEEE80211_CONF_PS: Enable 802.11 power save mode 434 * @IEEE80211_CONF_PS: Enable 802.11 power save mode
419 */ 435 */
420enum ieee80211_conf_flags { 436enum ieee80211_conf_flags {
437 /*
438 * TODO: IEEE80211_CONF_SHORT_SLOT_TIME will be removed once drivers
439 * have been converted to use bss_info_changed() for slot time
440 * configuration
441 */
421 IEEE80211_CONF_SHORT_SLOT_TIME = (1<<0), 442 IEEE80211_CONF_SHORT_SLOT_TIME = (1<<0),
422 IEEE80211_CONF_RADIOTAP = (1<<1), 443 IEEE80211_CONF_RADIOTAP = (1<<1),
423 IEEE80211_CONF_SUPPORT_HT_MODE = (1<<2), 444 IEEE80211_CONF_SUPPORT_HT_MODE = (1<<2),
@@ -461,33 +482,6 @@ struct ieee80211_conf {
461}; 482};
462 483
463/** 484/**
464 * enum ieee80211_if_types - types of 802.11 network interfaces
465 *
466 * @IEEE80211_IF_TYPE_INVALID: invalid interface type, not used
467 * by mac80211 itself
468 * @IEEE80211_IF_TYPE_AP: interface in AP mode.
469 * @IEEE80211_IF_TYPE_MGMT: special interface for communication with hostap
470 * daemon. Drivers should never see this type.
471 * @IEEE80211_IF_TYPE_STA: interface in STA (client) mode.
472 * @IEEE80211_IF_TYPE_IBSS: interface in IBSS (ad-hoc) mode.
473 * @IEEE80211_IF_TYPE_MNTR: interface in monitor (rfmon) mode.
474 * @IEEE80211_IF_TYPE_WDS: interface in WDS mode.
475 * @IEEE80211_IF_TYPE_VLAN: VLAN interface bound to an AP, drivers
476 * will never see this type.
477 * @IEEE80211_IF_TYPE_MESH_POINT: 802.11s mesh point
478 */
479enum ieee80211_if_types {
480 IEEE80211_IF_TYPE_INVALID,
481 IEEE80211_IF_TYPE_AP,
482 IEEE80211_IF_TYPE_STA,
483 IEEE80211_IF_TYPE_IBSS,
484 IEEE80211_IF_TYPE_MESH_POINT,
485 IEEE80211_IF_TYPE_MNTR,
486 IEEE80211_IF_TYPE_WDS,
487 IEEE80211_IF_TYPE_VLAN,
488};
489
490/**
491 * struct ieee80211_vif - per-interface data 485 * struct ieee80211_vif - per-interface data
492 * 486 *
493 * Data in this structure is continually present for driver 487 * Data in this structure is continually present for driver
@@ -498,7 +492,7 @@ enum ieee80211_if_types {
498 * sizeof(void *). 492 * sizeof(void *).
499 */ 493 */
500struct ieee80211_vif { 494struct ieee80211_vif {
501 enum ieee80211_if_types type; 495 enum nl80211_iftype type;
502 /* must be last */ 496 /* must be last */
503 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); 497 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
504}; 498};
@@ -506,7 +500,7 @@ struct ieee80211_vif {
506static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) 500static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
507{ 501{
508#ifdef CONFIG_MAC80211_MESH 502#ifdef CONFIG_MAC80211_MESH
509 return vif->type == IEEE80211_IF_TYPE_MESH_POINT; 503 return vif->type == NL80211_IFTYPE_MESH_POINT;
510#endif 504#endif
511 return false; 505 return false;
512} 506}
@@ -517,7 +511,7 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
517 * @vif: pointer to a driver-use per-interface structure. The pointer 511 * @vif: pointer to a driver-use per-interface structure. The pointer
518 * itself is also used for various functions including 512 * itself is also used for various functions including
519 * ieee80211_beacon_get() and ieee80211_get_buffered_bc(). 513 * ieee80211_beacon_get() and ieee80211_get_buffered_bc().
520 * @type: one of &enum ieee80211_if_types constants. Determines the type of 514 * @type: one of &enum nl80211_iftype constants. Determines the type of
521 * added/removed interface. 515 * added/removed interface.
522 * @mac_addr: pointer to MAC address of the interface. This pointer is valid 516 * @mac_addr: pointer to MAC address of the interface. This pointer is valid
523 * until the interface is removed (i.e. it cannot be used after 517 * until the interface is removed (i.e. it cannot be used after
@@ -533,7 +527,7 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
533 * in pure monitor mode. 527 * in pure monitor mode.
534 */ 528 */
535struct ieee80211_if_init_conf { 529struct ieee80211_if_init_conf {
536 enum ieee80211_if_types type; 530 enum nl80211_iftype type;
537 struct ieee80211_vif *vif; 531 struct ieee80211_vif *vif;
538 void *mac_addr; 532 void *mac_addr;
539}; 533};
@@ -662,6 +656,33 @@ enum set_key_cmd {
662}; 656};
663 657
664/** 658/**
659 * struct ieee80211_sta - station table entry
660 *
661 * A station table entry represents a station we are possibly
662 * communicating with. Since stations are RCU-managed in
663 * mac80211, any ieee80211_sta pointer you get access to must
664 * either be protected by rcu_read_lock() explicitly or implicitly,
665 * or you must take good care to not use such a pointer after a
666 * call to your sta_notify callback that removed it.
667 *
668 * @addr: MAC address
669 * @aid: AID we assigned to the station if we're an AP
670 * @supp_rates: Bitmap of supported rates (per band)
671 * @ht_info: HT capabilities of this STA
672 * @drv_priv: data area for driver use, will always be aligned to
673 * sizeof(void *), size is determined in hw information.
674 */
675struct ieee80211_sta {
676 u64 supp_rates[IEEE80211_NUM_BANDS];
677 u8 addr[ETH_ALEN];
678 u16 aid;
679 struct ieee80211_ht_info ht_info;
680
681 /* must be last */
682 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
683};
684
685/**
665 * enum sta_notify_cmd - sta notify command 686 * enum sta_notify_cmd - sta notify command
666 * 687 *
667 * Used with the sta_notify() callback in &struct ieee80211_ops, this 688 * Used with the sta_notify() callback in &struct ieee80211_ops, this
@@ -805,6 +826,8 @@ enum ieee80211_hw_flags {
805 * 826 *
806 * @vif_data_size: size (in bytes) of the drv_priv data area 827 * @vif_data_size: size (in bytes) of the drv_priv data area
807 * within &struct ieee80211_vif. 828 * within &struct ieee80211_vif.
829 * @sta_data_size: size (in bytes) of the drv_priv data area
830 * within &struct ieee80211_sta.
808 */ 831 */
809struct ieee80211_hw { 832struct ieee80211_hw {
810 struct ieee80211_conf conf; 833 struct ieee80211_conf conf;
@@ -816,12 +839,15 @@ struct ieee80211_hw {
816 unsigned int extra_tx_headroom; 839 unsigned int extra_tx_headroom;
817 int channel_change_time; 840 int channel_change_time;
818 int vif_data_size; 841 int vif_data_size;
842 int sta_data_size;
819 u16 queues; 843 u16 queues;
820 u16 ampdu_queues; 844 u16 ampdu_queues;
821 u16 max_listen_interval; 845 u16 max_listen_interval;
822 s8 max_signal; 846 s8 max_signal;
823}; 847};
824 848
849struct ieee80211_hw *wiphy_to_hw(struct wiphy *wiphy);
850
825/** 851/**
826 * SET_IEEE80211_DEV - set device for 802.11 hardware 852 * SET_IEEE80211_DEV - set device for 802.11 hardware
827 * 853 *
@@ -1097,7 +1123,7 @@ enum ieee80211_ampdu_mlme_action {
1097 * This callback must be implemented and atomic. 1123 * This callback must be implemented and atomic.
1098 * 1124 *
1099 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit 1125 * @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
1100 * must be set or cleared for a given AID. Must be atomic. 1126 * must be set or cleared for a given STA. Must be atomic.
1101 * 1127 *
1102 * @set_key: See the section "Hardware crypto acceleration" 1128 * @set_key: See the section "Hardware crypto acceleration"
1103 * This callback can sleep, and is only called between add_interface 1129 * This callback can sleep, and is only called between add_interface
@@ -1111,7 +1137,9 @@ enum ieee80211_ampdu_mlme_action {
1111 * @hw_scan: Ask the hardware to service the scan request, no need to start 1137 * @hw_scan: Ask the hardware to service the scan request, no need to start
1112 * the scan state machine in stack. The scan must honour the channel 1138 * the scan state machine in stack. The scan must honour the channel
1113 * configuration done by the regulatory agent in the wiphy's registered 1139 * configuration done by the regulatory agent in the wiphy's registered
1114 * bands. 1140 * bands. When the scan finishes, ieee80211_scan_completed() must be
1141 * called; note that it also must be called when the scan cannot finish
1142 * because the hardware is turned off! Anything else is a bug!
1115 * 1143 *
1116 * @get_stats: return low-level statistics 1144 * @get_stats: return low-level statistics
1117 * 1145 *
@@ -1131,7 +1159,7 @@ enum ieee80211_ampdu_mlme_action {
1131 * of assocaited station or AP. 1159 * of assocaited station or AP.
1132 * 1160 *
1133 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), 1161 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
1134 * bursting) for a hardware TX queue. Must be atomic. 1162 * bursting) for a hardware TX queue.
1135 * 1163 *
1136 * @get_tx_stats: Get statistics of the current TX queue status. This is used 1164 * @get_tx_stats: Get statistics of the current TX queue status. This is used
1137 * to get number of currently queued packets (queue length), maximum queue 1165 * to get number of currently queued packets (queue length), maximum queue
@@ -1181,7 +1209,8 @@ struct ieee80211_ops {
1181 unsigned int changed_flags, 1209 unsigned int changed_flags,
1182 unsigned int *total_flags, 1210 unsigned int *total_flags,
1183 int mc_count, struct dev_addr_list *mc_list); 1211 int mc_count, struct dev_addr_list *mc_list);
1184 int (*set_tim)(struct ieee80211_hw *hw, int aid, int set); 1212 int (*set_tim)(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
1213 bool set);
1185 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1214 int (*set_key)(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1186 const u8 *local_address, const u8 *address, 1215 const u8 *local_address, const u8 *address,
1187 struct ieee80211_key_conf *key); 1216 struct ieee80211_key_conf *key);
@@ -1198,7 +1227,7 @@ struct ieee80211_ops {
1198 int (*set_retry_limit)(struct ieee80211_hw *hw, 1227 int (*set_retry_limit)(struct ieee80211_hw *hw,
1199 u32 short_retry, u32 long_retr); 1228 u32 short_retry, u32 long_retr);
1200 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1229 void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1201 enum sta_notify_cmd, const u8 *addr); 1230 enum sta_notify_cmd, struct ieee80211_sta *sta);
1202 int (*conf_tx)(struct ieee80211_hw *hw, u16 queue, 1231 int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
1203 const struct ieee80211_tx_queue_params *params); 1232 const struct ieee80211_tx_queue_params *params);
1204 int (*get_tx_stats)(struct ieee80211_hw *hw, 1233 int (*get_tx_stats)(struct ieee80211_hw *hw,
@@ -1208,7 +1237,7 @@ struct ieee80211_ops {
1208 int (*tx_last_beacon)(struct ieee80211_hw *hw); 1237 int (*tx_last_beacon)(struct ieee80211_hw *hw);
1209 int (*ampdu_action)(struct ieee80211_hw *hw, 1238 int (*ampdu_action)(struct ieee80211_hw *hw,
1210 enum ieee80211_ampdu_mlme_action action, 1239 enum ieee80211_ampdu_mlme_action action,
1211 const u8 *addr, u16 tid, u16 *ssn); 1240 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
1212}; 1241};
1213 1242
1214/** 1243/**
@@ -1557,16 +1586,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
1557unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); 1586unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb);
1558 1587
1559/** 1588/**
1560 * ieee80211_get_hdrlen - get header length from frame control
1561 *
1562 * This function returns the 802.11 header length in bytes (not including
1563 * encryption headers.)
1564 *
1565 * @fc: the frame control field (in CPU endianness)
1566 */
1567int ieee80211_get_hdrlen(u16 fc);
1568
1569/**
1570 * ieee80211_hdrlen - get header length in bytes from frame control 1589 * ieee80211_hdrlen - get header length in bytes from frame control
1571 * @fc: frame control field in little-endian format 1590 * @fc: frame control field in little-endian format
1572 */ 1591 */
@@ -1608,6 +1627,16 @@ void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue);
1608void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue); 1627void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue);
1609 1628
1610/** 1629/**
1630 * ieee80211_queue_stopped - test status of the queue
1631 * @hw: pointer as obtained from ieee80211_alloc_hw().
1632 * @queue: queue number (counted from zero).
1633 *
1634 * Drivers should use this function instead of netif_stop_queue.
1635 */
1636
1637int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue);
1638
1639/**
1611 * ieee80211_stop_queues - stop all queues 1640 * ieee80211_stop_queues - stop all queues
1612 * @hw: pointer as obtained from ieee80211_alloc_hw(). 1641 * @hw: pointer as obtained from ieee80211_alloc_hw().
1613 * 1642 *
@@ -1758,4 +1787,85 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw, const u8 *ra,
1758 */ 1787 */
1759void ieee80211_notify_mac(struct ieee80211_hw *hw, 1788void ieee80211_notify_mac(struct ieee80211_hw *hw,
1760 enum ieee80211_notification_types notif_type); 1789 enum ieee80211_notification_types notif_type);
1790
1791/**
1792 * ieee80211_find_sta - find a station
1793 *
1794 * @hw: pointer as obtained from ieee80211_alloc_hw()
1795 * @addr: station's address
1796 *
1797 * This function must be called under RCU lock and the
1798 * resulting pointer is only valid under RCU lock as well.
1799 */
1800struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
1801 const u8 *addr);
1802
1803
1804/* Rate control API */
1805/**
1806 * struct rate_selection - rate information for/from rate control algorithms
1807 *
1808 * @rate_idx: selected transmission rate index
1809 * @nonerp_idx: Non-ERP rate to use instead if ERP cannot be used
1810 * @probe_idx: rate for probing (or -1)
1811 * @max_rate_idx: maximum rate index that can be used, this is
1812 * input to the algorithm and will be enforced
1813 */
1814struct rate_selection {
1815 s8 rate_idx, nonerp_idx, probe_idx, max_rate_idx;
1816};
1817
1818struct rate_control_ops {
1819 struct module *module;
1820 const char *name;
1821 void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
1822 void (*clear)(void *priv);
1823 void (*free)(void *priv);
1824
1825 void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
1826 void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
1827 struct ieee80211_sta *sta, void *priv_sta);
1828 void (*free_sta)(void *priv, struct ieee80211_sta *sta,
1829 void *priv_sta);
1830
1831 void (*tx_status)(void *priv, struct ieee80211_supported_band *sband,
1832 struct ieee80211_sta *sta, void *priv_sta,
1833 struct sk_buff *skb);
1834 void (*get_rate)(void *priv, struct ieee80211_supported_band *sband,
1835 struct ieee80211_sta *sta, void *priv_sta,
1836 struct sk_buff *skb,
1837 struct rate_selection *sel);
1838
1839 void (*add_sta_debugfs)(void *priv, void *priv_sta,
1840 struct dentry *dir);
1841 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
1842};
1843
1844static inline int rate_supported(struct ieee80211_sta *sta,
1845 enum ieee80211_band band,
1846 int index)
1847{
1848 return (sta == NULL || sta->supp_rates[band] & BIT(index));
1849}
1850
1851static inline s8
1852rate_lowest_index(struct ieee80211_supported_band *sband,
1853 struct ieee80211_sta *sta)
1854{
1855 int i;
1856
1857 for (i = 0; i < sband->n_bitrates; i++)
1858 if (rate_supported(sta, sband->band, i))
1859 return i;
1860
1861 /* warn when we cannot find a rate. */
1862 WARN_ON(1);
1863
1864 return 0;
1865}
1866
1867
1868int ieee80211_rate_control_register(struct rate_control_ops *ops);
1869void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
1870
1761#endif /* MAC80211_H */ 1871#endif /* MAC80211_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 208fe5a38546..3643bbb8e585 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -119,9 +119,6 @@
119 * Nested Attributes Construction: 119 * Nested Attributes Construction:
120 * nla_nest_start(skb, type) start a nested attribute 120 * nla_nest_start(skb, type) start a nested attribute
121 * nla_nest_end(skb, nla) finalize a nested attribute 121 * nla_nest_end(skb, nla) finalize a nested attribute
122 * nla_nest_compat_start(skb, type, start a nested compat attribute
123 * len, data)
124 * nla_nest_compat_end(skb, type) finalize a nested compat attribute
125 * nla_nest_cancel(skb, nla) cancel nested attribute construction 122 * nla_nest_cancel(skb, nla) cancel nested attribute construction
126 * 123 *
127 * Attribute Length Calculations: 124 * Attribute Length Calculations:
@@ -156,7 +153,6 @@
156 * nla_find_nested() find attribute in nested attributes 153 * nla_find_nested() find attribute in nested attributes
157 * nla_parse() parse and validate stream of attrs 154 * nla_parse() parse and validate stream of attrs
158 * nla_parse_nested() parse nested attribuets 155 * nla_parse_nested() parse nested attribuets
159 * nla_parse_nested_compat() parse nested compat attributes
160 * nla_for_each_attr() loop over all attributes 156 * nla_for_each_attr() loop over all attributes
161 * nla_for_each_nested() loop over the nested attributes 157 * nla_for_each_nested() loop over the nested attributes
162 *========================================================================= 158 *=========================================================================
@@ -752,39 +748,6 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
752} 748}
753 749
754/** 750/**
755 * nla_parse_nested_compat - parse nested compat attributes
756 * @tb: destination array with maxtype+1 elements
757 * @maxtype: maximum attribute type to be expected
758 * @nla: attribute containing the nested attributes
759 * @data: pointer to point to contained structure
760 * @len: length of contained structure
761 * @policy: validation policy
762 *
763 * Parse a nested compat attribute. The compat attribute contains a structure
764 * and optionally a set of nested attributes. On success the data pointer
765 * points to the nested data and tb contains the parsed attributes
766 * (see nla_parse).
767 */
768static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
769 struct nlattr *nla,
770 const struct nla_policy *policy,
771 int len)
772{
773 int nested_len = nla_len(nla) - NLA_ALIGN(len);
774
775 if (nested_len < 0)
776 return -EINVAL;
777 if (nested_len >= nla_attr_size(0))
778 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
779 nested_len, policy);
780 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
781 return 0;
782}
783
784#define nla_parse_nested_compat(tb, maxtype, nla, policy, data, len) \
785({ data = nla_len(nla) >= len ? nla_data(nla) : NULL; \
786 __nla_parse_nested_compat(tb, maxtype, nla, policy, len); })
787/**
788 * nla_put_u8 - Add a u8 netlink attribute to a socket buffer 751 * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
789 * @skb: socket buffer to add attribute to 752 * @skb: socket buffer to add attribute to
790 * @attrtype: attribute type 753 * @attrtype: attribute type
@@ -1031,51 +994,6 @@ static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
1031} 994}
1032 995
1033/** 996/**
1034 * nla_nest_compat_start - Start a new level of nested compat attributes
1035 * @skb: socket buffer to add attributes to
1036 * @attrtype: attribute type of container
1037 * @attrlen: length of structure
1038 * @data: pointer to structure
1039 *
1040 * Start a nested compat attribute that contains both a structure and
1041 * a set of nested attributes.
1042 *
1043 * Returns the container attribute
1044 */
1045static inline struct nlattr *nla_nest_compat_start(struct sk_buff *skb,
1046 int attrtype, int attrlen,
1047 const void *data)
1048{
1049 struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
1050
1051 if (nla_put(skb, attrtype, attrlen, data) < 0)
1052 return NULL;
1053 if (nla_nest_start(skb, attrtype) == NULL) {
1054 nlmsg_trim(skb, start);
1055 return NULL;
1056 }
1057 return start;
1058}
1059
1060/**
1061 * nla_nest_compat_end - Finalize nesting of compat attributes
1062 * @skb: socket buffer the attributes are stored in
1063 * @start: container attribute
1064 *
1065 * Corrects the container attribute header to include the all
1066 * appeneded attributes.
1067 *
1068 * Returns the total data length of the skb.
1069 */
1070static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start)
1071{
1072 struct nlattr *nest = (void *)start + NLMSG_ALIGN(start->nla_len);
1073
1074 start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
1075 return nla_nest_end(skb, nest);
1076}
1077
1078/**
1079 * nla_nest_cancel - Cancel nesting of attributes 997 * nla_nest_cancel - Cancel nesting of attributes
1080 * @skb: socket buffer the message is stored in 998 * @skb: socket buffer the message is stored in
1081 * @start: container attribute 999 * @start: container attribute
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
new file mode 100644
index 000000000000..d4e72508e145
--- /dev/null
+++ b/include/net/phonet/phonet.h
@@ -0,0 +1,112 @@
1/*
2 * File: af_phonet.h
3 *
4 * Phonet sockets kernel definitions
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef AF_PHONET_H
24#define AF_PHONET_H
25
26/*
27 * The lower layers may not require more space, ever. Make sure it's
28 * enough.
29 */
30#define MAX_PHONET_HEADER 8
31
32/*
33 * Every Phonet* socket has this structure first in its
34 * protocol-specific structure under name c.
35 */
36struct pn_sock {
37 struct sock sk;
38 u16 sobject;
39 u8 resource;
40};
41
42static inline struct pn_sock *pn_sk(struct sock *sk)
43{
44 return (struct pn_sock *)sk;
45}
46
47extern const struct proto_ops phonet_dgram_ops;
48
49struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *sa);
50void phonet_get_local_port_range(int *min, int *max);
51void pn_sock_hash(struct sock *sk);
52void pn_sock_unhash(struct sock *sk);
53int pn_sock_get_port(struct sock *sk, unsigned short sport);
54
55int pn_skb_send(struct sock *sk, struct sk_buff *skb,
56 const struct sockaddr_pn *target);
57
58static inline struct phonethdr *pn_hdr(struct sk_buff *skb)
59{
60 return (struct phonethdr *)skb_network_header(skb);
61}
62
63static inline struct phonetmsg *pn_msg(struct sk_buff *skb)
64{
65 return (struct phonetmsg *)skb_transport_header(skb);
66}
67
68/*
69 * Get the other party's sockaddr from received skb. The skb begins
70 * with a Phonet header.
71 */
72static inline
73void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
74{
75 struct phonethdr *ph = pn_hdr(skb);
76 u16 obj = pn_object(ph->pn_sdev, ph->pn_sobj);
77
78 sa->spn_family = AF_PHONET;
79 pn_sockaddr_set_object(sa, obj);
80 pn_sockaddr_set_resource(sa, ph->pn_res);
81 memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
82}
83
84static inline
85void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa)
86{
87 struct phonethdr *ph = pn_hdr(skb);
88 u16 obj = pn_object(ph->pn_rdev, ph->pn_robj);
89
90 sa->spn_family = AF_PHONET;
91 pn_sockaddr_set_object(sa, obj);
92 pn_sockaddr_set_resource(sa, ph->pn_res);
93 memset(sa->spn_zero, 0, sizeof(sa->spn_zero));
94}
95
96/* Protocols in Phonet protocol family. */
97struct phonet_protocol {
98 const struct proto_ops *ops;
99 struct proto *prot;
100 int sock_type;
101};
102
103int phonet_proto_register(int protocol, struct phonet_protocol *pp);
104void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
105
106int phonet_sysctl_init(void);
107void phonet_sysctl_exit(void);
108void phonet_netlink_register(void);
109int isi_register(void);
110void isi_unregister(void);
111
112#endif
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h
new file mode 100644
index 000000000000..bbd2a836e04c
--- /dev/null
+++ b/include/net/phonet/pn_dev.h
@@ -0,0 +1,50 @@
1/*
2 * File: pn_dev.h
3 *
4 * Phonet network device
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22
23#ifndef PN_DEV_H
24#define PN_DEV_H
25
26struct phonet_device_list {
27 struct list_head list;
28 spinlock_t lock;
29};
30
31extern struct phonet_device_list pndevs;
32
33struct phonet_device {
34 struct list_head list;
35 struct net_device *netdev;
36 DECLARE_BITMAP(addrs, 64);
37};
38
39void phonet_device_init(void);
40void phonet_device_exit(void);
41struct net_device *phonet_device_get(struct net *net);
42
43int phonet_address_add(struct net_device *dev, u8 addr);
44int phonet_address_del(struct net_device *dev, u8 addr);
45u8 phonet_address_get(struct net_device *dev, u8 addr);
46int phonet_address_lookup(u8 addr);
47
48#define PN_NO_ADDR 0xff
49
50#endif
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b786a5b09253..4082f39f5079 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -90,10 +90,7 @@ extern void __qdisc_run(struct Qdisc *q);
90 90
91static inline void qdisc_run(struct Qdisc *q) 91static inline void qdisc_run(struct Qdisc *q)
92{ 92{
93 struct netdev_queue *txq = q->dev_queue; 93 if (!test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
94
95 if (!netif_tx_queue_stopped(txq) &&
96 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state))
97 __qdisc_run(q); 94 __qdisc_run(q);
98} 95}
99 96
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e5569625d2a5..3b983e8a0555 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -52,7 +52,7 @@ struct Qdisc
52 u32 parent; 52 u32 parent;
53 atomic_t refcnt; 53 atomic_t refcnt;
54 unsigned long state; 54 unsigned long state;
55 struct sk_buff *gso_skb; 55 struct sk_buff_head requeue;
56 struct sk_buff_head q; 56 struct sk_buff_head q;
57 struct netdev_queue *dev_queue; 57 struct netdev_queue *dev_queue;
58 struct Qdisc *next_sched; 58 struct Qdisc *next_sched;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 17b932b8a55a..703305d00365 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -406,10 +406,7 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
406 406
407/* A macro to walk a list of skbs. */ 407/* A macro to walk a list of skbs. */
408#define sctp_skb_for_each(pos, head, tmp) \ 408#define sctp_skb_for_each(pos, head, tmp) \
409for (pos = (head)->next;\ 409 skb_queue_walk_safe(head, pos, tmp)
410 tmp = (pos)->next, pos != ((struct sk_buff *)(head));\
411 pos = tmp)
412
413 410
414/* A helper to append an entire skb list (list) to another (head). */ 411/* A helper to append an entire skb list (list) to another (head). */
415static inline void sctp_skb_list_tail(struct sk_buff_head *list, 412static inline void sctp_skb_list_tail(struct sk_buff_head *list,
@@ -420,10 +417,7 @@ static inline void sctp_skb_list_tail(struct sk_buff_head *list,
420 sctp_spin_lock_irqsave(&head->lock, flags); 417 sctp_spin_lock_irqsave(&head->lock, flags);
421 sctp_spin_lock(&list->lock); 418 sctp_spin_lock(&list->lock);
422 419
423 list_splice((struct list_head *)list, (struct list_head *)head->prev); 420 skb_queue_splice_tail_init(list, head);
424
425 head->qlen += list->qlen;
426 list->qlen = 0;
427 421
428 sctp_spin_unlock(&list->lock); 422 sctp_spin_unlock(&list->lock);
429 sctp_spin_unlock_irqrestore(&head->lock, flags); 423 sctp_spin_unlock_irqrestore(&head->lock, flags);
diff --git a/include/net/sock.h b/include/net/sock.h
index 06c5259aff30..75a312d3888a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -532,6 +532,7 @@ struct proto {
532 int (*getsockopt)(struct sock *sk, int level, 532 int (*getsockopt)(struct sock *sk, int level,
533 int optname, char __user *optval, 533 int optname, char __user *optval,
534 int __user *option); 534 int __user *option);
535#ifdef CONFIG_COMPAT
535 int (*compat_setsockopt)(struct sock *sk, 536 int (*compat_setsockopt)(struct sock *sk,
536 int level, 537 int level,
537 int optname, char __user *optval, 538 int optname, char __user *optval,
@@ -540,6 +541,7 @@ struct proto {
540 int level, 541 int level,
541 int optname, char __user *optval, 542 int optname, char __user *optval,
542 int __user *option); 543 int __user *option);
544#endif
543 int (*sendmsg)(struct kiocb *iocb, struct sock *sk, 545 int (*sendmsg)(struct kiocb *iocb, struct sock *sk,
544 struct msghdr *msg, size_t len); 546 struct msghdr *msg, size_t len);
545 int (*recvmsg)(struct kiocb *iocb, struct sock *sk, 547 int (*recvmsg)(struct kiocb *iocb, struct sock *sk,
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
new file mode 100644
index 000000000000..6abb3ed3ebf7
--- /dev/null
+++ b/include/net/tc_act/tc_skbedit.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#ifndef __NET_TC_SKBEDIT_H
21#define __NET_TC_SKBEDIT_H
22
23#include <net/act_api.h>
24
25struct tcf_skbedit {
26 struct tcf_common common;
27 u32 flags;
28 u32 priority;
29 u16 queue_mapping;
30};
31#define to_skbedit(pc) \
32 container_of(pc, struct tcf_skbedit, common)
33
34#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8983386356a5..12c9b4fec040 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -472,6 +472,8 @@ extern void tcp_send_delayed_ack(struct sock *sk);
472 472
473/* tcp_input.c */ 473/* tcp_input.c */
474extern void tcp_cwnd_application_limited(struct sock *sk); 474extern void tcp_cwnd_application_limited(struct sock *sk);
475extern void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
476 struct sk_buff *skb);
475 477
476/* tcp_timer.c */ 478/* tcp_timer.c */
477extern void tcp_init_xmit_timers(struct sock *); 479extern void tcp_init_xmit_timers(struct sock *);
@@ -1039,13 +1041,12 @@ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1039{ 1041{
1040 tp->lost_skb_hint = NULL; 1042 tp->lost_skb_hint = NULL;
1041 tp->scoreboard_skb_hint = NULL; 1043 tp->scoreboard_skb_hint = NULL;
1042 tp->retransmit_skb_hint = NULL;
1043 tp->forward_skb_hint = NULL;
1044} 1044}
1045 1045
1046static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) 1046static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1047{ 1047{
1048 tcp_clear_retrans_hints_partial(tp); 1048 tcp_clear_retrans_hints_partial(tp);
1049 tp->retransmit_skb_hint = NULL;
1049} 1050}
1050 1051
1051/* MD5 Signature */ 1052/* MD5 Signature */
@@ -1180,49 +1181,45 @@ static inline void tcp_write_queue_purge(struct sock *sk)
1180 1181
1181static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) 1182static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
1182{ 1183{
1183 struct sk_buff *skb = sk->sk_write_queue.next; 1184 return skb_peek(&sk->sk_write_queue);
1184 if (skb == (struct sk_buff *) &sk->sk_write_queue)
1185 return NULL;
1186 return skb;
1187} 1185}
1188 1186
1189static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk) 1187static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
1190{ 1188{
1191 struct sk_buff *skb = sk->sk_write_queue.prev; 1189 return skb_peek_tail(&sk->sk_write_queue);
1192 if (skb == (struct sk_buff *) &sk->sk_write_queue)
1193 return NULL;
1194 return skb;
1195} 1190}
1196 1191
1197static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb) 1192static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
1198{ 1193{
1199 return skb->next; 1194 return skb_queue_next(&sk->sk_write_queue, skb);
1200} 1195}
1201 1196
1202#define tcp_for_write_queue(skb, sk) \ 1197#define tcp_for_write_queue(skb, sk) \
1203 for (skb = (sk)->sk_write_queue.next; \ 1198 skb_queue_walk(&(sk)->sk_write_queue, skb)
1204 (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
1205 skb = skb->next)
1206 1199
1207#define tcp_for_write_queue_from(skb, sk) \ 1200#define tcp_for_write_queue_from(skb, sk) \
1208 for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\ 1201 skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1209 skb = skb->next)
1210 1202
1211#define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 1203#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
1212 for (tmp = skb->next; \ 1204 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1213 (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
1214 skb = tmp, tmp = skb->next)
1215 1205
1216static inline struct sk_buff *tcp_send_head(struct sock *sk) 1206static inline struct sk_buff *tcp_send_head(struct sock *sk)
1217{ 1207{
1218 return sk->sk_send_head; 1208 return sk->sk_send_head;
1219} 1209}
1220 1210
1211static inline bool tcp_skb_is_last(const struct sock *sk,
1212 const struct sk_buff *skb)
1213{
1214 return skb_queue_is_last(&sk->sk_write_queue, skb);
1215}
1216
1221static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) 1217static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
1222{ 1218{
1223 sk->sk_send_head = skb->next; 1219 if (tcp_skb_is_last(sk, skb))
1224 if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
1225 sk->sk_send_head = NULL; 1220 sk->sk_send_head = NULL;
1221 else
1222 sk->sk_send_head = tcp_write_queue_next(sk, skb);
1226} 1223}
1227 1224
1228static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) 1225static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
@@ -1267,12 +1264,12 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1267 __skb_queue_after(&sk->sk_write_queue, skb, buff); 1264 __skb_queue_after(&sk->sk_write_queue, skb, buff);
1268} 1265}
1269 1266
1270/* Insert skb between prev and next on the write queue of sk. */ 1267/* Insert new before skb on the write queue of sk. */
1271static inline void tcp_insert_write_queue_before(struct sk_buff *new, 1268static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1272 struct sk_buff *skb, 1269 struct sk_buff *skb,
1273 struct sock *sk) 1270 struct sock *sk)
1274{ 1271{
1275 __skb_insert(new, skb->prev, skb, &sk->sk_write_queue); 1272 __skb_queue_before(&sk->sk_write_queue, skb, new);
1276 1273
1277 if (sk->sk_send_head == skb) 1274 if (sk->sk_send_head == skb)
1278 sk->sk_send_head = new; 1275 sk->sk_send_head = new;
@@ -1283,12 +1280,6 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1283 __skb_unlink(skb, &sk->sk_write_queue); 1280 __skb_unlink(skb, &sk->sk_write_queue);
1284} 1281}
1285 1282
1286static inline int tcp_skb_is_last(const struct sock *sk,
1287 const struct sk_buff *skb)
1288{
1289 return skb->next == (struct sk_buff *)&sk->sk_write_queue;
1290}
1291
1292static inline int tcp_write_queue_empty(struct sock *sk) 1283static inline int tcp_write_queue_empty(struct sock *sk)
1293{ 1284{
1294 return skb_queue_empty(&sk->sk_write_queue); 1285 return skb_queue_empty(&sk->sk_write_queue);
diff --git a/include/net/wireless.h b/include/net/wireless.h
index 9324f8dd183e..721efb363db7 100644
--- a/include/net/wireless.h
+++ b/include/net/wireless.h
@@ -60,6 +60,7 @@ enum ieee80211_channel_flags {
60 * with cfg80211. 60 * with cfg80211.
61 * 61 *
62 * @center_freq: center frequency in MHz 62 * @center_freq: center frequency in MHz
63 * @max_bandwidth: maximum allowed bandwidth for this channel, in MHz
63 * @hw_value: hardware-specific value for the channel 64 * @hw_value: hardware-specific value for the channel
64 * @flags: channel flags from &enum ieee80211_channel_flags. 65 * @flags: channel flags from &enum ieee80211_channel_flags.
65 * @orig_flags: channel flags at registration time, used by regulatory 66 * @orig_flags: channel flags at registration time, used by regulatory
@@ -73,6 +74,7 @@ enum ieee80211_channel_flags {
73struct ieee80211_channel { 74struct ieee80211_channel {
74 enum ieee80211_band band; 75 enum ieee80211_band band;
75 u16 center_freq; 76 u16 center_freq;
77 u8 max_bandwidth;
76 u16 hw_value; 78 u16 hw_value;
77 u32 flags; 79 u32 flags;
78 int max_antenna_gain; 80 int max_antenna_gain;
@@ -178,6 +180,7 @@ struct ieee80211_supported_band {
178 * struct wiphy - wireless hardware description 180 * struct wiphy - wireless hardware description
179 * @idx: the wiphy index assigned to this item 181 * @idx: the wiphy index assigned to this item
180 * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name> 182 * @class_dev: the class device representing /sys/class/ieee80211/<wiphy-name>
183 * @reg_notifier: the driver's regulatory notification callback
181 */ 184 */
182struct wiphy { 185struct wiphy {
183 /* assign these fields before you register the wiphy */ 186 /* assign these fields before you register the wiphy */
@@ -185,6 +188,9 @@ struct wiphy {
185 /* permanent MAC address */ 188 /* permanent MAC address */
186 u8 perm_addr[ETH_ALEN]; 189 u8 perm_addr[ETH_ALEN];
187 190
191 /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */
192 u16 interface_modes;
193
188 /* If multiple wiphys are registered and you're handed e.g. 194 /* If multiple wiphys are registered and you're handed e.g.
189 * a regular netdev with assigned ieee80211_ptr, you won't 195 * a regular netdev with assigned ieee80211_ptr, you won't
190 * know whether it points to a wiphy your driver has registered 196 * know whether it points to a wiphy your driver has registered
@@ -194,6 +200,9 @@ struct wiphy {
194 200
195 struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS]; 201 struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS];
196 202
203 /* Lets us get back the wiphy on the callback */
204 int (*reg_notifier)(struct wiphy *wiphy, enum reg_set_by setby);
205
197 /* fields below are read-only, assigned by cfg80211 */ 206 /* fields below are read-only, assigned by cfg80211 */
198 207
199 /* the item in /sys/class/ieee80211/ points to this, 208 /* the item in /sys/class/ieee80211/ points to this,
@@ -214,9 +223,11 @@ struct wiphy {
214 * the netdev.) 223 * the netdev.)
215 * 224 *
216 * @wiphy: pointer to hardware description 225 * @wiphy: pointer to hardware description
226 * @iftype: interface type
217 */ 227 */
218struct wireless_dev { 228struct wireless_dev {
219 struct wiphy *wiphy; 229 struct wiphy *wiphy;
230 enum nl80211_iftype iftype;
220 231
221 /* private to the generic wireless code */ 232 /* private to the generic wireless code */
222 struct list_head list; 233 struct list_head list;
@@ -319,7 +330,6 @@ extern int ieee80211_frequency_to_channel(int freq);
319 */ 330 */
320extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy, 331extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
321 int freq); 332 int freq);
322
323/** 333/**
324 * ieee80211_get_channel - get channel struct from wiphy for specified frequency 334 * ieee80211_get_channel - get channel struct from wiphy for specified frequency
325 */ 335 */
@@ -328,4 +338,57 @@ ieee80211_get_channel(struct wiphy *wiphy, int freq)
328{ 338{
329 return __ieee80211_get_channel(wiphy, freq); 339 return __ieee80211_get_channel(wiphy, freq);
330} 340}
341
342/**
343 * __regulatory_hint - hint to the wireless core a regulatory domain
344 * @wiphy: if a driver is providing the hint this is the driver's very
345 * own &struct wiphy
346 * @alpha2: the ISO/IEC 3166 alpha2 being claimed the regulatory domain
347 * should be in. If @rd is set this should be NULL
348 * @rd: a complete regulatory domain, if passed the caller need not worry
349 * about freeing it
350 *
351 * The Wireless subsystem can use this function to hint to the wireless core
352 * what it believes should be the current regulatory domain by
353 * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory
354 * domain should be in or by providing a completely built regulatory domain.
355 *
356 * Returns -EALREADY if *a regulatory domain* has already been set. Note that
357 * this could be by another driver. It is safe for drivers to continue if
358 * -EALREADY is returned, if drivers are not capable of world roaming they
359 * should not register more channels than they support. Right now we only
360 * support listening to the first driver hint. If the driver is capable
361 * of world roaming but wants to respect its own EEPROM mappings for
362 * specific regulatory domains it should register the @reg_notifier callback
363 * on the &struct wiphy. Returns 0 if the hint went through fine or through an
364 * intersection operation. Otherwise a standard error code is returned.
365 *
366 */
367extern int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
368 const char *alpha2, struct ieee80211_regdomain *rd);
369/**
370 * regulatory_hint - driver hint to the wireless core a regulatory domain
371 * @wiphy: the driver's very own &struct wiphy
372 * @alpha2: the ISO/IEC 3166 alpha2 the driver claims its regulatory domain
373 * should be in. If @rd is set this should be NULL. Note that if you
374 * set this to NULL you should still set rd->alpha2 to some accepted
375 * alpha2.
376 * @rd: a complete regulatory domain provided by the driver. If passed
377 * the driver does not need to worry about freeing it.
378 *
379 * Wireless drivers can use this function to hint to the wireless core
380 * what it believes should be the current regulatory domain by
381 * giving it an ISO/IEC 3166 alpha2 country code it knows its regulatory
382 * domain should be in or by providing a completely built regulatory domain.
383 * If the driver provides an ISO/IEC 3166 alpha2 userspace will be queried
384 * for a regulatory domain structure for the respective country. If
385 * a regulatory domain is built and passed you should set the alpha2
386 * if possible, otherwise set it to the special value of "99" which tells
387 * the wireless core it is unknown. If you pass a built regulatory domain
388 * and we return non-zero you are in charge of kfree()'ing the structure.
389 *
390 * See __regulatory_hint() documentation for possible return values.
391 */
392extern int regulatory_hint(struct wiphy *wiphy,
393 const char *alpha2, struct ieee80211_regdomain *rd);
331#endif /* __NET_WIRELESS_H */ 394#endif /* __NET_WIRELESS_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 2933d7474a79..48630b266593 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -120,9 +120,11 @@ extern struct mutex xfrm_cfg_mutex;
120/* Full description of state of transformer. */ 120/* Full description of state of transformer. */
121struct xfrm_state 121struct xfrm_state
122{ 122{
123 /* Note: bydst is re-used during gc */
124 struct list_head all; 123 struct list_head all;
125 struct hlist_node bydst; 124 union {
125 struct list_head gclist;
126 struct hlist_node bydst;
127 };
126 struct hlist_node bysrc; 128 struct hlist_node bysrc;
127 struct hlist_node byspi; 129 struct hlist_node byspi;
128 130
@@ -1244,6 +1246,8 @@ struct xfrm6_tunnel {
1244}; 1246};
1245 1247
1246struct xfrm_state_walk { 1248struct xfrm_state_walk {
1249 struct list_head list;
1250 unsigned long genid;
1247 struct xfrm_state *state; 1251 struct xfrm_state *state;
1248 int count; 1252 int count;
1249 u8 proto; 1253 u8 proto;
@@ -1279,23 +1283,10 @@ static inline void xfrm6_fini(void)
1279extern int xfrm_proc_init(void); 1283extern int xfrm_proc_init(void);
1280#endif 1284#endif
1281 1285
1282static inline void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto) 1286extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
1283{
1284 walk->proto = proto;
1285 walk->state = NULL;
1286 walk->count = 0;
1287}
1288
1289static inline void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1290{
1291 if (walk->state != NULL) {
1292 xfrm_state_put(walk->state);
1293 walk->state = NULL;
1294 }
1295}
1296
1297extern int xfrm_state_walk(struct xfrm_state_walk *walk, 1287extern int xfrm_state_walk(struct xfrm_state_walk *walk,
1298 int (*func)(struct xfrm_state *, int, void*), void *); 1288 int (*func)(struct xfrm_state *, int, void*), void *);
1289extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
1299extern struct xfrm_state *xfrm_state_alloc(void); 1290extern struct xfrm_state *xfrm_state_alloc(void);
1300extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, 1291extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1301 struct flowi *fl, struct xfrm_tmpl *tmpl, 1292 struct flowi *fl, struct xfrm_tmpl *tmpl,
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index b661f47bf10a..f0e335aa20df 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -394,6 +394,7 @@ static void vlan_transfer_features(struct net_device *dev,
394 394
395 vlandev->features &= ~dev->vlan_features; 395 vlandev->features &= ~dev->vlan_features;
396 vlandev->features |= dev->features & dev->vlan_features; 396 vlandev->features |= dev->features & dev->vlan_features;
397 vlandev->gso_max_size = dev->gso_max_size;
397 398
398 if (old_features != vlandev->features) 399 if (old_features != vlandev->features)
399 netdev_features_change(vlandev); 400 netdev_features_change(vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4bf014e51f8c..8883e9c8a223 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -48,7 +48,7 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
48 48
49 switch (veth->h_vlan_encapsulated_proto) { 49 switch (veth->h_vlan_encapsulated_proto) {
50#ifdef CONFIG_INET 50#ifdef CONFIG_INET
51 case __constant_htons(ETH_P_IP): 51 case htons(ETH_P_IP):
52 52
53 /* TODO: Confirm this will work with VLAN headers... */ 53 /* TODO: Confirm this will work with VLAN headers... */
54 return arp_find(veth->h_dest, skb); 54 return arp_find(veth->h_dest, skb);
@@ -607,6 +607,7 @@ static int vlan_dev_init(struct net_device *dev)
607 (1<<__LINK_STATE_PRESENT); 607 (1<<__LINK_STATE_PRESENT);
608 608
609 dev->features |= real_dev->features & real_dev->vlan_features; 609 dev->features |= real_dev->features & real_dev->vlan_features;
610 dev->gso_max_size = real_dev->gso_max_size;
610 611
611 /* ipv6 shared card related stuff */ 612 /* ipv6 shared card related stuff */
612 dev->dev_id = real_dev->dev_id; 613 dev->dev_id = real_dev->dev_id;
diff --git a/net/Kconfig b/net/Kconfig
index 7612cc8c337c..9103a16a77be 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,18 +232,23 @@ source "net/can/Kconfig"
232source "net/irda/Kconfig" 232source "net/irda/Kconfig"
233source "net/bluetooth/Kconfig" 233source "net/bluetooth/Kconfig"
234source "net/rxrpc/Kconfig" 234source "net/rxrpc/Kconfig"
235source "net/phonet/Kconfig"
235 236
236config FIB_RULES 237config FIB_RULES
237 bool 238 bool
238 239
239menu "Wireless" 240menuconfig WIRELESS
241 bool "Wireless"
240 depends on !S390 242 depends on !S390
243 default y
244
245if WIRELESS
241 246
242source "net/wireless/Kconfig" 247source "net/wireless/Kconfig"
243source "net/mac80211/Kconfig" 248source "net/mac80211/Kconfig"
244source "net/ieee80211/Kconfig" 249source "net/ieee80211/Kconfig"
245 250
246endmenu 251endif # WIRELESS
247 252
248source "net/rfkill/Kconfig" 253source "net/rfkill/Kconfig"
249source "net/9p/Kconfig" 254source "net/9p/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index 4f43e7f874f3..acaf819f24aa 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_AF_RXRPC) += rxrpc/
42obj-$(CONFIG_ATM) += atm/ 42obj-$(CONFIG_ATM) += atm/
43obj-$(CONFIG_DECNET) += decnet/ 43obj-$(CONFIG_DECNET) += decnet/
44obj-$(CONFIG_ECONET) += econet/ 44obj-$(CONFIG_ECONET) += econet/
45obj-$(CONFIG_PHONET) += phonet/
45ifneq ($(CONFIG_VLAN_8021Q),) 46ifneq ($(CONFIG_VLAN_8021Q),)
46obj-y += 8021q/ 47obj-y += 8021q/
47endif 48endif
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 8d9a6f158880..280de481edc7 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -375,11 +375,11 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
375 if (memcmp 375 if (memcmp
376 (skb->data + 6, ethertype_ipv6, 376 (skb->data + 6, ethertype_ipv6,
377 sizeof(ethertype_ipv6)) == 0) 377 sizeof(ethertype_ipv6)) == 0)
378 skb->protocol = __constant_htons(ETH_P_IPV6); 378 skb->protocol = htons(ETH_P_IPV6);
379 else if (memcmp 379 else if (memcmp
380 (skb->data + 6, ethertype_ipv4, 380 (skb->data + 6, ethertype_ipv4,
381 sizeof(ethertype_ipv4)) == 0) 381 sizeof(ethertype_ipv4)) == 0)
382 skb->protocol = __constant_htons(ETH_P_IP); 382 skb->protocol = htons(ETH_P_IP);
383 else 383 else
384 goto error; 384 goto error;
385 skb_pull(skb, sizeof(llc_oui_ipv4)); 385 skb_pull(skb, sizeof(llc_oui_ipv4));
@@ -404,9 +404,9 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
404 skb_reset_network_header(skb); 404 skb_reset_network_header(skb);
405 iph = ip_hdr(skb); 405 iph = ip_hdr(skb);
406 if (iph->version == 4) 406 if (iph->version == 4)
407 skb->protocol = __constant_htons(ETH_P_IP); 407 skb->protocol = htons(ETH_P_IP);
408 else if (iph->version == 6) 408 else if (iph->version == 6)
409 skb->protocol = __constant_htons(ETH_P_IPV6); 409 skb->protocol = htons(ETH_P_IPV6);
410 else 410 else
411 goto error; 411 goto error;
412 skb->pkt_type = PACKET_HOST; 412 skb->pkt_type = PACKET_HOST;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5799fb52365a..8f701cde5945 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1931,7 +1931,6 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
1931 switch (priv->lane_version) { 1931 switch (priv->lane_version) {
1932 case 1: 1932 case 1:
1933 return priv->mcast_vcc; 1933 return priv->mcast_vcc;
1934 break;
1935 case 2: /* LANE2 wants arp for multicast addresses */ 1934 case 2: /* LANE2 wants arp for multicast addresses */
1936 if (!compare_ether_addr(mac_to_find, bus_mac)) 1935 if (!compare_ether_addr(mac_to_find, bus_mac))
1937 return priv->mcast_vcc; 1936 return priv->mcast_vcc;
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 573acdf6f9ff..4d2c1f1cb524 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -28,6 +28,10 @@ static const struct stp_proto br_stp_proto = {
28 .rcv = br_stp_rcv, 28 .rcv = br_stp_rcv,
29}; 29};
30 30
31static struct pernet_operations br_net_ops = {
32 .exit = br_net_exit,
33};
34
31static int __init br_init(void) 35static int __init br_init(void)
32{ 36{
33 int err; 37 int err;
@@ -42,18 +46,22 @@ static int __init br_init(void)
42 if (err) 46 if (err)
43 goto err_out; 47 goto err_out;
44 48
45 err = br_netfilter_init(); 49 err = register_pernet_subsys(&br_net_ops);
46 if (err) 50 if (err)
47 goto err_out1; 51 goto err_out1;
48 52
49 err = register_netdevice_notifier(&br_device_notifier); 53 err = br_netfilter_init();
50 if (err) 54 if (err)
51 goto err_out2; 55 goto err_out2;
52 56
53 err = br_netlink_init(); 57 err = register_netdevice_notifier(&br_device_notifier);
54 if (err) 58 if (err)
55 goto err_out3; 59 goto err_out3;
56 60
61 err = br_netlink_init();
62 if (err)
63 goto err_out4;
64
57 brioctl_set(br_ioctl_deviceless_stub); 65 brioctl_set(br_ioctl_deviceless_stub);
58 br_handle_frame_hook = br_handle_frame; 66 br_handle_frame_hook = br_handle_frame;
59 67
@@ -61,10 +69,12 @@ static int __init br_init(void)
61 br_fdb_put_hook = br_fdb_put; 69 br_fdb_put_hook = br_fdb_put;
62 70
63 return 0; 71 return 0;
64err_out3: 72err_out4:
65 unregister_netdevice_notifier(&br_device_notifier); 73 unregister_netdevice_notifier(&br_device_notifier);
66err_out2: 74err_out3:
67 br_netfilter_fini(); 75 br_netfilter_fini();
76err_out2:
77 unregister_pernet_subsys(&br_net_ops);
68err_out1: 78err_out1:
69 br_fdb_fini(); 79 br_fdb_fini();
70err_out: 80err_out:
@@ -80,7 +90,7 @@ static void __exit br_deinit(void)
80 unregister_netdevice_notifier(&br_device_notifier); 90 unregister_netdevice_notifier(&br_device_notifier);
81 brioctl_set(NULL); 91 brioctl_set(NULL);
82 92
83 br_cleanup_bridges(); 93 unregister_pernet_subsys(&br_net_ops);
84 94
85 synchronize_net(); 95 synchronize_net();
86 96
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4f52c3d50ebe..22ba8632196f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -178,5 +178,6 @@ void br_dev_setup(struct net_device *dev)
178 dev->priv_flags = IFF_EBRIDGE; 178 dev->priv_flags = IFF_EBRIDGE;
179 179
180 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 180 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
181 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX; 181 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
182 NETIF_F_NETNS_LOCAL;
182} 183}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 63c18aacde8c..573e20f7dba4 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -168,7 +168,7 @@ static void del_br(struct net_bridge *br)
168 unregister_netdevice(br->dev); 168 unregister_netdevice(br->dev);
169} 169}
170 170
171static struct net_device *new_bridge_dev(const char *name) 171static struct net_device *new_bridge_dev(struct net *net, const char *name)
172{ 172{
173 struct net_bridge *br; 173 struct net_bridge *br;
174 struct net_device *dev; 174 struct net_device *dev;
@@ -178,6 +178,7 @@ static struct net_device *new_bridge_dev(const char *name)
178 178
179 if (!dev) 179 if (!dev)
180 return NULL; 180 return NULL;
181 dev_net_set(dev, net);
181 182
182 br = netdev_priv(dev); 183 br = netdev_priv(dev);
183 br->dev = dev; 184 br->dev = dev;
@@ -262,12 +263,12 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
262 return p; 263 return p;
263} 264}
264 265
265int br_add_bridge(const char *name) 266int br_add_bridge(struct net *net, const char *name)
266{ 267{
267 struct net_device *dev; 268 struct net_device *dev;
268 int ret; 269 int ret;
269 270
270 dev = new_bridge_dev(name); 271 dev = new_bridge_dev(net, name);
271 if (!dev) 272 if (!dev)
272 return -ENOMEM; 273 return -ENOMEM;
273 274
@@ -294,13 +295,13 @@ out_free:
294 goto out; 295 goto out;
295} 296}
296 297
297int br_del_bridge(const char *name) 298int br_del_bridge(struct net *net, const char *name)
298{ 299{
299 struct net_device *dev; 300 struct net_device *dev;
300 int ret = 0; 301 int ret = 0;
301 302
302 rtnl_lock(); 303 rtnl_lock();
303 dev = __dev_get_by_name(&init_net, name); 304 dev = __dev_get_by_name(net, name);
304 if (dev == NULL) 305 if (dev == NULL)
305 ret = -ENXIO; /* Could not find device */ 306 ret = -ENXIO; /* Could not find device */
306 307
@@ -445,13 +446,13 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
445 return 0; 446 return 0;
446} 447}
447 448
448void __exit br_cleanup_bridges(void) 449void br_net_exit(struct net *net)
449{ 450{
450 struct net_device *dev; 451 struct net_device *dev;
451 452
452 rtnl_lock(); 453 rtnl_lock();
453restart: 454restart:
454 for_each_netdev(&init_net, dev) { 455 for_each_netdev(net, dev) {
455 if (dev->priv_flags & IFF_EBRIDGE) { 456 if (dev->priv_flags & IFF_EBRIDGE) {
456 del_br(dev->priv); 457 del_br(dev->priv);
457 goto restart; 458 goto restart;
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 5bbf07362172..6a6433daaf27 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -21,12 +21,12 @@
21#include "br_private.h" 21#include "br_private.h"
22 22
23/* called with RTNL */ 23/* called with RTNL */
24static int get_bridge_ifindices(int *indices, int num) 24static int get_bridge_ifindices(struct net *net, int *indices, int num)
25{ 25{
26 struct net_device *dev; 26 struct net_device *dev;
27 int i = 0; 27 int i = 0;
28 28
29 for_each_netdev(&init_net, dev) { 29 for_each_netdev(net, dev) {
30 if (i >= num) 30 if (i >= num)
31 break; 31 break;
32 if (dev->priv_flags & IFF_EBRIDGE) 32 if (dev->priv_flags & IFF_EBRIDGE)
@@ -89,7 +89,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
89 if (!capable(CAP_NET_ADMIN)) 89 if (!capable(CAP_NET_ADMIN))
90 return -EPERM; 90 return -EPERM;
91 91
92 dev = dev_get_by_index(&init_net, ifindex); 92 dev = dev_get_by_index(dev_net(br->dev), ifindex);
93 if (dev == NULL) 93 if (dev == NULL)
94 return -EINVAL; 94 return -EINVAL;
95 95
@@ -315,7 +315,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
315 return -EOPNOTSUPP; 315 return -EOPNOTSUPP;
316} 316}
317 317
318static int old_deviceless(void __user *uarg) 318static int old_deviceless(struct net *net, void __user *uarg)
319{ 319{
320 unsigned long args[3]; 320 unsigned long args[3];
321 321
@@ -337,7 +337,7 @@ static int old_deviceless(void __user *uarg)
337 if (indices == NULL) 337 if (indices == NULL)
338 return -ENOMEM; 338 return -ENOMEM;
339 339
340 args[2] = get_bridge_ifindices(indices, args[2]); 340 args[2] = get_bridge_ifindices(net, indices, args[2]);
341 341
342 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int)) 342 ret = copy_to_user((void __user *)args[1], indices, args[2]*sizeof(int))
343 ? -EFAULT : args[2]; 343 ? -EFAULT : args[2];
@@ -360,9 +360,9 @@ static int old_deviceless(void __user *uarg)
360 buf[IFNAMSIZ-1] = 0; 360 buf[IFNAMSIZ-1] = 0;
361 361
362 if (args[0] == BRCTL_ADD_BRIDGE) 362 if (args[0] == BRCTL_ADD_BRIDGE)
363 return br_add_bridge(buf); 363 return br_add_bridge(net, buf);
364 364
365 return br_del_bridge(buf); 365 return br_del_bridge(net, buf);
366 } 366 }
367 } 367 }
368 368
@@ -374,7 +374,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar
374 switch (cmd) { 374 switch (cmd) {
375 case SIOCGIFBR: 375 case SIOCGIFBR:
376 case SIOCSIFBR: 376 case SIOCSIFBR:
377 return old_deviceless(uarg); 377 return old_deviceless(net, uarg);
378 378
379 case SIOCBRADDBR: 379 case SIOCBRADDBR:
380 case SIOCBRDELBR: 380 case SIOCBRDELBR:
@@ -389,9 +389,9 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uar
389 389
390 buf[IFNAMSIZ-1] = 0; 390 buf[IFNAMSIZ-1] = 0;
391 if (cmd == SIOCBRADDBR) 391 if (cmd == SIOCBRADDBR)
392 return br_add_bridge(buf); 392 return br_add_bridge(net, buf);
393 393
394 return br_del_bridge(buf); 394 return br_del_bridge(net, buf);
395 } 395 }
396 } 396 }
397 return -EOPNOTSUPP; 397 return -EOPNOTSUPP;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f155e6ce8a21..ba7be195803c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -82,6 +82,7 @@ nla_put_failure:
82 */ 82 */
83void br_ifinfo_notify(int event, struct net_bridge_port *port) 83void br_ifinfo_notify(int event, struct net_bridge_port *port)
84{ 84{
85 struct net *net = dev_net(port->dev);
85 struct sk_buff *skb; 86 struct sk_buff *skb;
86 int err = -ENOBUFS; 87 int err = -ENOBUFS;
87 88
@@ -97,10 +98,10 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
97 kfree_skb(skb); 98 kfree_skb(skb);
98 goto errout; 99 goto errout;
99 } 100 }
100 err = rtnl_notify(skb, &init_net,0, RTNLGRP_LINK, NULL, GFP_ATOMIC); 101 err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
101errout: 102errout:
102 if (err < 0) 103 if (err < 0)
103 rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err); 104 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
104} 105}
105 106
106/* 107/*
@@ -112,11 +113,8 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
112 struct net_device *dev; 113 struct net_device *dev;
113 int idx; 114 int idx;
114 115
115 if (net != &init_net)
116 return 0;
117
118 idx = 0; 116 idx = 0;
119 for_each_netdev(&init_net, dev) { 117 for_each_netdev(net, dev) {
120 /* not a bridge port */ 118 /* not a bridge port */
121 if (dev->br_port == NULL || idx < cb->args[0]) 119 if (dev->br_port == NULL || idx < cb->args[0])
122 goto skip; 120 goto skip;
@@ -147,9 +145,6 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
147 struct net_bridge_port *p; 145 struct net_bridge_port *p;
148 u8 new_state; 146 u8 new_state;
149 147
150 if (net != &init_net)
151 return -EINVAL;
152
153 if (nlmsg_len(nlh) < sizeof(*ifm)) 148 if (nlmsg_len(nlh) < sizeof(*ifm))
154 return -EINVAL; 149 return -EINVAL;
155 150
@@ -165,7 +160,7 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
165 if (new_state > BR_STATE_BLOCKING) 160 if (new_state > BR_STATE_BLOCKING)
166 return -EINVAL; 161 return -EINVAL;
167 162
168 dev = __dev_get_by_index(&init_net, ifm->ifi_index); 163 dev = __dev_get_by_index(net, ifm->ifi_index);
169 if (!dev) 164 if (!dev)
170 return -ENODEV; 165 return -ENODEV;
171 166
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 76340bdd052e..763a3ec292e5 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -35,9 +35,6 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
35 struct net_bridge_port *p = dev->br_port; 35 struct net_bridge_port *p = dev->br_port;
36 struct net_bridge *br; 36 struct net_bridge *br;
37 37
38 if (!net_eq(dev_net(dev), &init_net))
39 return NOTIFY_DONE;
40
41 /* not a port of a bridge */ 38 /* not a port of a bridge */
42 if (p == NULL) 39 if (p == NULL)
43 return NOTIFY_DONE; 40 return NOTIFY_DONE;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c3dc18ddc043..b6c3b71974dc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -178,9 +178,9 @@ extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
178 178
179/* br_if.c */ 179/* br_if.c */
180extern void br_port_carrier_check(struct net_bridge_port *p); 180extern void br_port_carrier_check(struct net_bridge_port *p);
181extern int br_add_bridge(const char *name); 181extern int br_add_bridge(struct net *net, const char *name);
182extern int br_del_bridge(const char *name); 182extern int br_del_bridge(struct net *net, const char *name);
183extern void br_cleanup_bridges(void); 183extern void br_net_exit(struct net *net);
184extern int br_add_if(struct net_bridge *br, 184extern int br_add_if(struct net_bridge *br,
185 struct net_device *dev); 185 struct net_device *dev);
186extern int br_del_if(struct net_bridge *br, 186extern int br_del_if(struct net_bridge *br,
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8b200f96f722..81ae40b3f655 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -140,9 +140,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
140 struct net_bridge *br; 140 struct net_bridge *br;
141 const unsigned char *buf; 141 const unsigned char *buf;
142 142
143 if (!net_eq(dev_net(dev), &init_net))
144 goto err;
145
146 if (!p) 143 if (!p)
147 goto err; 144 goto err;
148 145
diff --git a/net/core/Makefile b/net/core/Makefile
index b1332f6d0042..26a37cb31923 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -6,6 +6,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
6 gen_stats.o gen_estimator.o net_namespace.o 6 gen_stats.o gen_estimator.o net_namespace.o
7 7
8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o 8obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
9 10
10obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ 11obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
11 neighbour.o rtnetlink.o utils.o link_watch.o filter.o 12 neighbour.o rtnetlink.o utils.o link_watch.o filter.o
diff --git a/net/core/dev.c b/net/core/dev.c
index e8eb2b478344..7091040e32ac 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -891,7 +891,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
891 * Change name of a device, can pass format strings "eth%d". 891 * Change name of a device, can pass format strings "eth%d".
892 * for wildcarding. 892 * for wildcarding.
893 */ 893 */
894int dev_change_name(struct net_device *dev, char *newname) 894int dev_change_name(struct net_device *dev, const char *newname)
895{ 895{
896 char oldname[IFNAMSIZ]; 896 char oldname[IFNAMSIZ];
897 int err = 0; 897 int err = 0;
@@ -917,7 +917,6 @@ int dev_change_name(struct net_device *dev, char *newname)
917 err = dev_alloc_name(dev, newname); 917 err = dev_alloc_name(dev, newname);
918 if (err < 0) 918 if (err < 0)
919 return err; 919 return err;
920 strcpy(newname, dev->name);
921 } 920 }
922 else if (__dev_get_by_name(net, newname)) 921 else if (__dev_get_by_name(net, newname))
923 return -EEXIST; 922 return -EEXIST;
@@ -955,6 +954,38 @@ rollback:
955} 954}
956 955
957/** 956/**
957 * dev_set_alias - change ifalias of a device
958 * @dev: device
959 * @alias: name up to IFALIASZ
960 * @len: limit of bytes to copy from info
961 *
962 * Set ifalias for a device,
963 */
964int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
965{
966 ASSERT_RTNL();
967
968 if (len >= IFALIASZ)
969 return -EINVAL;
970
971 if (!len) {
972 if (dev->ifalias) {
973 kfree(dev->ifalias);
974 dev->ifalias = NULL;
975 }
976 return 0;
977 }
978
979 dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
980 if (!dev->ifalias)
981 return -ENOMEM;
982
983 strlcpy(dev->ifalias, alias, len+1);
984 return len;
985}
986
987
988/**
958 * netdev_features_change - device changes features 989 * netdev_features_change - device changes features
959 * @dev: device to cause notification 990 * @dev: device to cause notification
960 * 991 *
@@ -1676,14 +1707,14 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
1676 } 1707 }
1677 1708
1678 switch (skb->protocol) { 1709 switch (skb->protocol) {
1679 case __constant_htons(ETH_P_IP): 1710 case htons(ETH_P_IP):
1680 if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) 1711 if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
1681 ip_proto = ip_hdr(skb)->protocol; 1712 ip_proto = ip_hdr(skb)->protocol;
1682 addr1 = ip_hdr(skb)->saddr; 1713 addr1 = ip_hdr(skb)->saddr;
1683 addr2 = ip_hdr(skb)->daddr; 1714 addr2 = ip_hdr(skb)->daddr;
1684 ihl = ip_hdr(skb)->ihl; 1715 ihl = ip_hdr(skb)->ihl;
1685 break; 1716 break;
1686 case __constant_htons(ETH_P_IPV6): 1717 case htons(ETH_P_IPV6):
1687 ip_proto = ipv6_hdr(skb)->nexthdr; 1718 ip_proto = ipv6_hdr(skb)->nexthdr;
1688 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3]; 1719 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1689 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3]; 1720 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
@@ -3302,6 +3333,12 @@ static void dev_addr_discard(struct net_device *dev)
3302 netif_addr_unlock_bh(dev); 3333 netif_addr_unlock_bh(dev);
3303} 3334}
3304 3335
3336/**
3337 * dev_get_flags - get flags reported to userspace
3338 * @dev: device
3339 *
3340 * Get the combination of flag bits exported through APIs to userspace.
3341 */
3305unsigned dev_get_flags(const struct net_device *dev) 3342unsigned dev_get_flags(const struct net_device *dev)
3306{ 3343{
3307 unsigned flags; 3344 unsigned flags;
@@ -3326,6 +3363,14 @@ unsigned dev_get_flags(const struct net_device *dev)
3326 return flags; 3363 return flags;
3327} 3364}
3328 3365
3366/**
3367 * dev_change_flags - change device settings
3368 * @dev: device
3369 * @flags: device state flags
3370 *
3371 * Change settings on device based state flags. The flags are
3372 * in the userspace exported format.
3373 */
3329int dev_change_flags(struct net_device *dev, unsigned flags) 3374int dev_change_flags(struct net_device *dev, unsigned flags)
3330{ 3375{
3331 int ret, changes; 3376 int ret, changes;
@@ -3395,6 +3440,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
3395 return ret; 3440 return ret;
3396} 3441}
3397 3442
3443/**
3444 * dev_set_mtu - Change maximum transfer unit
3445 * @dev: device
3446 * @new_mtu: new transfer unit
3447 *
3448 * Change the maximum transfer size of the network device.
3449 */
3398int dev_set_mtu(struct net_device *dev, int new_mtu) 3450int dev_set_mtu(struct net_device *dev, int new_mtu)
3399{ 3451{
3400 int err; 3452 int err;
@@ -3419,6 +3471,13 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
3419 return err; 3471 return err;
3420} 3472}
3421 3473
3474/**
3475 * dev_set_mac_address - Change Media Access Control Address
3476 * @dev: device
3477 * @sa: new address
3478 *
3479 * Change the hardware (MAC) address of the device
3480 */
3422int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa) 3481int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3423{ 3482{
3424 int err; 3483 int err;
@@ -4322,7 +4381,12 @@ void free_netdev(struct net_device *dev)
4322 put_device(&dev->dev); 4381 put_device(&dev->dev);
4323} 4382}
4324 4383
4325/* Synchronize with packet receive processing. */ 4384/**
4385 * synchronize_net - Synchronize with packet receive processing
4386 *
4387 * Wait for packets currently being received to be done.
4388 * Does not block later packets from starting.
4389 */
4326void synchronize_net(void) 4390void synchronize_net(void)
4327{ 4391{
4328 might_sleep(); 4392 might_sleep();
@@ -4624,7 +4688,7 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4624} 4688}
4625 4689
4626/** 4690/**
4627 * netdev_dma_regiser - register the networking subsystem as a DMA client 4691 * netdev_dma_register - register the networking subsystem as a DMA client
4628 */ 4692 */
4629static int __init netdev_dma_register(void) 4693static int __init netdev_dma_register(void)
4630{ 4694{
@@ -4670,6 +4734,12 @@ int netdev_compute_features(unsigned long all, unsigned long one)
4670 one |= NETIF_F_GSO_SOFTWARE; 4734 one |= NETIF_F_GSO_SOFTWARE;
4671 one |= NETIF_F_GSO; 4735 one |= NETIF_F_GSO;
4672 4736
4737 /*
4738 * If even one device supports a GSO protocol with software fallback,
4739 * enable it for all.
4740 */
4741 all |= one & NETIF_F_GSO_SOFTWARE;
4742
4673 /* If even one device supports robust GSO, enable it for all. */ 4743 /* If even one device supports robust GSO, enable it for all. */
4674 if (one & NETIF_F_GSO_ROBUST) 4744 if (one & NETIF_F_GSO_ROBUST)
4675 all |= NETIF_F_GSO_ROBUST; 4745 all |= NETIF_F_GSO_ROBUST;
@@ -4719,10 +4789,18 @@ err_name:
4719 return -ENOMEM; 4789 return -ENOMEM;
4720} 4790}
4721 4791
4722char *netdev_drivername(struct net_device *dev, char *buffer, int len) 4792/**
4793 * netdev_drivername - network driver for the device
4794 * @dev: network device
4795 * @buffer: buffer for resulting name
4796 * @len: size of buffer
4797 *
4798 * Determine network driver for device.
4799 */
4800char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
4723{ 4801{
4724 struct device_driver *driver; 4802 const struct device_driver *driver;
4725 struct device *parent; 4803 const struct device *parent;
4726 4804
4727 if (len <= 0 || !buffer) 4805 if (len <= 0 || !buffer)
4728 return buffer; 4806 return buffer;
diff --git a/net/core/dst.c b/net/core/dst.c
index fe03266130b6..09c1530f4681 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -203,6 +203,7 @@ void __dst_free(struct dst_entry * dst)
203 if (dst_garbage.timer_inc > DST_GC_INC) { 203 if (dst_garbage.timer_inc > DST_GC_INC) {
204 dst_garbage.timer_inc = DST_GC_INC; 204 dst_garbage.timer_inc = DST_GC_INC;
205 dst_garbage.timer_expires = DST_GC_MIN; 205 dst_garbage.timer_expires = DST_GC_MIN;
206 cancel_delayed_work(&dst_gc_work);
206 schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); 207 schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
207 } 208 }
208 spin_unlock_bh(&dst_garbage.lock); 209 spin_unlock_bh(&dst_garbage.lock);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9d92e41826e7..1dc728b38589 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -927,8 +927,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
927 if (skb_queue_len(&neigh->arp_queue) >= 927 if (skb_queue_len(&neigh->arp_queue) >=
928 neigh->parms->queue_len) { 928 neigh->parms->queue_len) {
929 struct sk_buff *buff; 929 struct sk_buff *buff;
930 buff = neigh->arp_queue.next; 930 buff = __skb_dequeue(&neigh->arp_queue);
931 __skb_unlink(buff, &neigh->arp_queue);
932 kfree_skb(buff); 931 kfree_skb(buff);
933 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); 932 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
934 } 933 }
@@ -1259,24 +1258,20 @@ static void neigh_proxy_process(unsigned long arg)
1259 struct neigh_table *tbl = (struct neigh_table *)arg; 1258 struct neigh_table *tbl = (struct neigh_table *)arg;
1260 long sched_next = 0; 1259 long sched_next = 0;
1261 unsigned long now = jiffies; 1260 unsigned long now = jiffies;
1262 struct sk_buff *skb; 1261 struct sk_buff *skb, *n;
1263 1262
1264 spin_lock(&tbl->proxy_queue.lock); 1263 spin_lock(&tbl->proxy_queue.lock);
1265 1264
1266 skb = tbl->proxy_queue.next; 1265 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1267 1266 long tdif = NEIGH_CB(skb)->sched_next - now;
1268 while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1269 struct sk_buff *back = skb;
1270 long tdif = NEIGH_CB(back)->sched_next - now;
1271 1267
1272 skb = skb->next;
1273 if (tdif <= 0) { 1268 if (tdif <= 0) {
1274 struct net_device *dev = back->dev; 1269 struct net_device *dev = skb->dev;
1275 __skb_unlink(back, &tbl->proxy_queue); 1270 __skb_unlink(skb, &tbl->proxy_queue);
1276 if (tbl->proxy_redo && netif_running(dev)) 1271 if (tbl->proxy_redo && netif_running(dev))
1277 tbl->proxy_redo(back); 1272 tbl->proxy_redo(skb);
1278 else 1273 else
1279 kfree_skb(back); 1274 kfree_skb(skb);
1280 1275
1281 dev_put(dev); 1276 dev_put(dev);
1282 } else if (!sched_next || tdif < sched_next) 1277 } else if (!sched_next || tdif < sched_next)
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c1f4e0d428c0..92d6b9467314 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -209,9 +209,44 @@ static ssize_t store_tx_queue_len(struct device *dev,
209 return netdev_store(dev, attr, buf, len, change_tx_queue_len); 209 return netdev_store(dev, attr, buf, len, change_tx_queue_len);
210} 210}
211 211
212static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
213 const char *buf, size_t len)
214{
215 struct net_device *netdev = to_net_dev(dev);
216 size_t count = len;
217 ssize_t ret;
218
219 if (!capable(CAP_NET_ADMIN))
220 return -EPERM;
221
222 /* ignore trailing newline */
223 if (len > 0 && buf[len - 1] == '\n')
224 --count;
225
226 rtnl_lock();
227 ret = dev_set_alias(netdev, buf, count);
228 rtnl_unlock();
229
230 return ret < 0 ? ret : len;
231}
232
233static ssize_t show_ifalias(struct device *dev,
234 struct device_attribute *attr, char *buf)
235{
236 const struct net_device *netdev = to_net_dev(dev);
237 ssize_t ret = 0;
238
239 rtnl_lock();
240 if (netdev->ifalias)
241 ret = sprintf(buf, "%s\n", netdev->ifalias);
242 rtnl_unlock();
243 return ret;
244}
245
212static struct device_attribute net_class_attributes[] = { 246static struct device_attribute net_class_attributes[] = {
213 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), 247 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
214 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), 248 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
249 __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
215 __ATTR(iflink, S_IRUGO, show_iflink, NULL), 250 __ATTR(iflink, S_IRUGO, show_iflink, NULL),
216 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), 251 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
217 __ATTR(features, S_IRUGO, show_features, NULL), 252 __ATTR(features, S_IRUGO, show_features, NULL),
@@ -418,6 +453,7 @@ static void netdev_release(struct device *d)
418 453
419 BUG_ON(dev->reg_state != NETREG_RELEASED); 454 BUG_ON(dev->reg_state != NETREG_RELEASED);
420 455
456 kfree(dev->ifalias);
421 kfree((char *)dev - dev->padded); 457 kfree((char *)dev - dev->padded);
422} 458}
423 459
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b36341..8862498fd4a6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -586,6 +586,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
586{ 586{
587 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 587 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
588 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 588 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
589 + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
589 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ 590 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
590 + nla_total_size(sizeof(struct rtnl_link_ifmap)) 591 + nla_total_size(sizeof(struct rtnl_link_ifmap))
591 + nla_total_size(sizeof(struct rtnl_link_stats)) 592 + nla_total_size(sizeof(struct rtnl_link_stats))
@@ -640,6 +641,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
640 if (txq->qdisc_sleeping) 641 if (txq->qdisc_sleeping)
641 NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id); 642 NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
642 643
644 if (dev->ifalias)
645 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
646
643 if (1) { 647 if (1) {
644 struct rtnl_link_ifmap map = { 648 struct rtnl_link_ifmap map = {
645 .mem_start = dev->mem_start, 649 .mem_start = dev->mem_start,
@@ -713,6 +717,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
713 [IFLA_LINKMODE] = { .type = NLA_U8 }, 717 [IFLA_LINKMODE] = { .type = NLA_U8 },
714 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 718 [IFLA_LINKINFO] = { .type = NLA_NESTED },
715 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 719 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
720 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
716}; 721};
717 722
718static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 723static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -853,6 +858,14 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
853 modified = 1; 858 modified = 1;
854 } 859 }
855 860
861 if (tb[IFLA_IFALIAS]) {
862 err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
863 nla_len(tb[IFLA_IFALIAS]));
864 if (err < 0)
865 goto errout;
866 modified = 1;
867 }
868
856 if (tb[IFLA_BROADCAST]) { 869 if (tb[IFLA_BROADCAST]) {
857 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 870 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
858 send_addr_notify = 1; 871 send_addr_notify = 1;
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
new file mode 100644
index 000000000000..1f49afcd8e86
--- /dev/null
+++ b/net/core/skb_dma_map.c
@@ -0,0 +1,66 @@
1/* skb_dma_map.c: DMA mapping helpers for socket buffers.
2 *
3 * Copyright (C) David S. Miller <davem@davemloft.net>
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/dma-mapping.h>
9#include <linux/skbuff.h>
10
11int skb_dma_map(struct device *dev, struct sk_buff *skb,
12 enum dma_data_direction dir)
13{
14 struct skb_shared_info *sp = skb_shinfo(skb);
15 dma_addr_t map;
16 int i;
17
18 map = dma_map_single(dev, skb->data,
19 skb_headlen(skb), dir);
20 if (dma_mapping_error(dev, map))
21 goto out_err;
22
23 sp->dma_maps[0] = map;
24 for (i = 0; i < sp->nr_frags; i++) {
25 skb_frag_t *fp = &sp->frags[i];
26
27 map = dma_map_page(dev, fp->page, fp->page_offset,
28 fp->size, dir);
29 if (dma_mapping_error(dev, map))
30 goto unwind;
31 sp->dma_maps[i + 1] = map;
32 }
33 sp->num_dma_maps = i + 1;
34
35 return 0;
36
37unwind:
38 while (i-- >= 0) {
39 skb_frag_t *fp = &sp->frags[i];
40
41 dma_unmap_page(dev, sp->dma_maps[i + 1],
42 fp->size, dir);
43 }
44 dma_unmap_single(dev, sp->dma_maps[0],
45 skb_headlen(skb), dir);
46out_err:
47 return -ENOMEM;
48}
49EXPORT_SYMBOL(skb_dma_map);
50
51void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
52 enum dma_data_direction dir)
53{
54 struct skb_shared_info *sp = skb_shinfo(skb);
55 int i;
56
57 dma_unmap_single(dev, sp->dma_maps[0],
58 skb_headlen(skb), dir);
59 for (i = 0; i < sp->nr_frags; i++) {
60 skb_frag_t *fp = &sp->frags[i];
61
62 dma_unmap_page(dev, sp->dma_maps[i + 1],
63 fp->size, dir);
64 }
65}
66EXPORT_SYMBOL(skb_dma_unmap);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ca1ccdf1ef76..2c218a0808b4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -363,8 +363,7 @@ static void kfree_skbmem(struct sk_buff *skb)
363 } 363 }
364} 364}
365 365
366/* Free everything but the sk_buff shell. */ 366static void skb_release_head_state(struct sk_buff *skb)
367static void skb_release_all(struct sk_buff *skb)
368{ 367{
369 dst_release(skb->dst); 368 dst_release(skb->dst);
370#ifdef CONFIG_XFRM 369#ifdef CONFIG_XFRM
@@ -388,6 +387,12 @@ static void skb_release_all(struct sk_buff *skb)
388 skb->tc_verd = 0; 387 skb->tc_verd = 0;
389#endif 388#endif
390#endif 389#endif
390}
391
392/* Free everything but the sk_buff shell. */
393static void skb_release_all(struct sk_buff *skb)
394{
395 skb_release_head_state(skb);
391 skb_release_data(skb); 396 skb_release_data(skb);
392} 397}
393 398
@@ -424,6 +429,38 @@ void kfree_skb(struct sk_buff *skb)
424 __kfree_skb(skb); 429 __kfree_skb(skb);
425} 430}
426 431
432int skb_recycle_check(struct sk_buff *skb, int skb_size)
433{
434 struct skb_shared_info *shinfo;
435
436 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
437 return 0;
438
439 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
440 if (skb_end_pointer(skb) - skb->head < skb_size)
441 return 0;
442
443 if (skb_shared(skb) || skb_cloned(skb))
444 return 0;
445
446 skb_release_head_state(skb);
447 shinfo = skb_shinfo(skb);
448 atomic_set(&shinfo->dataref, 1);
449 shinfo->nr_frags = 0;
450 shinfo->gso_size = 0;
451 shinfo->gso_segs = 0;
452 shinfo->gso_type = 0;
453 shinfo->ip6_frag_id = 0;
454 shinfo->frag_list = NULL;
455
456 memset(skb, 0, offsetof(struct sk_buff, tail));
457 skb_reset_tail_pointer(skb);
458 skb->data = skb->head + NET_SKB_PAD;
459
460 return 1;
461}
462EXPORT_SYMBOL(skb_recycle_check);
463
427static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 464static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
428{ 465{
429 new->tstamp = old->tstamp; 466 new->tstamp = old->tstamp;
diff --git a/net/core/sock.c b/net/core/sock.c
index 91f8bbc93526..2d358dd8a03e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = {
154 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" , 154 "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
155 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , 155 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 156 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
157 "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX" 157 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
158 "sk_lock-AF_MAX"
158}; 159};
159static const char *af_family_slock_key_strings[AF_MAX+1] = { 160static const char *af_family_slock_key_strings[AF_MAX+1] = {
160 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , 161 "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -168,7 +169,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
168 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" , 169 "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
169 "slock-27" , "slock-28" , "slock-AF_CAN" , 170 "slock-27" , "slock-28" , "slock-AF_CAN" ,
170 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 171 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
171 "slock-AF_RXRPC" , "slock-AF_MAX" 172 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
173 "slock-AF_MAX"
172}; 174};
173static const char *af_family_clock_key_strings[AF_MAX+1] = { 175static const char *af_family_clock_key_strings[AF_MAX+1] = {
174 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , 176 "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -182,7 +184,8 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
182 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" , 184 "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
183 "clock-27" , "clock-28" , "clock-AF_CAN" , 185 "clock-27" , "clock-28" , "clock-AF_CAN" ,
184 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 186 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
185 "clock-AF_RXRPC" , "clock-AF_MAX" 187 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
188 "clock-AF_MAX"
186}; 189};
187#endif 190#endif
188 191
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 8e9580874216..9a430734530c 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -783,7 +783,7 @@ static struct ccid_operations ccid2 = {
783}; 783};
784 784
785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG 785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
786module_param(ccid2_debug, bool, 0444); 786module_param(ccid2_debug, bool, 0644);
787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages"); 787MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
788#endif 788#endif
789 789
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index f6756e0c9e69..3b8bd7ca6761 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -963,7 +963,7 @@ static struct ccid_operations ccid3 = {
963}; 963};
964 964
965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG 965#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
966module_param(ccid3_debug, bool, 0444); 966module_param(ccid3_debug, bool, 0644);
967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages"); 967MODULE_PARM_DESC(ccid3_debug, "Enable debug messages");
968#endif 968#endif
969 969
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index bcd6ac415bb9..5b3ce0688c5c 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -67,7 +67,10 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
67 u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0; 67 u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0;
68 int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */ 68 int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */
69 69
70 for (i=0; i <= k; i++) { 70 if (k <= 0)
71 return;
72
73 for (i = 0; i <= k; i++) {
71 i_i = tfrc_lh_get_interval(lh, i); 74 i_i = tfrc_lh_get_interval(lh, i);
72 75
73 if (i < k) { 76 if (i < k) {
@@ -78,7 +81,6 @@ static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
78 i_tot1 += i_i * tfrc_lh_weights[i-1]; 81 i_tot1 += i_i * tfrc_lh_weights[i-1];
79 } 82 }
80 83
81 BUG_ON(w_tot == 0);
82 lh->i_mean = max(i_tot0, i_tot1) / w_tot; 84 lh->i_mean = max(i_tot0, i_tot1) / w_tot;
83} 85}
84 86
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 97ecec0a8e76..185916218e07 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -10,7 +10,7 @@
10 10
11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG 11#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
12int tfrc_debug; 12int tfrc_debug;
13module_param(tfrc_debug, bool, 0444); 13module_param(tfrc_debug, bool, 0644);
14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); 14MODULE_PARM_DESC(tfrc_debug, "Enable debug messages");
15#endif 15#endif
16 16
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 803933ab396d..779d0ed9ae94 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -370,7 +370,7 @@ int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
370 goto discard; 370 goto discard;
371 371
372 if (dccp_parse_options(sk, NULL, skb)) 372 if (dccp_parse_options(sk, NULL, skb))
373 goto discard; 373 return 1;
374 374
375 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 375 if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
376 dccp_event_ack_recv(sk, skb); 376 dccp_event_ack_recv(sk, skb);
@@ -610,7 +610,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
610 * Step 8: Process options and mark acknowledgeable 610 * Step 8: Process options and mark acknowledgeable
611 */ 611 */
612 if (dccp_parse_options(sk, NULL, skb)) 612 if (dccp_parse_options(sk, NULL, skb))
613 goto discard; 613 return 1;
614 614
615 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) 615 if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
616 dccp_event_ack_recv(sk, skb); 616 dccp_event_ack_recv(sk, skb);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index dc7c158a2f4b..0809b63cb055 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -81,11 +81,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
81 /* Check if this isn't a single byte option */ 81 /* Check if this isn't a single byte option */
82 if (opt > DCCPO_MAX_RESERVED) { 82 if (opt > DCCPO_MAX_RESERVED) {
83 if (opt_ptr == opt_end) 83 if (opt_ptr == opt_end)
84 goto out_invalid_option; 84 goto out_nonsensical_length;
85 85
86 len = *opt_ptr++; 86 len = *opt_ptr++;
87 if (len < 3) 87 if (len < 2)
88 goto out_invalid_option; 88 goto out_nonsensical_length;
89 /* 89 /*
90 * Remove the type and len fields, leaving 90 * Remove the type and len fields, leaving
91 * just the value size 91 * just the value size
@@ -95,7 +95,7 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
95 opt_ptr += len; 95 opt_ptr += len;
96 96
97 if (opt_ptr > opt_end) 97 if (opt_ptr > opt_end)
98 goto out_invalid_option; 98 goto out_nonsensical_length;
99 } 99 }
100 100
101 /* 101 /*
@@ -283,12 +283,17 @@ ignore_option:
283 if (mandatory) 283 if (mandatory)
284 goto out_invalid_option; 284 goto out_invalid_option;
285 285
286out_nonsensical_length:
287 /* RFC 4340, 5.8: ignore option and all remaining option space */
286 return 0; 288 return 0;
287 289
288out_invalid_option: 290out_invalid_option:
289 DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT); 291 DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
290 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR; 292 DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_OPTION_ERROR;
291 DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len); 293 DCCP_WARN("DCCP(%p): invalid option %d, len=%d", sk, opt, len);
294 DCCP_SKB_CB(skb)->dccpd_reset_data[0] = opt;
295 DCCP_SKB_CB(skb)->dccpd_reset_data[1] = len > 0 ? value[0] : 0;
296 DCCP_SKB_CB(skb)->dccpd_reset_data[2] = len > 1 ? value[1] : 0;
292 return -1; 297 return -1;
293} 298}
294 299
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 1ca3b26eed0f..d0bd34819761 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -309,7 +309,9 @@ int dccp_disconnect(struct sock *sk, int flags)
309 sk->sk_err = ECONNRESET; 309 sk->sk_err = ECONNRESET;
310 310
311 dccp_clear_xmit_timers(sk); 311 dccp_clear_xmit_timers(sk);
312
312 __skb_queue_purge(&sk->sk_receive_queue); 313 __skb_queue_purge(&sk->sk_receive_queue);
314 __skb_queue_purge(&sk->sk_write_queue);
313 if (sk->sk_send_head != NULL) { 315 if (sk->sk_send_head != NULL) {
314 __kfree_skb(sk->sk_send_head); 316 __kfree_skb(sk->sk_send_head);
315 sk->sk_send_head = NULL; 317 sk->sk_send_head = NULL;
@@ -1028,7 +1030,7 @@ MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
1028 1030
1029#ifdef CONFIG_IP_DCCP_DEBUG 1031#ifdef CONFIG_IP_DCCP_DEBUG
1030int dccp_debug; 1032int dccp_debug;
1031module_param(dccp_debug, bool, 0444); 1033module_param(dccp_debug, bool, 0644);
1032MODULE_PARM_DESC(dccp_debug, "Enable debug messages"); 1034MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
1033 1035
1034EXPORT_SYMBOL_GPL(dccp_debug); 1036EXPORT_SYMBOL_GPL(dccp_debug);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index a80839b02e3f..647a9edee375 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -129,7 +129,7 @@ int eth_rebuild_header(struct sk_buff *skb)
129 129
130 switch (eth->h_proto) { 130 switch (eth->h_proto) {
131#ifdef CONFIG_INET 131#ifdef CONFIG_INET
132 case __constant_htons(ETH_P_IP): 132 case htons(ETH_P_IP):
133 return arp_find(eth->h_dest, skb); 133 return arp_find(eth->h_dest, skb);
134#endif 134#endif
135 default: 135 default:
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 3bca97f55d47..949772a5a7dc 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -157,7 +157,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
157 err = ieee80211_networks_allocate(ieee); 157 err = ieee80211_networks_allocate(ieee);
158 if (err) { 158 if (err) {
159 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err); 159 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err);
160 goto failed; 160 goto failed_free_netdev;
161 } 161 }
162 ieee80211_networks_initialize(ieee); 162 ieee80211_networks_initialize(ieee);
163 163
@@ -193,9 +193,9 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
193 193
194 return dev; 194 return dev;
195 195
196 failed: 196failed_free_netdev:
197 if (dev) 197 free_netdev(dev);
198 free_netdev(dev); 198failed:
199 return NULL; 199 return NULL;
200} 200}
201 201
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index c10036e7a463..89cb047ab314 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -782,11 +782,15 @@ skip_listen_ht:
782 struct sock *sk; 782 struct sock *sk;
783 struct hlist_node *node; 783 struct hlist_node *node;
784 784
785 num = 0;
786
787 if (hlist_empty(&head->chain) && hlist_empty(&head->twchain))
788 continue;
789
785 if (i > s_i) 790 if (i > s_i)
786 s_num = 0; 791 s_num = 0;
787 792
788 read_lock_bh(lock); 793 read_lock_bh(lock);
789 num = 0;
790 sk_for_each(sk, node, &head->chain) { 794 sk_for_each(sk, node, &head->chain) {
791 struct inet_sock *inet = inet_sk(sk); 795 struct inet_sock *inet = inet_sk(sk);
792 796
diff --git a/net/ipv4/ipvs/Kconfig b/net/ipv4/ipvs/Kconfig
index 09d0c3f35669..de6004de80bc 100644
--- a/net/ipv4/ipvs/Kconfig
+++ b/net/ipv4/ipvs/Kconfig
@@ -24,6 +24,14 @@ menuconfig IP_VS
24 24
25if IP_VS 25if IP_VS
26 26
27config IP_VS_IPV6
28 bool "IPv6 support for IPVS (DANGEROUS)"
29 depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6)
30 ---help---
31 Add IPv6 support to IPVS. This is incomplete and might be dangerous.
32
33 Say N if unsure.
34
27config IP_VS_DEBUG 35config IP_VS_DEBUG
28 bool "IP virtual server debugging" 36 bool "IP virtual server debugging"
29 ---help--- 37 ---help---
@@ -33,7 +41,8 @@ config IP_VS_DEBUG
33 41
34config IP_VS_TAB_BITS 42config IP_VS_TAB_BITS
35 int "IPVS connection table size (the Nth power of 2)" 43 int "IPVS connection table size (the Nth power of 2)"
36 default "12" 44 range 8 20
45 default 12
37 ---help--- 46 ---help---
38 The IPVS connection hash table uses the chaining scheme to handle 47 The IPVS connection hash table uses the chaining scheme to handle
39 hash collisions. Using a big IPVS connection hash table will greatly 48 hash collisions. Using a big IPVS connection hash table will greatly
@@ -71,14 +80,20 @@ config IP_VS_PROTO_UDP
71 This option enables support for load balancing UDP transport 80 This option enables support for load balancing UDP transport
72 protocol. Say Y if unsure. 81 protocol. Say Y if unsure.
73 82
83config IP_VS_PROTO_AH_ESP
84 bool
85 depends on UNDEFINED
86
74config IP_VS_PROTO_ESP 87config IP_VS_PROTO_ESP
75 bool "ESP load balancing support" 88 bool "ESP load balancing support"
89 select IP_VS_PROTO_AH_ESP
76 ---help--- 90 ---help---
77 This option enables support for load balancing ESP (Encapsulation 91 This option enables support for load balancing ESP (Encapsulation
78 Security Payload) transport protocol. Say Y if unsure. 92 Security Payload) transport protocol. Say Y if unsure.
79 93
80config IP_VS_PROTO_AH 94config IP_VS_PROTO_AH
81 bool "AH load balancing support" 95 bool "AH load balancing support"
96 select IP_VS_PROTO_AH_ESP
82 ---help--- 97 ---help---
83 This option enables support for load balancing AH (Authentication 98 This option enables support for load balancing AH (Authentication
84 Header) transport protocol. Say Y if unsure. 99 Header) transport protocol. Say Y if unsure.
diff --git a/net/ipv4/ipvs/Makefile b/net/ipv4/ipvs/Makefile
index 30e85de9ffff..73a46fe1fe4c 100644
--- a/net/ipv4/ipvs/Makefile
+++ b/net/ipv4/ipvs/Makefile
@@ -6,8 +6,7 @@
6ip_vs_proto-objs-y := 6ip_vs_proto-objs-y :=
7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o 7ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o 8ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o 9ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
10ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o
11 10
12ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \ 11ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
13 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \ 12 ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 44a6872dc245..9a24332fbed8 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -114,9 +114,18 @@ static inline void ct_write_unlock_bh(unsigned key)
114/* 114/*
115 * Returns hash value for IPVS connection entry 115 * Returns hash value for IPVS connection entry
116 */ 116 */
117static unsigned int ip_vs_conn_hashkey(unsigned proto, __be32 addr, __be16 port) 117static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
118 const union nf_inet_addr *addr,
119 __be16 port)
118{ 120{
119 return jhash_3words((__force u32)addr, (__force u32)port, proto, ip_vs_conn_rnd) 121#ifdef CONFIG_IP_VS_IPV6
122 if (af == AF_INET6)
123 return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
124 (__force u32)port, proto, ip_vs_conn_rnd)
125 & IP_VS_CONN_TAB_MASK;
126#endif
127 return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
128 ip_vs_conn_rnd)
120 & IP_VS_CONN_TAB_MASK; 129 & IP_VS_CONN_TAB_MASK;
121} 130}
122 131
@@ -131,7 +140,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
131 int ret; 140 int ret;
132 141
133 /* Hash by protocol, client address and port */ 142 /* Hash by protocol, client address and port */
134 hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); 143 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
135 144
136 ct_write_lock(hash); 145 ct_write_lock(hash);
137 146
@@ -162,7 +171,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
162 int ret; 171 int ret;
163 172
164 /* unhash it and decrease its reference counter */ 173 /* unhash it and decrease its reference counter */
165 hash = ip_vs_conn_hashkey(cp->protocol, cp->caddr, cp->cport); 174 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
166 175
167 ct_write_lock(hash); 176 ct_write_lock(hash);
168 177
@@ -187,20 +196,23 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
187 * d_addr, d_port: pkt dest address (load balancer) 196 * d_addr, d_port: pkt dest address (load balancer)
188 */ 197 */
189static inline struct ip_vs_conn *__ip_vs_conn_in_get 198static inline struct ip_vs_conn *__ip_vs_conn_in_get
190(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 199(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
200 const union nf_inet_addr *d_addr, __be16 d_port)
191{ 201{
192 unsigned hash; 202 unsigned hash;
193 struct ip_vs_conn *cp; 203 struct ip_vs_conn *cp;
194 204
195 hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); 205 hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
196 206
197 ct_read_lock(hash); 207 ct_read_lock(hash);
198 208
199 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 209 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
200 if (s_addr==cp->caddr && s_port==cp->cport && 210 if (cp->af == af &&
201 d_port==cp->vport && d_addr==cp->vaddr && 211 ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
212 ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
213 s_port == cp->cport && d_port == cp->vport &&
202 ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && 214 ((!s_port) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
203 protocol==cp->protocol) { 215 protocol == cp->protocol) {
204 /* HIT */ 216 /* HIT */
205 atomic_inc(&cp->refcnt); 217 atomic_inc(&cp->refcnt);
206 ct_read_unlock(hash); 218 ct_read_unlock(hash);
@@ -214,39 +226,44 @@ static inline struct ip_vs_conn *__ip_vs_conn_in_get
214} 226}
215 227
216struct ip_vs_conn *ip_vs_conn_in_get 228struct ip_vs_conn *ip_vs_conn_in_get
217(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 229(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
230 const union nf_inet_addr *d_addr, __be16 d_port)
218{ 231{
219 struct ip_vs_conn *cp; 232 struct ip_vs_conn *cp;
220 233
221 cp = __ip_vs_conn_in_get(protocol, s_addr, s_port, d_addr, d_port); 234 cp = __ip_vs_conn_in_get(af, protocol, s_addr, s_port, d_addr, d_port);
222 if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt)) 235 if (!cp && atomic_read(&ip_vs_conn_no_cport_cnt))
223 cp = __ip_vs_conn_in_get(protocol, s_addr, 0, d_addr, d_port); 236 cp = __ip_vs_conn_in_get(af, protocol, s_addr, 0, d_addr,
237 d_port);
224 238
225 IP_VS_DBG(9, "lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", 239 IP_VS_DBG_BUF(9, "lookup/in %s %s:%d->%s:%d %s\n",
226 ip_vs_proto_name(protocol), 240 ip_vs_proto_name(protocol),
227 NIPQUAD(s_addr), ntohs(s_port), 241 IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
228 NIPQUAD(d_addr), ntohs(d_port), 242 IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
229 cp?"hit":"not hit"); 243 cp ? "hit" : "not hit");
230 244
231 return cp; 245 return cp;
232} 246}
233 247
234/* Get reference to connection template */ 248/* Get reference to connection template */
235struct ip_vs_conn *ip_vs_ct_in_get 249struct ip_vs_conn *ip_vs_ct_in_get
236(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 250(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
251 const union nf_inet_addr *d_addr, __be16 d_port)
237{ 252{
238 unsigned hash; 253 unsigned hash;
239 struct ip_vs_conn *cp; 254 struct ip_vs_conn *cp;
240 255
241 hash = ip_vs_conn_hashkey(protocol, s_addr, s_port); 256 hash = ip_vs_conn_hashkey(af, protocol, s_addr, s_port);
242 257
243 ct_read_lock(hash); 258 ct_read_lock(hash);
244 259
245 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 260 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
246 if (s_addr==cp->caddr && s_port==cp->cport && 261 if (cp->af == af &&
247 d_port==cp->vport && d_addr==cp->vaddr && 262 ip_vs_addr_equal(af, s_addr, &cp->caddr) &&
263 ip_vs_addr_equal(af, d_addr, &cp->vaddr) &&
264 s_port == cp->cport && d_port == cp->vport &&
248 cp->flags & IP_VS_CONN_F_TEMPLATE && 265 cp->flags & IP_VS_CONN_F_TEMPLATE &&
249 protocol==cp->protocol) { 266 protocol == cp->protocol) {
250 /* HIT */ 267 /* HIT */
251 atomic_inc(&cp->refcnt); 268 atomic_inc(&cp->refcnt);
252 goto out; 269 goto out;
@@ -257,11 +274,11 @@ struct ip_vs_conn *ip_vs_ct_in_get
257 out: 274 out:
258 ct_read_unlock(hash); 275 ct_read_unlock(hash);
259 276
260 IP_VS_DBG(9, "template lookup/in %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", 277 IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
261 ip_vs_proto_name(protocol), 278 ip_vs_proto_name(protocol),
262 NIPQUAD(s_addr), ntohs(s_port), 279 IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
263 NIPQUAD(d_addr), ntohs(d_port), 280 IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
264 cp?"hit":"not hit"); 281 cp ? "hit" : "not hit");
265 282
266 return cp; 283 return cp;
267} 284}
@@ -273,7 +290,8 @@ struct ip_vs_conn *ip_vs_ct_in_get
273 * d_addr, d_port: pkt dest address (foreign host) 290 * d_addr, d_port: pkt dest address (foreign host)
274 */ 291 */
275struct ip_vs_conn *ip_vs_conn_out_get 292struct ip_vs_conn *ip_vs_conn_out_get
276(int protocol, __be32 s_addr, __be16 s_port, __be32 d_addr, __be16 d_port) 293(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
294 const union nf_inet_addr *d_addr, __be16 d_port)
277{ 295{
278 unsigned hash; 296 unsigned hash;
279 struct ip_vs_conn *cp, *ret=NULL; 297 struct ip_vs_conn *cp, *ret=NULL;
@@ -281,13 +299,15 @@ struct ip_vs_conn *ip_vs_conn_out_get
281 /* 299 /*
282 * Check for "full" addressed entries 300 * Check for "full" addressed entries
283 */ 301 */
284 hash = ip_vs_conn_hashkey(protocol, d_addr, d_port); 302 hash = ip_vs_conn_hashkey(af, protocol, d_addr, d_port);
285 303
286 ct_read_lock(hash); 304 ct_read_lock(hash);
287 305
288 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 306 list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
289 if (d_addr == cp->caddr && d_port == cp->cport && 307 if (cp->af == af &&
290 s_port == cp->dport && s_addr == cp->daddr && 308 ip_vs_addr_equal(af, d_addr, &cp->caddr) &&
309 ip_vs_addr_equal(af, s_addr, &cp->daddr) &&
310 d_port == cp->cport && s_port == cp->dport &&
291 protocol == cp->protocol) { 311 protocol == cp->protocol) {
292 /* HIT */ 312 /* HIT */
293 atomic_inc(&cp->refcnt); 313 atomic_inc(&cp->refcnt);
@@ -298,11 +318,11 @@ struct ip_vs_conn *ip_vs_conn_out_get
298 318
299 ct_read_unlock(hash); 319 ct_read_unlock(hash);
300 320
301 IP_VS_DBG(9, "lookup/out %s %u.%u.%u.%u:%d->%u.%u.%u.%u:%d %s\n", 321 IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
302 ip_vs_proto_name(protocol), 322 ip_vs_proto_name(protocol),
303 NIPQUAD(s_addr), ntohs(s_port), 323 IP_VS_DBG_ADDR(af, s_addr), ntohs(s_port),
304 NIPQUAD(d_addr), ntohs(d_port), 324 IP_VS_DBG_ADDR(af, d_addr), ntohs(d_port),
305 ret?"hit":"not hit"); 325 ret ? "hit" : "not hit");
306 326
307 return ret; 327 return ret;
308} 328}
@@ -369,6 +389,33 @@ static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
369 } 389 }
370} 390}
371 391
392#ifdef CONFIG_IP_VS_IPV6
393static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
394{
395 switch (IP_VS_FWD_METHOD(cp)) {
396 case IP_VS_CONN_F_MASQ:
397 cp->packet_xmit = ip_vs_nat_xmit_v6;
398 break;
399
400 case IP_VS_CONN_F_TUNNEL:
401 cp->packet_xmit = ip_vs_tunnel_xmit_v6;
402 break;
403
404 case IP_VS_CONN_F_DROUTE:
405 cp->packet_xmit = ip_vs_dr_xmit_v6;
406 break;
407
408 case IP_VS_CONN_F_LOCALNODE:
409 cp->packet_xmit = ip_vs_null_xmit;
410 break;
411
412 case IP_VS_CONN_F_BYPASS:
413 cp->packet_xmit = ip_vs_bypass_xmit_v6;
414 break;
415 }
416}
417#endif
418
372 419
373static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest) 420static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)
374{ 421{
@@ -402,16 +449,16 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
402 cp->flags |= atomic_read(&dest->conn_flags); 449 cp->flags |= atomic_read(&dest->conn_flags);
403 cp->dest = dest; 450 cp->dest = dest;
404 451
405 IP_VS_DBG(7, "Bind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " 452 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
406 "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " 453 "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
407 "dest->refcnt:%d\n", 454 "dest->refcnt:%d\n",
408 ip_vs_proto_name(cp->protocol), 455 ip_vs_proto_name(cp->protocol),
409 NIPQUAD(cp->caddr), ntohs(cp->cport), 456 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
410 NIPQUAD(cp->vaddr), ntohs(cp->vport), 457 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
411 NIPQUAD(cp->daddr), ntohs(cp->dport), 458 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
412 ip_vs_fwd_tag(cp), cp->state, 459 ip_vs_fwd_tag(cp), cp->state,
413 cp->flags, atomic_read(&cp->refcnt), 460 cp->flags, atomic_read(&cp->refcnt),
414 atomic_read(&dest->refcnt)); 461 atomic_read(&dest->refcnt));
415 462
416 /* Update the connection counters */ 463 /* Update the connection counters */
417 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { 464 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -444,8 +491,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
444 struct ip_vs_dest *dest; 491 struct ip_vs_dest *dest;
445 492
446 if ((cp) && (!cp->dest)) { 493 if ((cp) && (!cp->dest)) {
447 dest = ip_vs_find_dest(cp->daddr, cp->dport, 494 dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
448 cp->vaddr, cp->vport, cp->protocol); 495 &cp->vaddr, cp->vport,
496 cp->protocol);
449 ip_vs_bind_dest(cp, dest); 497 ip_vs_bind_dest(cp, dest);
450 return dest; 498 return dest;
451 } else 499 } else
@@ -464,16 +512,16 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
464 if (!dest) 512 if (!dest)
465 return; 513 return;
466 514
467 IP_VS_DBG(7, "Unbind-dest %s c:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " 515 IP_VS_DBG_BUF(7, "Unbind-dest %s c:%s:%d v:%s:%d "
468 "d:%u.%u.%u.%u:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d " 516 "d:%s:%d fwd:%c s:%u conn->flags:%X conn->refcnt:%d "
469 "dest->refcnt:%d\n", 517 "dest->refcnt:%d\n",
470 ip_vs_proto_name(cp->protocol), 518 ip_vs_proto_name(cp->protocol),
471 NIPQUAD(cp->caddr), ntohs(cp->cport), 519 IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
472 NIPQUAD(cp->vaddr), ntohs(cp->vport), 520 IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
473 NIPQUAD(cp->daddr), ntohs(cp->dport), 521 IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
474 ip_vs_fwd_tag(cp), cp->state, 522 ip_vs_fwd_tag(cp), cp->state,
475 cp->flags, atomic_read(&cp->refcnt), 523 cp->flags, atomic_read(&cp->refcnt),
476 atomic_read(&dest->refcnt)); 524 atomic_read(&dest->refcnt));
477 525
478 /* Update the connection counters */ 526 /* Update the connection counters */
479 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) { 527 if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -526,13 +574,16 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
526 !(dest->flags & IP_VS_DEST_F_AVAILABLE) || 574 !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
527 (sysctl_ip_vs_expire_quiescent_template && 575 (sysctl_ip_vs_expire_quiescent_template &&
528 (atomic_read(&dest->weight) == 0))) { 576 (atomic_read(&dest->weight) == 0))) {
529 IP_VS_DBG(9, "check_template: dest not available for " 577 IP_VS_DBG_BUF(9, "check_template: dest not available for "
530 "protocol %s s:%u.%u.%u.%u:%d v:%u.%u.%u.%u:%d " 578 "protocol %s s:%s:%d v:%s:%d "
531 "-> d:%u.%u.%u.%u:%d\n", 579 "-> d:%s:%d\n",
532 ip_vs_proto_name(ct->protocol), 580 ip_vs_proto_name(ct->protocol),
533 NIPQUAD(ct->caddr), ntohs(ct->cport), 581 IP_VS_DBG_ADDR(ct->af, &ct->caddr),
534 NIPQUAD(ct->vaddr), ntohs(ct->vport), 582 ntohs(ct->cport),
535 NIPQUAD(ct->daddr), ntohs(ct->dport)); 583 IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
584 ntohs(ct->vport),
585 IP_VS_DBG_ADDR(ct->af, &ct->daddr),
586 ntohs(ct->dport));
536 587
537 /* 588 /*
538 * Invalidate the connection template 589 * Invalidate the connection template
@@ -625,8 +676,9 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
625 * Create a new connection entry and hash it into the ip_vs_conn_tab 676 * Create a new connection entry and hash it into the ip_vs_conn_tab
626 */ 677 */
627struct ip_vs_conn * 678struct ip_vs_conn *
628ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport, 679ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
629 __be32 daddr, __be16 dport, unsigned flags, 680 const union nf_inet_addr *vaddr, __be16 vport,
681 const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
630 struct ip_vs_dest *dest) 682 struct ip_vs_dest *dest)
631{ 683{
632 struct ip_vs_conn *cp; 684 struct ip_vs_conn *cp;
@@ -640,12 +692,13 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport
640 692
641 INIT_LIST_HEAD(&cp->c_list); 693 INIT_LIST_HEAD(&cp->c_list);
642 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp); 694 setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
695 cp->af = af;
643 cp->protocol = proto; 696 cp->protocol = proto;
644 cp->caddr = caddr; 697 ip_vs_addr_copy(af, &cp->caddr, caddr);
645 cp->cport = cport; 698 cp->cport = cport;
646 cp->vaddr = vaddr; 699 ip_vs_addr_copy(af, &cp->vaddr, vaddr);
647 cp->vport = vport; 700 cp->vport = vport;
648 cp->daddr = daddr; 701 ip_vs_addr_copy(af, &cp->daddr, daddr);
649 cp->dport = dport; 702 cp->dport = dport;
650 cp->flags = flags; 703 cp->flags = flags;
651 spin_lock_init(&cp->lock); 704 spin_lock_init(&cp->lock);
@@ -672,7 +725,12 @@ ip_vs_conn_new(int proto, __be32 caddr, __be16 cport, __be32 vaddr, __be16 vport
672 cp->timeout = 3*HZ; 725 cp->timeout = 3*HZ;
673 726
674 /* Bind its packet transmitter */ 727 /* Bind its packet transmitter */
675 ip_vs_bind_xmit(cp); 728#ifdef CONFIG_IP_VS_IPV6
729 if (af == AF_INET6)
730 ip_vs_bind_xmit_v6(cp);
731 else
732#endif
733 ip_vs_bind_xmit(cp);
676 734
677 if (unlikely(pp && atomic_read(&pp->appcnt))) 735 if (unlikely(pp && atomic_read(&pp->appcnt)))
678 ip_vs_bind_app(cp, pp); 736 ip_vs_bind_app(cp, pp);
@@ -760,12 +818,26 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
760 else { 818 else {
761 const struct ip_vs_conn *cp = v; 819 const struct ip_vs_conn *cp = v;
762 820
763 seq_printf(seq, 821#ifdef CONFIG_IP_VS_IPV6
764 "%-3s %08X %04X %08X %04X %08X %04X %-11s %7lu\n", 822 if (cp->af == AF_INET6)
823 seq_printf(seq,
824 "%-3s " NIP6_FMT " %04X " NIP6_FMT
825 " %04X " NIP6_FMT " %04X %-11s %7lu\n",
826 ip_vs_proto_name(cp->protocol),
827 NIP6(cp->caddr.in6), ntohs(cp->cport),
828 NIP6(cp->vaddr.in6), ntohs(cp->vport),
829 NIP6(cp->daddr.in6), ntohs(cp->dport),
830 ip_vs_state_name(cp->protocol, cp->state),
831 (cp->timer.expires-jiffies)/HZ);
832 else
833#endif
834 seq_printf(seq,
835 "%-3s %08X %04X %08X %04X"
836 " %08X %04X %-11s %7lu\n",
765 ip_vs_proto_name(cp->protocol), 837 ip_vs_proto_name(cp->protocol),
766 ntohl(cp->caddr), ntohs(cp->cport), 838 ntohl(cp->caddr.ip), ntohs(cp->cport),
767 ntohl(cp->vaddr), ntohs(cp->vport), 839 ntohl(cp->vaddr.ip), ntohs(cp->vport),
768 ntohl(cp->daddr), ntohs(cp->dport), 840 ntohl(cp->daddr.ip), ntohs(cp->dport),
769 ip_vs_state_name(cp->protocol, cp->state), 841 ip_vs_state_name(cp->protocol, cp->state),
770 (cp->timer.expires-jiffies)/HZ); 842 (cp->timer.expires-jiffies)/HZ);
771 } 843 }
@@ -809,12 +881,27 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
809 else { 881 else {
810 const struct ip_vs_conn *cp = v; 882 const struct ip_vs_conn *cp = v;
811 883
812 seq_printf(seq, 884#ifdef CONFIG_IP_VS_IPV6
813 "%-3s %08X %04X %08X %04X %08X %04X %-11s %-6s %7lu\n", 885 if (cp->af == AF_INET6)
886 seq_printf(seq,
887 "%-3s " NIP6_FMT " %04X " NIP6_FMT
888 " %04X " NIP6_FMT " %04X %-11s %-6s %7lu\n",
889 ip_vs_proto_name(cp->protocol),
890 NIP6(cp->caddr.in6), ntohs(cp->cport),
891 NIP6(cp->vaddr.in6), ntohs(cp->vport),
892 NIP6(cp->daddr.in6), ntohs(cp->dport),
893 ip_vs_state_name(cp->protocol, cp->state),
894 ip_vs_origin_name(cp->flags),
895 (cp->timer.expires-jiffies)/HZ);
896 else
897#endif
898 seq_printf(seq,
899 "%-3s %08X %04X %08X %04X "
900 "%08X %04X %-11s %-6s %7lu\n",
814 ip_vs_proto_name(cp->protocol), 901 ip_vs_proto_name(cp->protocol),
815 ntohl(cp->caddr), ntohs(cp->cport), 902 ntohl(cp->caddr.ip), ntohs(cp->cport),
816 ntohl(cp->vaddr), ntohs(cp->vport), 903 ntohl(cp->vaddr.ip), ntohs(cp->vport),
817 ntohl(cp->daddr), ntohs(cp->dport), 904 ntohl(cp->daddr.ip), ntohs(cp->dport),
818 ip_vs_state_name(cp->protocol, cp->state), 905 ip_vs_state_name(cp->protocol, cp->state),
819 ip_vs_origin_name(cp->flags), 906 ip_vs_origin_name(cp->flags),
820 (cp->timer.expires-jiffies)/HZ); 907 (cp->timer.expires-jiffies)/HZ);
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index a7879eafc3b5..958abf3e5f8c 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -39,6 +39,11 @@
39#include <linux/netfilter.h> 39#include <linux/netfilter.h>
40#include <linux/netfilter_ipv4.h> 40#include <linux/netfilter_ipv4.h>
41 41
42#ifdef CONFIG_IP_VS_IPV6
43#include <net/ipv6.h>
44#include <linux/netfilter_ipv6.h>
45#endif
46
42#include <net/ip_vs.h> 47#include <net/ip_vs.h>
43 48
44 49
@@ -60,6 +65,7 @@ EXPORT_SYMBOL(ip_vs_get_debug_level);
60 65
61/* ID used in ICMP lookups */ 66/* ID used in ICMP lookups */
62#define icmp_id(icmph) (((icmph)->un).echo.id) 67#define icmp_id(icmph) (((icmph)->un).echo.id)
68#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
63 69
64const char *ip_vs_proto_name(unsigned proto) 70const char *ip_vs_proto_name(unsigned proto)
65{ 71{
@@ -74,6 +80,10 @@ const char *ip_vs_proto_name(unsigned proto)
74 return "TCP"; 80 return "TCP";
75 case IPPROTO_ICMP: 81 case IPPROTO_ICMP:
76 return "ICMP"; 82 return "ICMP";
83#ifdef CONFIG_IP_VS_IPV6
84 case IPPROTO_ICMPV6:
85 return "ICMPv6";
86#endif
77 default: 87 default:
78 sprintf(buf, "IP_%d", proto); 88 sprintf(buf, "IP_%d", proto);
79 return buf; 89 return buf;
@@ -92,18 +102,18 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
92 struct ip_vs_dest *dest = cp->dest; 102 struct ip_vs_dest *dest = cp->dest;
93 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 103 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
94 spin_lock(&dest->stats.lock); 104 spin_lock(&dest->stats.lock);
95 dest->stats.inpkts++; 105 dest->stats.ustats.inpkts++;
96 dest->stats.inbytes += skb->len; 106 dest->stats.ustats.inbytes += skb->len;
97 spin_unlock(&dest->stats.lock); 107 spin_unlock(&dest->stats.lock);
98 108
99 spin_lock(&dest->svc->stats.lock); 109 spin_lock(&dest->svc->stats.lock);
100 dest->svc->stats.inpkts++; 110 dest->svc->stats.ustats.inpkts++;
101 dest->svc->stats.inbytes += skb->len; 111 dest->svc->stats.ustats.inbytes += skb->len;
102 spin_unlock(&dest->svc->stats.lock); 112 spin_unlock(&dest->svc->stats.lock);
103 113
104 spin_lock(&ip_vs_stats.lock); 114 spin_lock(&ip_vs_stats.lock);
105 ip_vs_stats.inpkts++; 115 ip_vs_stats.ustats.inpkts++;
106 ip_vs_stats.inbytes += skb->len; 116 ip_vs_stats.ustats.inbytes += skb->len;
107 spin_unlock(&ip_vs_stats.lock); 117 spin_unlock(&ip_vs_stats.lock);
108 } 118 }
109} 119}
@@ -115,18 +125,18 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
115 struct ip_vs_dest *dest = cp->dest; 125 struct ip_vs_dest *dest = cp->dest;
116 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) { 126 if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
117 spin_lock(&dest->stats.lock); 127 spin_lock(&dest->stats.lock);
118 dest->stats.outpkts++; 128 dest->stats.ustats.outpkts++;
119 dest->stats.outbytes += skb->len; 129 dest->stats.ustats.outbytes += skb->len;
120 spin_unlock(&dest->stats.lock); 130 spin_unlock(&dest->stats.lock);
121 131
122 spin_lock(&dest->svc->stats.lock); 132 spin_lock(&dest->svc->stats.lock);
123 dest->svc->stats.outpkts++; 133 dest->svc->stats.ustats.outpkts++;
124 dest->svc->stats.outbytes += skb->len; 134 dest->svc->stats.ustats.outbytes += skb->len;
125 spin_unlock(&dest->svc->stats.lock); 135 spin_unlock(&dest->svc->stats.lock);
126 136
127 spin_lock(&ip_vs_stats.lock); 137 spin_lock(&ip_vs_stats.lock);
128 ip_vs_stats.outpkts++; 138 ip_vs_stats.ustats.outpkts++;
129 ip_vs_stats.outbytes += skb->len; 139 ip_vs_stats.ustats.outbytes += skb->len;
130 spin_unlock(&ip_vs_stats.lock); 140 spin_unlock(&ip_vs_stats.lock);
131 } 141 }
132} 142}
@@ -136,15 +146,15 @@ static inline void
136ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc) 146ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
137{ 147{
138 spin_lock(&cp->dest->stats.lock); 148 spin_lock(&cp->dest->stats.lock);
139 cp->dest->stats.conns++; 149 cp->dest->stats.ustats.conns++;
140 spin_unlock(&cp->dest->stats.lock); 150 spin_unlock(&cp->dest->stats.lock);
141 151
142 spin_lock(&svc->stats.lock); 152 spin_lock(&svc->stats.lock);
143 svc->stats.conns++; 153 svc->stats.ustats.conns++;
144 spin_unlock(&svc->stats.lock); 154 spin_unlock(&svc->stats.lock);
145 155
146 spin_lock(&ip_vs_stats.lock); 156 spin_lock(&ip_vs_stats.lock);
147 ip_vs_stats.conns++; 157 ip_vs_stats.ustats.conns++;
148 spin_unlock(&ip_vs_stats.lock); 158 spin_unlock(&ip_vs_stats.lock);
149} 159}
150 160
@@ -173,20 +183,28 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
173 __be16 ports[2]) 183 __be16 ports[2])
174{ 184{
175 struct ip_vs_conn *cp = NULL; 185 struct ip_vs_conn *cp = NULL;
176 struct iphdr *iph = ip_hdr(skb); 186 struct ip_vs_iphdr iph;
177 struct ip_vs_dest *dest; 187 struct ip_vs_dest *dest;
178 struct ip_vs_conn *ct; 188 struct ip_vs_conn *ct;
179 __be16 dport; /* destination port to forward */ 189 __be16 dport; /* destination port to forward */
180 __be32 snet; /* source network of the client, after masking */ 190 union nf_inet_addr snet; /* source network of the client,
191 after masking */
192
193 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
181 194
182 /* Mask saddr with the netmask to adjust template granularity */ 195 /* Mask saddr with the netmask to adjust template granularity */
183 snet = iph->saddr & svc->netmask; 196#ifdef CONFIG_IP_VS_IPV6
197 if (svc->af == AF_INET6)
198 ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask);
199 else
200#endif
201 snet.ip = iph.saddr.ip & svc->netmask;
184 202
185 IP_VS_DBG(6, "p-schedule: src %u.%u.%u.%u:%u dest %u.%u.%u.%u:%u " 203 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
186 "mnet %u.%u.%u.%u\n", 204 "mnet %s\n",
187 NIPQUAD(iph->saddr), ntohs(ports[0]), 205 IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
188 NIPQUAD(iph->daddr), ntohs(ports[1]), 206 IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
189 NIPQUAD(snet)); 207 IP_VS_DBG_ADDR(svc->af, &snet));
190 208
191 /* 209 /*
192 * As far as we know, FTP is a very complicated network protocol, and 210 * As far as we know, FTP is a very complicated network protocol, and
@@ -204,11 +222,11 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
204 if (ports[1] == svc->port) { 222 if (ports[1] == svc->port) {
205 /* Check if a template already exists */ 223 /* Check if a template already exists */
206 if (svc->port != FTPPORT) 224 if (svc->port != FTPPORT)
207 ct = ip_vs_ct_in_get(iph->protocol, snet, 0, 225 ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
208 iph->daddr, ports[1]); 226 &iph.daddr, ports[1]);
209 else 227 else
210 ct = ip_vs_ct_in_get(iph->protocol, snet, 0, 228 ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
211 iph->daddr, 0); 229 &iph.daddr, 0);
212 230
213 if (!ct || !ip_vs_check_template(ct)) { 231 if (!ct || !ip_vs_check_template(ct)) {
214 /* 232 /*
@@ -228,18 +246,18 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
228 * for ftp service. 246 * for ftp service.
229 */ 247 */
230 if (svc->port != FTPPORT) 248 if (svc->port != FTPPORT)
231 ct = ip_vs_conn_new(iph->protocol, 249 ct = ip_vs_conn_new(svc->af, iph.protocol,
232 snet, 0, 250 &snet, 0,
233 iph->daddr, 251 &iph.daddr,
234 ports[1], 252 ports[1],
235 dest->addr, dest->port, 253 &dest->addr, dest->port,
236 IP_VS_CONN_F_TEMPLATE, 254 IP_VS_CONN_F_TEMPLATE,
237 dest); 255 dest);
238 else 256 else
239 ct = ip_vs_conn_new(iph->protocol, 257 ct = ip_vs_conn_new(svc->af, iph.protocol,
240 snet, 0, 258 &snet, 0,
241 iph->daddr, 0, 259 &iph.daddr, 0,
242 dest->addr, 0, 260 &dest->addr, 0,
243 IP_VS_CONN_F_TEMPLATE, 261 IP_VS_CONN_F_TEMPLATE,
244 dest); 262 dest);
245 if (ct == NULL) 263 if (ct == NULL)
@@ -258,12 +276,16 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
258 * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0> 276 * fwmark template: <IPPROTO_IP,caddr,0,fwmark,0,daddr,0>
259 * port zero template: <protocol,caddr,0,vaddr,0,daddr,0> 277 * port zero template: <protocol,caddr,0,vaddr,0,daddr,0>
260 */ 278 */
261 if (svc->fwmark) 279 if (svc->fwmark) {
262 ct = ip_vs_ct_in_get(IPPROTO_IP, snet, 0, 280 union nf_inet_addr fwmark = {
263 htonl(svc->fwmark), 0); 281 .all = { 0, 0, 0, htonl(svc->fwmark) }
264 else 282 };
265 ct = ip_vs_ct_in_get(iph->protocol, snet, 0, 283
266 iph->daddr, 0); 284 ct = ip_vs_ct_in_get(svc->af, IPPROTO_IP, &snet, 0,
285 &fwmark, 0);
286 } else
287 ct = ip_vs_ct_in_get(svc->af, iph.protocol, &snet, 0,
288 &iph.daddr, 0);
267 289
268 if (!ct || !ip_vs_check_template(ct)) { 290 if (!ct || !ip_vs_check_template(ct)) {
269 /* 291 /*
@@ -282,18 +304,22 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
282 /* 304 /*
283 * Create a template according to the service 305 * Create a template according to the service
284 */ 306 */
285 if (svc->fwmark) 307 if (svc->fwmark) {
286 ct = ip_vs_conn_new(IPPROTO_IP, 308 union nf_inet_addr fwmark = {
287 snet, 0, 309 .all = { 0, 0, 0, htonl(svc->fwmark) }
288 htonl(svc->fwmark), 0, 310 };
289 dest->addr, 0, 311
312 ct = ip_vs_conn_new(svc->af, IPPROTO_IP,
313 &snet, 0,
314 &fwmark, 0,
315 &dest->addr, 0,
290 IP_VS_CONN_F_TEMPLATE, 316 IP_VS_CONN_F_TEMPLATE,
291 dest); 317 dest);
292 else 318 } else
293 ct = ip_vs_conn_new(iph->protocol, 319 ct = ip_vs_conn_new(svc->af, iph.protocol,
294 snet, 0, 320 &snet, 0,
295 iph->daddr, 0, 321 &iph.daddr, 0,
296 dest->addr, 0, 322 &dest->addr, 0,
297 IP_VS_CONN_F_TEMPLATE, 323 IP_VS_CONN_F_TEMPLATE,
298 dest); 324 dest);
299 if (ct == NULL) 325 if (ct == NULL)
@@ -310,10 +336,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
310 /* 336 /*
311 * Create a new connection according to the template 337 * Create a new connection according to the template
312 */ 338 */
313 cp = ip_vs_conn_new(iph->protocol, 339 cp = ip_vs_conn_new(svc->af, iph.protocol,
314 iph->saddr, ports[0], 340 &iph.saddr, ports[0],
315 iph->daddr, ports[1], 341 &iph.daddr, ports[1],
316 dest->addr, dport, 342 &dest->addr, dport,
317 0, 343 0,
318 dest); 344 dest);
319 if (cp == NULL) { 345 if (cp == NULL) {
@@ -342,12 +368,12 @@ struct ip_vs_conn *
342ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 368ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
343{ 369{
344 struct ip_vs_conn *cp = NULL; 370 struct ip_vs_conn *cp = NULL;
345 struct iphdr *iph = ip_hdr(skb); 371 struct ip_vs_iphdr iph;
346 struct ip_vs_dest *dest; 372 struct ip_vs_dest *dest;
347 __be16 _ports[2], *pptr; 373 __be16 _ports[2], *pptr;
348 374
349 pptr = skb_header_pointer(skb, iph->ihl*4, 375 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
350 sizeof(_ports), _ports); 376 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
351 if (pptr == NULL) 377 if (pptr == NULL)
352 return NULL; 378 return NULL;
353 379
@@ -377,22 +403,22 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
377 /* 403 /*
378 * Create a connection entry. 404 * Create a connection entry.
379 */ 405 */
380 cp = ip_vs_conn_new(iph->protocol, 406 cp = ip_vs_conn_new(svc->af, iph.protocol,
381 iph->saddr, pptr[0], 407 &iph.saddr, pptr[0],
382 iph->daddr, pptr[1], 408 &iph.daddr, pptr[1],
383 dest->addr, dest->port?dest->port:pptr[1], 409 &dest->addr, dest->port ? dest->port : pptr[1],
384 0, 410 0,
385 dest); 411 dest);
386 if (cp == NULL) 412 if (cp == NULL)
387 return NULL; 413 return NULL;
388 414
389 IP_VS_DBG(6, "Schedule fwd:%c c:%u.%u.%u.%u:%u v:%u.%u.%u.%u:%u " 415 IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
390 "d:%u.%u.%u.%u:%u conn->flags:%X conn->refcnt:%d\n", 416 "d:%s:%u conn->flags:%X conn->refcnt:%d\n",
391 ip_vs_fwd_tag(cp), 417 ip_vs_fwd_tag(cp),
392 NIPQUAD(cp->caddr), ntohs(cp->cport), 418 IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
393 NIPQUAD(cp->vaddr), ntohs(cp->vport), 419 IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
394 NIPQUAD(cp->daddr), ntohs(cp->dport), 420 IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
395 cp->flags, atomic_read(&cp->refcnt)); 421 cp->flags, atomic_read(&cp->refcnt));
396 422
397 ip_vs_conn_stats(cp, svc); 423 ip_vs_conn_stats(cp, svc);
398 return cp; 424 return cp;
@@ -408,31 +434,39 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
408 struct ip_vs_protocol *pp) 434 struct ip_vs_protocol *pp)
409{ 435{
410 __be16 _ports[2], *pptr; 436 __be16 _ports[2], *pptr;
411 struct iphdr *iph = ip_hdr(skb); 437 struct ip_vs_iphdr iph;
438 int unicast;
439 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
412 440
413 pptr = skb_header_pointer(skb, iph->ihl*4, 441 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
414 sizeof(_ports), _ports);
415 if (pptr == NULL) { 442 if (pptr == NULL) {
416 ip_vs_service_put(svc); 443 ip_vs_service_put(svc);
417 return NF_DROP; 444 return NF_DROP;
418 } 445 }
419 446
447#ifdef CONFIG_IP_VS_IPV6
448 if (svc->af == AF_INET6)
449 unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
450 else
451#endif
452 unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
453
420 /* if it is fwmark-based service, the cache_bypass sysctl is up 454 /* if it is fwmark-based service, the cache_bypass sysctl is up
421 and the destination is RTN_UNICAST (and not local), then create 455 and the destination is a non-local unicast, then create
422 a cache_bypass connection entry */ 456 a cache_bypass connection entry */
423 if (sysctl_ip_vs_cache_bypass && svc->fwmark 457 if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
424 && (inet_addr_type(&init_net, iph->daddr) == RTN_UNICAST)) {
425 int ret, cs; 458 int ret, cs;
426 struct ip_vs_conn *cp; 459 struct ip_vs_conn *cp;
460 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
427 461
428 ip_vs_service_put(svc); 462 ip_vs_service_put(svc);
429 463
430 /* create a new connection entry */ 464 /* create a new connection entry */
431 IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n"); 465 IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n");
432 cp = ip_vs_conn_new(iph->protocol, 466 cp = ip_vs_conn_new(svc->af, iph.protocol,
433 iph->saddr, pptr[0], 467 &iph.saddr, pptr[0],
434 iph->daddr, pptr[1], 468 &iph.daddr, pptr[1],
435 0, 0, 469 &daddr, 0,
436 IP_VS_CONN_F_BYPASS, 470 IP_VS_CONN_F_BYPASS,
437 NULL); 471 NULL);
438 if (cp == NULL) 472 if (cp == NULL)
@@ -473,7 +507,14 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
473 * created, the TCP RST packet cannot be sent, instead that 507 * created, the TCP RST packet cannot be sent, instead that
474 * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ 508 * ICMP_PORT_UNREACH is sent here no matter it is TCP/UDP. --WZ
475 */ 509 */
476 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 510#ifdef CONFIG_IP_VS_IPV6
511 if (svc->af == AF_INET6)
512 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0,
513 skb->dev);
514 else
515#endif
516 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
517
477 return NF_DROP; 518 return NF_DROP;
478} 519}
479 520
@@ -512,6 +553,14 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
512 return err; 553 return err;
513} 554}
514 555
556#ifdef CONFIG_IP_VS_IPV6
557static inline int ip_vs_gather_frags_v6(struct sk_buff *skb, u_int32_t user)
558{
559 /* TODO IPv6: Find out what to do here for IPv6 */
560 return 0;
561}
562#endif
563
515/* 564/*
516 * Packet has been made sufficiently writable in caller 565 * Packet has been made sufficiently writable in caller
517 * - inout: 1=in->out, 0=out->in 566 * - inout: 1=in->out, 0=out->in
@@ -526,14 +575,14 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
526 struct iphdr *ciph = (struct iphdr *)(icmph + 1); 575 struct iphdr *ciph = (struct iphdr *)(icmph + 1);
527 576
528 if (inout) { 577 if (inout) {
529 iph->saddr = cp->vaddr; 578 iph->saddr = cp->vaddr.ip;
530 ip_send_check(iph); 579 ip_send_check(iph);
531 ciph->daddr = cp->vaddr; 580 ciph->daddr = cp->vaddr.ip;
532 ip_send_check(ciph); 581 ip_send_check(ciph);
533 } else { 582 } else {
534 iph->daddr = cp->daddr; 583 iph->daddr = cp->daddr.ip;
535 ip_send_check(iph); 584 ip_send_check(iph);
536 ciph->saddr = cp->daddr; 585 ciph->saddr = cp->daddr.ip;
537 ip_send_check(ciph); 586 ip_send_check(ciph);
538 } 587 }
539 588
@@ -560,21 +609,112 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
560 "Forwarding altered incoming ICMP"); 609 "Forwarding altered incoming ICMP");
561} 610}
562 611
612#ifdef CONFIG_IP_VS_IPV6
613void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
614 struct ip_vs_conn *cp, int inout)
615{
616 struct ipv6hdr *iph = ipv6_hdr(skb);
617 unsigned int icmp_offset = sizeof(struct ipv6hdr);
618 struct icmp6hdr *icmph = (struct icmp6hdr *)(skb_network_header(skb) +
619 icmp_offset);
620 struct ipv6hdr *ciph = (struct ipv6hdr *)(icmph + 1);
621
622 if (inout) {
623 iph->saddr = cp->vaddr.in6;
624 ciph->daddr = cp->vaddr.in6;
625 } else {
626 iph->daddr = cp->daddr.in6;
627 ciph->saddr = cp->daddr.in6;
628 }
629
630 /* the TCP/UDP port */
631 if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) {
632 __be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
633
634 if (inout)
635 ports[1] = cp->vport;
636 else
637 ports[0] = cp->dport;
638 }
639
640 /* And finally the ICMP checksum */
641 icmph->icmp6_cksum = 0;
642 /* TODO IPv6: is this correct for ICMPv6? */
643 ip_vs_checksum_complete(skb, icmp_offset);
644 skb->ip_summed = CHECKSUM_UNNECESSARY;
645
646 if (inout)
647 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
648 "Forwarding altered outgoing ICMPv6");
649 else
650 IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
651 "Forwarding altered incoming ICMPv6");
652}
653#endif
654
655/* Handle relevant response ICMP messages - forward to the right
656 * destination host. Used for NAT and local client.
657 */
658static int handle_response_icmp(int af, struct sk_buff *skb,
659 union nf_inet_addr *snet,
660 __u8 protocol, struct ip_vs_conn *cp,
661 struct ip_vs_protocol *pp,
662 unsigned int offset, unsigned int ihl)
663{
664 unsigned int verdict = NF_DROP;
665
666 if (IP_VS_FWD_METHOD(cp) != 0) {
667 IP_VS_ERR("shouldn't reach here, because the box is on the "
668 "half connection in the tun/dr module.\n");
669 }
670
671 /* Ensure the checksum is correct */
672 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
673 /* Failed checksum! */
674 IP_VS_DBG_BUF(1, "Forward ICMP: failed checksum from %s!\n",
675 IP_VS_DBG_ADDR(af, snet));
676 goto out;
677 }
678
679 if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol)
680 offset += 2 * sizeof(__u16);
681 if (!skb_make_writable(skb, offset))
682 goto out;
683
684#ifdef CONFIG_IP_VS_IPV6
685 if (af == AF_INET6)
686 ip_vs_nat_icmp_v6(skb, pp, cp, 1);
687 else
688#endif
689 ip_vs_nat_icmp(skb, pp, cp, 1);
690
691 /* do the statistics and put it back */
692 ip_vs_out_stats(cp, skb);
693
694 skb->ipvs_property = 1;
695 verdict = NF_ACCEPT;
696
697out:
698 __ip_vs_conn_put(cp);
699
700 return verdict;
701}
702
563/* 703/*
564 * Handle ICMP messages in the inside-to-outside direction (outgoing). 704 * Handle ICMP messages in the inside-to-outside direction (outgoing).
565 * Find any that might be relevant, check against existing connections, 705 * Find any that might be relevant, check against existing connections.
566 * forward to the right destination host if relevant.
567 * Currently handles error types - unreachable, quench, ttl exceeded. 706 * Currently handles error types - unreachable, quench, ttl exceeded.
568 * (Only used in VS/NAT)
569 */ 707 */
570static int ip_vs_out_icmp(struct sk_buff *skb, int *related) 708static int ip_vs_out_icmp(struct sk_buff *skb, int *related)
571{ 709{
572 struct iphdr *iph; 710 struct iphdr *iph;
573 struct icmphdr _icmph, *ic; 711 struct icmphdr _icmph, *ic;
574 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ 712 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
713 struct ip_vs_iphdr ciph;
575 struct ip_vs_conn *cp; 714 struct ip_vs_conn *cp;
576 struct ip_vs_protocol *pp; 715 struct ip_vs_protocol *pp;
577 unsigned int offset, ihl, verdict; 716 unsigned int offset, ihl;
717 union nf_inet_addr snet;
578 718
579 *related = 1; 719 *related = 1;
580 720
@@ -627,102 +767,231 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related)
627 767
628 offset += cih->ihl * 4; 768 offset += cih->ihl * 4;
629 769
770 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
630 /* The embedded headers contain source and dest in reverse order */ 771 /* The embedded headers contain source and dest in reverse order */
631 cp = pp->conn_out_get(skb, pp, cih, offset, 1); 772 cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
632 if (!cp) 773 if (!cp)
633 return NF_ACCEPT; 774 return NF_ACCEPT;
634 775
635 verdict = NF_DROP; 776 snet.ip = iph->saddr;
777 return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
778 pp, offset, ihl);
779}
636 780
637 if (IP_VS_FWD_METHOD(cp) != 0) { 781#ifdef CONFIG_IP_VS_IPV6
638 IP_VS_ERR("shouldn't reach here, because the box is on the " 782static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
639 "half connection in the tun/dr module.\n"); 783{
784 struct ipv6hdr *iph;
785 struct icmp6hdr _icmph, *ic;
786 struct ipv6hdr _ciph, *cih; /* The ip header contained
787 within the ICMP */
788 struct ip_vs_iphdr ciph;
789 struct ip_vs_conn *cp;
790 struct ip_vs_protocol *pp;
791 unsigned int offset;
792 union nf_inet_addr snet;
793
794 *related = 1;
795
796 /* reassemble IP fragments */
797 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
798 if (ip_vs_gather_frags_v6(skb, IP_DEFRAG_VS_OUT))
799 return NF_STOLEN;
640 } 800 }
641 801
642 /* Ensure the checksum is correct */ 802 iph = ipv6_hdr(skb);
643 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { 803 offset = sizeof(struct ipv6hdr);
644 /* Failed checksum! */ 804 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
645 IP_VS_DBG(1, "Forward ICMP: failed checksum from %d.%d.%d.%d!\n", 805 if (ic == NULL)
646 NIPQUAD(iph->saddr)); 806 return NF_DROP;
647 goto out; 807
808 IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n",
809 ic->icmp6_type, ntohs(icmpv6_id(ic)),
810 NIP6(iph->saddr), NIP6(iph->daddr));
811
812 /*
813 * Work through seeing if this is for us.
814 * These checks are supposed to be in an order that means easy
815 * things are checked first to speed up processing.... however
816 * this means that some packets will manage to get a long way
817 * down this stack and then be rejected, but that's life.
818 */
819 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
820 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
821 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
822 *related = 0;
823 return NF_ACCEPT;
648 } 824 }
649 825
650 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 826 /* Now find the contained IP header */
651 offset += 2 * sizeof(__u16); 827 offset += sizeof(_icmph);
652 if (!skb_make_writable(skb, offset)) 828 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
653 goto out; 829 if (cih == NULL)
830 return NF_ACCEPT; /* The packet looks wrong, ignore */
654 831
655 ip_vs_nat_icmp(skb, pp, cp, 1); 832 pp = ip_vs_proto_get(cih->nexthdr);
833 if (!pp)
834 return NF_ACCEPT;
656 835
657 /* do the statistics and put it back */ 836 /* Is the embedded protocol header present? */
658 ip_vs_out_stats(cp, skb); 837 /* TODO: we don't support fragmentation at the moment anyways */
838 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
839 return NF_ACCEPT;
659 840
660 skb->ipvs_property = 1; 841 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking outgoing ICMPv6 for");
661 verdict = NF_ACCEPT;
662 842
663 out: 843 offset += sizeof(struct ipv6hdr);
664 __ip_vs_conn_put(cp);
665 844
666 return verdict; 845 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
846 /* The embedded headers contain source and dest in reverse order */
847 cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
848 if (!cp)
849 return NF_ACCEPT;
850
851 ipv6_addr_copy(&snet.in6, &iph->saddr);
852 return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp,
853 pp, offset, sizeof(struct ipv6hdr));
667} 854}
855#endif
668 856
669static inline int is_tcp_reset(const struct sk_buff *skb) 857static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
670{ 858{
671 struct tcphdr _tcph, *th; 859 struct tcphdr _tcph, *th;
672 860
673 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 861 th = skb_header_pointer(skb, nh_len, sizeof(_tcph), &_tcph);
674 if (th == NULL) 862 if (th == NULL)
675 return 0; 863 return 0;
676 return th->rst; 864 return th->rst;
677} 865}
678 866
867/* Handle response packets: rewrite addresses and send away...
868 * Used for NAT and local client.
869 */
870static unsigned int
871handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
872 struct ip_vs_conn *cp, int ihl)
873{
874 IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet");
875
876 if (!skb_make_writable(skb, ihl))
877 goto drop;
878
879 /* mangle the packet */
880 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
881 goto drop;
882
883#ifdef CONFIG_IP_VS_IPV6
884 if (af == AF_INET6)
885 ipv6_hdr(skb)->saddr = cp->vaddr.in6;
886 else
887#endif
888 {
889 ip_hdr(skb)->saddr = cp->vaddr.ip;
890 ip_send_check(ip_hdr(skb));
891 }
892
893 /* For policy routing, packets originating from this
894 * machine itself may be routed differently to packets
895 * passing through. We want this packet to be routed as
896 * if it came from this machine itself. So re-compute
897 * the routing information.
898 */
899#ifdef CONFIG_IP_VS_IPV6
900 if (af == AF_INET6) {
901 if (ip6_route_me_harder(skb) != 0)
902 goto drop;
903 } else
904#endif
905 if (ip_route_me_harder(skb, RTN_LOCAL) != 0)
906 goto drop;
907
908 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
909
910 ip_vs_out_stats(cp, skb);
911 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
912 ip_vs_conn_put(cp);
913
914 skb->ipvs_property = 1;
915
916 LeaveFunction(11);
917 return NF_ACCEPT;
918
919drop:
920 ip_vs_conn_put(cp);
921 kfree_skb(skb);
922 return NF_STOLEN;
923}
924
679/* 925/*
680 * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT. 926 * It is hooked at the NF_INET_FORWARD chain, used only for VS/NAT.
681 * Check if outgoing packet belongs to the established ip_vs_conn, 927 * Check if outgoing packet belongs to the established ip_vs_conn.
682 * rewrite addresses of the packet and send it on its way...
683 */ 928 */
684static unsigned int 929static unsigned int
685ip_vs_out(unsigned int hooknum, struct sk_buff *skb, 930ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
686 const struct net_device *in, const struct net_device *out, 931 const struct net_device *in, const struct net_device *out,
687 int (*okfn)(struct sk_buff *)) 932 int (*okfn)(struct sk_buff *))
688{ 933{
689 struct iphdr *iph; 934 struct ip_vs_iphdr iph;
690 struct ip_vs_protocol *pp; 935 struct ip_vs_protocol *pp;
691 struct ip_vs_conn *cp; 936 struct ip_vs_conn *cp;
692 int ihl; 937 int af;
693 938
694 EnterFunction(11); 939 EnterFunction(11);
695 940
941 af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
942
696 if (skb->ipvs_property) 943 if (skb->ipvs_property)
697 return NF_ACCEPT; 944 return NF_ACCEPT;
698 945
699 iph = ip_hdr(skb); 946 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
700 if (unlikely(iph->protocol == IPPROTO_ICMP)) { 947#ifdef CONFIG_IP_VS_IPV6
701 int related, verdict = ip_vs_out_icmp(skb, &related); 948 if (af == AF_INET6) {
949 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
950 int related, verdict = ip_vs_out_icmp_v6(skb, &related);
702 951
703 if (related) 952 if (related)
704 return verdict; 953 return verdict;
705 iph = ip_hdr(skb); 954 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
706 } 955 }
956 } else
957#endif
958 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
959 int related, verdict = ip_vs_out_icmp(skb, &related);
707 960
708 pp = ip_vs_proto_get(iph->protocol); 961 if (related)
962 return verdict;
963 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
964 }
965
966 pp = ip_vs_proto_get(iph.protocol);
709 if (unlikely(!pp)) 967 if (unlikely(!pp))
710 return NF_ACCEPT; 968 return NF_ACCEPT;
711 969
712 /* reassemble IP fragments */ 970 /* reassemble IP fragments */
713 if (unlikely(iph->frag_off & htons(IP_MF|IP_OFFSET) && 971#ifdef CONFIG_IP_VS_IPV6
714 !pp->dont_defrag)) { 972 if (af == AF_INET6) {
715 if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT)) 973 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
716 return NF_STOLEN; 974 int related, verdict = ip_vs_out_icmp_v6(skb, &related);
717 iph = ip_hdr(skb); 975
718 } 976 if (related)
977 return verdict;
719 978
720 ihl = iph->ihl << 2; 979 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
980 }
981 } else
982#endif
983 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) &&
984 !pp->dont_defrag)) {
985 if (ip_vs_gather_frags(skb, IP_DEFRAG_VS_OUT))
986 return NF_STOLEN;
987
988 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
989 }
721 990
722 /* 991 /*
723 * Check if the packet belongs to an existing entry 992 * Check if the packet belongs to an existing entry
724 */ 993 */
725 cp = pp->conn_out_get(skb, pp, iph, ihl, 0); 994 cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
726 995
727 if (unlikely(!cp)) { 996 if (unlikely(!cp)) {
728 if (sysctl_ip_vs_nat_icmp_send && 997 if (sysctl_ip_vs_nat_icmp_send &&
@@ -730,21 +999,31 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
730 pp->protocol == IPPROTO_UDP)) { 999 pp->protocol == IPPROTO_UDP)) {
731 __be16 _ports[2], *pptr; 1000 __be16 _ports[2], *pptr;
732 1001
733 pptr = skb_header_pointer(skb, ihl, 1002 pptr = skb_header_pointer(skb, iph.len,
734 sizeof(_ports), _ports); 1003 sizeof(_ports), _ports);
735 if (pptr == NULL) 1004 if (pptr == NULL)
736 return NF_ACCEPT; /* Not for me */ 1005 return NF_ACCEPT; /* Not for me */
737 if (ip_vs_lookup_real_service(iph->protocol, 1006 if (ip_vs_lookup_real_service(af, iph.protocol,
738 iph->saddr, pptr[0])) { 1007 &iph.saddr,
1008 pptr[0])) {
739 /* 1009 /*
740 * Notify the real server: there is no 1010 * Notify the real server: there is no
741 * existing entry if it is not RST 1011 * existing entry if it is not RST
742 * packet or not TCP packet. 1012 * packet or not TCP packet.
743 */ 1013 */
744 if (iph->protocol != IPPROTO_TCP 1014 if (iph.protocol != IPPROTO_TCP
745 || !is_tcp_reset(skb)) { 1015 || !is_tcp_reset(skb, iph.len)) {
746 icmp_send(skb,ICMP_DEST_UNREACH, 1016#ifdef CONFIG_IP_VS_IPV6
747 ICMP_PORT_UNREACH, 0); 1017 if (af == AF_INET6)
1018 icmpv6_send(skb,
1019 ICMPV6_DEST_UNREACH,
1020 ICMPV6_PORT_UNREACH,
1021 0, skb->dev);
1022 else
1023#endif
1024 icmp_send(skb,
1025 ICMP_DEST_UNREACH,
1026 ICMP_PORT_UNREACH, 0);
748 return NF_DROP; 1027 return NF_DROP;
749 } 1028 }
750 } 1029 }
@@ -754,41 +1033,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
754 return NF_ACCEPT; 1033 return NF_ACCEPT;
755 } 1034 }
756 1035
757 IP_VS_DBG_PKT(11, pp, skb, 0, "Outgoing packet"); 1036 return handle_response(af, skb, pp, cp, iph.len);
758
759 if (!skb_make_writable(skb, ihl))
760 goto drop;
761
762 /* mangle the packet */
763 if (pp->snat_handler && !pp->snat_handler(skb, pp, cp))
764 goto drop;
765 ip_hdr(skb)->saddr = cp->vaddr;
766 ip_send_check(ip_hdr(skb));
767
768 /* For policy routing, packets originating from this
769 * machine itself may be routed differently to packets
770 * passing through. We want this packet to be routed as
771 * if it came from this machine itself. So re-compute
772 * the routing information.
773 */
774 if (ip_route_me_harder(skb, RTN_LOCAL) != 0)
775 goto drop;
776
777 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
778
779 ip_vs_out_stats(cp, skb);
780 ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
781 ip_vs_conn_put(cp);
782
783 skb->ipvs_property = 1;
784
785 LeaveFunction(11);
786 return NF_ACCEPT;
787
788 drop:
789 ip_vs_conn_put(cp);
790 kfree_skb(skb);
791 return NF_STOLEN;
792} 1037}
793 1038
794 1039
@@ -804,9 +1049,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
804 struct iphdr *iph; 1049 struct iphdr *iph;
805 struct icmphdr _icmph, *ic; 1050 struct icmphdr _icmph, *ic;
806 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */ 1051 struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
1052 struct ip_vs_iphdr ciph;
807 struct ip_vs_conn *cp; 1053 struct ip_vs_conn *cp;
808 struct ip_vs_protocol *pp; 1054 struct ip_vs_protocol *pp;
809 unsigned int offset, ihl, verdict; 1055 unsigned int offset, ihl, verdict;
1056 union nf_inet_addr snet;
810 1057
811 *related = 1; 1058 *related = 1;
812 1059
@@ -860,10 +1107,20 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
860 1107
861 offset += cih->ihl * 4; 1108 offset += cih->ihl * 4;
862 1109
1110 ip_vs_fill_iphdr(AF_INET, cih, &ciph);
863 /* The embedded headers contain source and dest in reverse order */ 1111 /* The embedded headers contain source and dest in reverse order */
864 cp = pp->conn_in_get(skb, pp, cih, offset, 1); 1112 cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
865 if (!cp) 1113 if (!cp) {
1114 /* The packet could also belong to a local client */
1115 cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
1116 if (cp) {
1117 snet.ip = iph->saddr;
1118 return handle_response_icmp(AF_INET, skb, &snet,
1119 cih->protocol, cp, pp,
1120 offset, ihl);
1121 }
866 return NF_ACCEPT; 1122 return NF_ACCEPT;
1123 }
867 1124
868 verdict = NF_DROP; 1125 verdict = NF_DROP;
869 1126
@@ -888,6 +1145,105 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
888 return verdict; 1145 return verdict;
889} 1146}
890 1147
1148#ifdef CONFIG_IP_VS_IPV6
1149static int
1150ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1151{
1152 struct ipv6hdr *iph;
1153 struct icmp6hdr _icmph, *ic;
1154 struct ipv6hdr _ciph, *cih; /* The ip header contained
1155 within the ICMP */
1156 struct ip_vs_iphdr ciph;
1157 struct ip_vs_conn *cp;
1158 struct ip_vs_protocol *pp;
1159 unsigned int offset, verdict;
1160 union nf_inet_addr snet;
1161
1162 *related = 1;
1163
1164 /* reassemble IP fragments */
1165 if (ipv6_hdr(skb)->nexthdr == IPPROTO_FRAGMENT) {
1166 if (ip_vs_gather_frags_v6(skb, hooknum == NF_INET_LOCAL_IN ?
1167 IP_DEFRAG_VS_IN :
1168 IP_DEFRAG_VS_FWD))
1169 return NF_STOLEN;
1170 }
1171
1172 iph = ipv6_hdr(skb);
1173 offset = sizeof(struct ipv6hdr);
1174 ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph);
1175 if (ic == NULL)
1176 return NF_DROP;
1177
1178 IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) " NIP6_FMT "->" NIP6_FMT "\n",
1179 ic->icmp6_type, ntohs(icmpv6_id(ic)),
1180 NIP6(iph->saddr), NIP6(iph->daddr));
1181
1182 /*
1183 * Work through seeing if this is for us.
1184 * These checks are supposed to be in an order that means easy
1185 * things are checked first to speed up processing.... however
1186 * this means that some packets will manage to get a long way
1187 * down this stack and then be rejected, but that's life.
1188 */
1189 if ((ic->icmp6_type != ICMPV6_DEST_UNREACH) &&
1190 (ic->icmp6_type != ICMPV6_PKT_TOOBIG) &&
1191 (ic->icmp6_type != ICMPV6_TIME_EXCEED)) {
1192 *related = 0;
1193 return NF_ACCEPT;
1194 }
1195
1196 /* Now find the contained IP header */
1197 offset += sizeof(_icmph);
1198 cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
1199 if (cih == NULL)
1200 return NF_ACCEPT; /* The packet looks wrong, ignore */
1201
1202 pp = ip_vs_proto_get(cih->nexthdr);
1203 if (!pp)
1204 return NF_ACCEPT;
1205
1206 /* Is the embedded protocol header present? */
1207 /* TODO: we don't support fragmentation at the moment anyways */
1208 if (unlikely(cih->nexthdr == IPPROTO_FRAGMENT && pp->dont_defrag))
1209 return NF_ACCEPT;
1210
1211 IP_VS_DBG_PKT(11, pp, skb, offset, "Checking incoming ICMPv6 for");
1212
1213 offset += sizeof(struct ipv6hdr);
1214
1215 ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
1216 /* The embedded headers contain source and dest in reverse order */
1217 cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
1218 if (!cp) {
1219 /* The packet could also belong to a local client */
1220 cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
1221 if (cp) {
1222 ipv6_addr_copy(&snet.in6, &iph->saddr);
1223 return handle_response_icmp(AF_INET6, skb, &snet,
1224 cih->nexthdr,
1225 cp, pp, offset,
1226 sizeof(struct ipv6hdr));
1227 }
1228 return NF_ACCEPT;
1229 }
1230
1231 verdict = NF_DROP;
1232
1233 /* do the statistics and put it back */
1234 ip_vs_in_stats(cp, skb);
1235 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr)
1236 offset += 2 * sizeof(__u16);
1237 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
1238 /* do not touch skb anymore */
1239
1240 __ip_vs_conn_put(cp);
1241
1242 return verdict;
1243}
1244#endif
1245
1246
891/* 1247/*
892 * Check if it's for virtual services, look it up, 1248 * Check if it's for virtual services, look it up,
893 * and send it on its way... 1249 * and send it on its way...
@@ -897,50 +1253,54 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
897 const struct net_device *in, const struct net_device *out, 1253 const struct net_device *in, const struct net_device *out,
898 int (*okfn)(struct sk_buff *)) 1254 int (*okfn)(struct sk_buff *))
899{ 1255{
900 struct iphdr *iph; 1256 struct ip_vs_iphdr iph;
901 struct ip_vs_protocol *pp; 1257 struct ip_vs_protocol *pp;
902 struct ip_vs_conn *cp; 1258 struct ip_vs_conn *cp;
903 int ret, restart; 1259 int ret, restart, af;
904 int ihl; 1260
1261 af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
1262
1263 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
905 1264
906 /* 1265 /*
907 * Big tappo: only PACKET_HOST (neither loopback nor mcasts) 1266 * Big tappo: only PACKET_HOST, including loopback for local client
908 * ... don't know why 1st test DOES NOT include 2nd (?) 1267 * Don't handle local packets on IPv6 for now
909 */ 1268 */
910 if (unlikely(skb->pkt_type != PACKET_HOST 1269 if (unlikely(skb->pkt_type != PACKET_HOST)) {
911 || skb->dev->flags & IFF_LOOPBACK || skb->sk)) { 1270 IP_VS_DBG_BUF(12, "packet type=%d proto=%d daddr=%s ignored\n",
912 IP_VS_DBG(12, "packet type=%d proto=%d daddr=%d.%d.%d.%d ignored\n", 1271 skb->pkt_type,
913 skb->pkt_type, 1272 iph.protocol,
914 ip_hdr(skb)->protocol, 1273 IP_VS_DBG_ADDR(af, &iph.daddr));
915 NIPQUAD(ip_hdr(skb)->daddr));
916 return NF_ACCEPT; 1274 return NF_ACCEPT;
917 } 1275 }
918 1276
919 iph = ip_hdr(skb); 1277 if (unlikely(iph.protocol == IPPROTO_ICMP)) {
920 if (unlikely(iph->protocol == IPPROTO_ICMP)) {
921 int related, verdict = ip_vs_in_icmp(skb, &related, hooknum); 1278 int related, verdict = ip_vs_in_icmp(skb, &related, hooknum);
922 1279
923 if (related) 1280 if (related)
924 return verdict; 1281 return verdict;
925 iph = ip_hdr(skb); 1282 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
926 } 1283 }
927 1284
928 /* Protocol supported? */ 1285 /* Protocol supported? */
929 pp = ip_vs_proto_get(iph->protocol); 1286 pp = ip_vs_proto_get(iph.protocol);
930 if (unlikely(!pp)) 1287 if (unlikely(!pp))
931 return NF_ACCEPT; 1288 return NF_ACCEPT;
932 1289
933 ihl = iph->ihl << 2;
934
935 /* 1290 /*
936 * Check if the packet belongs to an existing connection entry 1291 * Check if the packet belongs to an existing connection entry
937 */ 1292 */
938 cp = pp->conn_in_get(skb, pp, iph, ihl, 0); 1293 cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
939 1294
940 if (unlikely(!cp)) { 1295 if (unlikely(!cp)) {
941 int v; 1296 int v;
942 1297
943 if (!pp->conn_schedule(skb, pp, &v, &cp)) 1298 /* For local client packets, it could be a response */
1299 cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
1300 if (cp)
1301 return handle_response(af, skb, pp, cp, iph.len);
1302
1303 if (!pp->conn_schedule(af, skb, pp, &v, &cp))
944 return v; 1304 return v;
945 } 1305 }
946 1306
@@ -984,7 +1344,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
984 * encorage the standby servers to update the connections timeout 1344 * encorage the standby servers to update the connections timeout
985 */ 1345 */
986 atomic_inc(&cp->in_pkts); 1346 atomic_inc(&cp->in_pkts);
987 if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && 1347 if (af == AF_INET &&
1348 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
988 (((cp->protocol != IPPROTO_TCP || 1349 (((cp->protocol != IPPROTO_TCP ||
989 cp->state == IP_VS_TCP_S_ESTABLISHED) && 1350 cp->state == IP_VS_TCP_S_ESTABLISHED) &&
990 (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1] 1351 (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
@@ -1023,6 +1384,21 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
1023 return ip_vs_in_icmp(skb, &r, hooknum); 1384 return ip_vs_in_icmp(skb, &r, hooknum);
1024} 1385}
1025 1386
1387#ifdef CONFIG_IP_VS_IPV6
1388static unsigned int
1389ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
1390 const struct net_device *in, const struct net_device *out,
1391 int (*okfn)(struct sk_buff *))
1392{
1393 int r;
1394
1395 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1396 return NF_ACCEPT;
1397
1398 return ip_vs_in_icmp_v6(skb, &r, hooknum);
1399}
1400#endif
1401
1026 1402
1027static struct nf_hook_ops ip_vs_ops[] __read_mostly = { 1403static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1028 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1404 /* After packet filtering, forward packet through VS/DR, VS/TUN,
@@ -1060,6 +1436,43 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1060 .hooknum = NF_INET_POST_ROUTING, 1436 .hooknum = NF_INET_POST_ROUTING,
1061 .priority = NF_IP_PRI_NAT_SRC-1, 1437 .priority = NF_IP_PRI_NAT_SRC-1,
1062 }, 1438 },
1439#ifdef CONFIG_IP_VS_IPV6
1440 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1441 * or VS/NAT(change destination), so that filtering rules can be
1442 * applied to IPVS. */
1443 {
1444 .hook = ip_vs_in,
1445 .owner = THIS_MODULE,
1446 .pf = PF_INET6,
1447 .hooknum = NF_INET_LOCAL_IN,
1448 .priority = 100,
1449 },
1450 /* After packet filtering, change source only for VS/NAT */
1451 {
1452 .hook = ip_vs_out,
1453 .owner = THIS_MODULE,
1454 .pf = PF_INET6,
1455 .hooknum = NF_INET_FORWARD,
1456 .priority = 100,
1457 },
1458 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1459 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
1460 {
1461 .hook = ip_vs_forward_icmp_v6,
1462 .owner = THIS_MODULE,
1463 .pf = PF_INET6,
1464 .hooknum = NF_INET_FORWARD,
1465 .priority = 99,
1466 },
1467 /* Before the netfilter connection tracking, exit from POST_ROUTING */
1468 {
1469 .hook = ip_vs_post_routing,
1470 .owner = THIS_MODULE,
1471 .pf = PF_INET6,
1472 .hooknum = NF_INET_POST_ROUTING,
1473 .priority = NF_IP6_PRI_NAT_SRC-1,
1474 },
1475#endif
1063}; 1476};
1064 1477
1065 1478
@@ -1070,10 +1483,12 @@ static int __init ip_vs_init(void)
1070{ 1483{
1071 int ret; 1484 int ret;
1072 1485
1486 ip_vs_estimator_init();
1487
1073 ret = ip_vs_control_init(); 1488 ret = ip_vs_control_init();
1074 if (ret < 0) { 1489 if (ret < 0) {
1075 IP_VS_ERR("can't setup control.\n"); 1490 IP_VS_ERR("can't setup control.\n");
1076 goto cleanup_nothing; 1491 goto cleanup_estimator;
1077 } 1492 }
1078 1493
1079 ip_vs_protocol_init(); 1494 ip_vs_protocol_init();
@@ -1106,7 +1521,8 @@ static int __init ip_vs_init(void)
1106 cleanup_protocol: 1521 cleanup_protocol:
1107 ip_vs_protocol_cleanup(); 1522 ip_vs_protocol_cleanup();
1108 ip_vs_control_cleanup(); 1523 ip_vs_control_cleanup();
1109 cleanup_nothing: 1524 cleanup_estimator:
1525 ip_vs_estimator_cleanup();
1110 return ret; 1526 return ret;
1111} 1527}
1112 1528
@@ -1117,6 +1533,7 @@ static void __exit ip_vs_cleanup(void)
1117 ip_vs_app_cleanup(); 1533 ip_vs_app_cleanup();
1118 ip_vs_protocol_cleanup(); 1534 ip_vs_protocol_cleanup();
1119 ip_vs_control_cleanup(); 1535 ip_vs_control_cleanup();
1536 ip_vs_estimator_cleanup();
1120 IP_VS_INFO("ipvs unloaded.\n"); 1537 IP_VS_INFO("ipvs unloaded.\n");
1121} 1538}
1122 1539
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 6379705a8dcb..771551d8fba9 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -35,8 +35,13 @@
35 35
36#include <net/net_namespace.h> 36#include <net/net_namespace.h>
37#include <net/ip.h> 37#include <net/ip.h>
38#ifdef CONFIG_IP_VS_IPV6
39#include <net/ipv6.h>
40#include <net/ip6_route.h>
41#endif
38#include <net/route.h> 42#include <net/route.h>
39#include <net/sock.h> 43#include <net/sock.h>
44#include <net/genetlink.h>
40 45
41#include <asm/uaccess.h> 46#include <asm/uaccess.h>
42 47
@@ -90,6 +95,26 @@ int ip_vs_get_debug_level(void)
90} 95}
91#endif 96#endif
92 97
98#ifdef CONFIG_IP_VS_IPV6
99/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
100static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
101{
102 struct rt6_info *rt;
103 struct flowi fl = {
104 .oif = 0,
105 .nl_u = {
106 .ip6_u = {
107 .daddr = *addr,
108 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
109 };
110
111 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
112 if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
113 return 1;
114
115 return 0;
116}
117#endif
93/* 118/*
94 * update_defense_level is called from keventd and from sysctl, 119 * update_defense_level is called from keventd and from sysctl,
95 * so it needs to protect itself from softirqs 120 * so it needs to protect itself from softirqs
@@ -281,11 +306,19 @@ static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
281 * Returns hash value for virtual service 306 * Returns hash value for virtual service
282 */ 307 */
283static __inline__ unsigned 308static __inline__ unsigned
284ip_vs_svc_hashkey(unsigned proto, __be32 addr, __be16 port) 309ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
310 __be16 port)
285{ 311{
286 register unsigned porth = ntohs(port); 312 register unsigned porth = ntohs(port);
313 __be32 addr_fold = addr->ip;
314
315#ifdef CONFIG_IP_VS_IPV6
316 if (af == AF_INET6)
317 addr_fold = addr->ip6[0]^addr->ip6[1]^
318 addr->ip6[2]^addr->ip6[3];
319#endif
287 320
288 return (proto^ntohl(addr)^(porth>>IP_VS_SVC_TAB_BITS)^porth) 321 return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
289 & IP_VS_SVC_TAB_MASK; 322 & IP_VS_SVC_TAB_MASK;
290} 323}
291 324
@@ -316,7 +349,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
316 /* 349 /*
317 * Hash it by <protocol,addr,port> in ip_vs_svc_table 350 * Hash it by <protocol,addr,port> in ip_vs_svc_table
318 */ 351 */
319 hash = ip_vs_svc_hashkey(svc->protocol, svc->addr, svc->port); 352 hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
353 svc->port);
320 list_add(&svc->s_list, &ip_vs_svc_table[hash]); 354 list_add(&svc->s_list, &ip_vs_svc_table[hash]);
321 } else { 355 } else {
322 /* 356 /*
@@ -362,17 +396,19 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
362/* 396/*
363 * Get service by {proto,addr,port} in the service table. 397 * Get service by {proto,addr,port} in the service table.
364 */ 398 */
365static __inline__ struct ip_vs_service * 399static inline struct ip_vs_service *
366__ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport) 400__ip_vs_service_get(int af, __u16 protocol, const union nf_inet_addr *vaddr,
401 __be16 vport)
367{ 402{
368 unsigned hash; 403 unsigned hash;
369 struct ip_vs_service *svc; 404 struct ip_vs_service *svc;
370 405
371 /* Check for "full" addressed entries */ 406 /* Check for "full" addressed entries */
372 hash = ip_vs_svc_hashkey(protocol, vaddr, vport); 407 hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
373 408
374 list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ 409 list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
375 if ((svc->addr == vaddr) 410 if ((svc->af == af)
411 && ip_vs_addr_equal(af, &svc->addr, vaddr)
376 && (svc->port == vport) 412 && (svc->port == vport)
377 && (svc->protocol == protocol)) { 413 && (svc->protocol == protocol)) {
378 /* HIT */ 414 /* HIT */
@@ -388,7 +424,8 @@ __ip_vs_service_get(__u16 protocol, __be32 vaddr, __be16 vport)
388/* 424/*
389 * Get service by {fwmark} in the service table. 425 * Get service by {fwmark} in the service table.
390 */ 426 */
391static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark) 427static inline struct ip_vs_service *
428__ip_vs_svc_fwm_get(int af, __u32 fwmark)
392{ 429{
393 unsigned hash; 430 unsigned hash;
394 struct ip_vs_service *svc; 431 struct ip_vs_service *svc;
@@ -397,7 +434,7 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark)
397 hash = ip_vs_svc_fwm_hashkey(fwmark); 434 hash = ip_vs_svc_fwm_hashkey(fwmark);
398 435
399 list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { 436 list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
400 if (svc->fwmark == fwmark) { 437 if (svc->fwmark == fwmark && svc->af == af) {
401 /* HIT */ 438 /* HIT */
402 atomic_inc(&svc->usecnt); 439 atomic_inc(&svc->usecnt);
403 return svc; 440 return svc;
@@ -408,7 +445,8 @@ static __inline__ struct ip_vs_service *__ip_vs_svc_fwm_get(__u32 fwmark)
408} 445}
409 446
410struct ip_vs_service * 447struct ip_vs_service *
411ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport) 448ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
449 const union nf_inet_addr *vaddr, __be16 vport)
412{ 450{
413 struct ip_vs_service *svc; 451 struct ip_vs_service *svc;
414 452
@@ -417,14 +455,14 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
417 /* 455 /*
418 * Check the table hashed by fwmark first 456 * Check the table hashed by fwmark first
419 */ 457 */
420 if (fwmark && (svc = __ip_vs_svc_fwm_get(fwmark))) 458 if (fwmark && (svc = __ip_vs_svc_fwm_get(af, fwmark)))
421 goto out; 459 goto out;
422 460
423 /* 461 /*
424 * Check the table hashed by <protocol,addr,port> 462 * Check the table hashed by <protocol,addr,port>
425 * for "full" addressed entries 463 * for "full" addressed entries
426 */ 464 */
427 svc = __ip_vs_service_get(protocol, vaddr, vport); 465 svc = __ip_vs_service_get(af, protocol, vaddr, vport);
428 466
429 if (svc == NULL 467 if (svc == NULL
430 && protocol == IPPROTO_TCP 468 && protocol == IPPROTO_TCP
@@ -434,7 +472,7 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
434 * Check if ftp service entry exists, the packet 472 * Check if ftp service entry exists, the packet
435 * might belong to FTP data connections. 473 * might belong to FTP data connections.
436 */ 474 */
437 svc = __ip_vs_service_get(protocol, vaddr, FTPPORT); 475 svc = __ip_vs_service_get(af, protocol, vaddr, FTPPORT);
438 } 476 }
439 477
440 if (svc == NULL 478 if (svc == NULL
@@ -442,16 +480,16 @@ ip_vs_service_get(__u32 fwmark, __u16 protocol, __be32 vaddr, __be16 vport)
442 /* 480 /*
443 * Check if the catch-all port (port zero) exists 481 * Check if the catch-all port (port zero) exists
444 */ 482 */
445 svc = __ip_vs_service_get(protocol, vaddr, 0); 483 svc = __ip_vs_service_get(af, protocol, vaddr, 0);
446 } 484 }
447 485
448 out: 486 out:
449 read_unlock(&__ip_vs_svc_lock); 487 read_unlock(&__ip_vs_svc_lock);
450 488
451 IP_VS_DBG(9, "lookup service: fwm %u %s %u.%u.%u.%u:%u %s\n", 489 IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
452 fwmark, ip_vs_proto_name(protocol), 490 fwmark, ip_vs_proto_name(protocol),
453 NIPQUAD(vaddr), ntohs(vport), 491 IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
454 svc?"hit":"not hit"); 492 svc ? "hit" : "not hit");
455 493
456 return svc; 494 return svc;
457} 495}
@@ -478,11 +516,20 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
478/* 516/*
479 * Returns hash value for real service 517 * Returns hash value for real service
480 */ 518 */
481static __inline__ unsigned ip_vs_rs_hashkey(__be32 addr, __be16 port) 519static inline unsigned ip_vs_rs_hashkey(int af,
520 const union nf_inet_addr *addr,
521 __be16 port)
482{ 522{
483 register unsigned porth = ntohs(port); 523 register unsigned porth = ntohs(port);
524 __be32 addr_fold = addr->ip;
525
526#ifdef CONFIG_IP_VS_IPV6
527 if (af == AF_INET6)
528 addr_fold = addr->ip6[0]^addr->ip6[1]^
529 addr->ip6[2]^addr->ip6[3];
530#endif
484 531
485 return (ntohl(addr)^(porth>>IP_VS_RTAB_BITS)^porth) 532 return (ntohl(addr_fold)^(porth>>IP_VS_RTAB_BITS)^porth)
486 & IP_VS_RTAB_MASK; 533 & IP_VS_RTAB_MASK;
487} 534}
488 535
@@ -502,7 +549,8 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
502 * Hash by proto,addr,port, 549 * Hash by proto,addr,port,
503 * which are the parameters of the real service. 550 * which are the parameters of the real service.
504 */ 551 */
505 hash = ip_vs_rs_hashkey(dest->addr, dest->port); 552 hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
553
506 list_add(&dest->d_list, &ip_vs_rtable[hash]); 554 list_add(&dest->d_list, &ip_vs_rtable[hash]);
507 555
508 return 1; 556 return 1;
@@ -529,7 +577,9 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
529 * Lookup real service by <proto,addr,port> in the real service table. 577 * Lookup real service by <proto,addr,port> in the real service table.
530 */ 578 */
531struct ip_vs_dest * 579struct ip_vs_dest *
532ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport) 580ip_vs_lookup_real_service(int af, __u16 protocol,
581 const union nf_inet_addr *daddr,
582 __be16 dport)
533{ 583{
534 unsigned hash; 584 unsigned hash;
535 struct ip_vs_dest *dest; 585 struct ip_vs_dest *dest;
@@ -538,11 +588,12 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport)
538 * Check for "full" addressed entries 588 * Check for "full" addressed entries
539 * Return the first found entry 589 * Return the first found entry
540 */ 590 */
541 hash = ip_vs_rs_hashkey(daddr, dport); 591 hash = ip_vs_rs_hashkey(af, daddr, dport);
542 592
543 read_lock(&__ip_vs_rs_lock); 593 read_lock(&__ip_vs_rs_lock);
544 list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) { 594 list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
545 if ((dest->addr == daddr) 595 if ((dest->af == af)
596 && ip_vs_addr_equal(af, &dest->addr, daddr)
546 && (dest->port == dport) 597 && (dest->port == dport)
547 && ((dest->protocol == protocol) || 598 && ((dest->protocol == protocol) ||
548 dest->vfwmark)) { 599 dest->vfwmark)) {
@@ -560,7 +611,8 @@ ip_vs_lookup_real_service(__u16 protocol, __be32 daddr, __be16 dport)
560 * Lookup destination by {addr,port} in the given service 611 * Lookup destination by {addr,port} in the given service
561 */ 612 */
562static struct ip_vs_dest * 613static struct ip_vs_dest *
563ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) 614ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
615 __be16 dport)
564{ 616{
565 struct ip_vs_dest *dest; 617 struct ip_vs_dest *dest;
566 618
@@ -568,7 +620,9 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
568 * Find the destination for the given service 620 * Find the destination for the given service
569 */ 621 */
570 list_for_each_entry(dest, &svc->destinations, n_list) { 622 list_for_each_entry(dest, &svc->destinations, n_list) {
571 if ((dest->addr == daddr) && (dest->port == dport)) { 623 if ((dest->af == svc->af)
624 && ip_vs_addr_equal(svc->af, &dest->addr, daddr)
625 && (dest->port == dport)) {
572 /* HIT */ 626 /* HIT */
573 return dest; 627 return dest;
574 } 628 }
@@ -587,13 +641,15 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
587 * ip_vs_lookup_real_service() looked promissing, but 641 * ip_vs_lookup_real_service() looked promissing, but
588 * seems not working as expected. 642 * seems not working as expected.
589 */ 643 */
590struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport, 644struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
591 __be32 vaddr, __be16 vport, __u16 protocol) 645 __be16 dport,
646 const union nf_inet_addr *vaddr,
647 __be16 vport, __u16 protocol)
592{ 648{
593 struct ip_vs_dest *dest; 649 struct ip_vs_dest *dest;
594 struct ip_vs_service *svc; 650 struct ip_vs_service *svc;
595 651
596 svc = ip_vs_service_get(0, protocol, vaddr, vport); 652 svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
597 if (!svc) 653 if (!svc)
598 return NULL; 654 return NULL;
599 dest = ip_vs_lookup_dest(svc, daddr, dport); 655 dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -614,7 +670,8 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
614 * scheduling. 670 * scheduling.
615 */ 671 */
616static struct ip_vs_dest * 672static struct ip_vs_dest *
617ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport) 673ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
674 __be16 dport)
618{ 675{
619 struct ip_vs_dest *dest, *nxt; 676 struct ip_vs_dest *dest, *nxt;
620 677
@@ -622,17 +679,19 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
622 * Find the destination in trash 679 * Find the destination in trash
623 */ 680 */
624 list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) { 681 list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
625 IP_VS_DBG(3, "Destination %u/%u.%u.%u.%u:%u still in trash, " 682 IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
626 "dest->refcnt=%d\n", 683 "dest->refcnt=%d\n",
627 dest->vfwmark, 684 dest->vfwmark,
628 NIPQUAD(dest->addr), ntohs(dest->port), 685 IP_VS_DBG_ADDR(svc->af, &dest->addr),
629 atomic_read(&dest->refcnt)); 686 ntohs(dest->port),
630 if (dest->addr == daddr && 687 atomic_read(&dest->refcnt));
688 if (dest->af == svc->af &&
689 ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
631 dest->port == dport && 690 dest->port == dport &&
632 dest->vfwmark == svc->fwmark && 691 dest->vfwmark == svc->fwmark &&
633 dest->protocol == svc->protocol && 692 dest->protocol == svc->protocol &&
634 (svc->fwmark || 693 (svc->fwmark ||
635 (dest->vaddr == svc->addr && 694 (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
636 dest->vport == svc->port))) { 695 dest->vport == svc->port))) {
637 /* HIT */ 696 /* HIT */
638 return dest; 697 return dest;
@@ -642,10 +701,11 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
642 * Try to purge the destination from trash if not referenced 701 * Try to purge the destination from trash if not referenced
643 */ 702 */
644 if (atomic_read(&dest->refcnt) == 1) { 703 if (atomic_read(&dest->refcnt) == 1) {
645 IP_VS_DBG(3, "Removing destination %u/%u.%u.%u.%u:%u " 704 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u "
646 "from trash\n", 705 "from trash\n",
647 dest->vfwmark, 706 dest->vfwmark,
648 NIPQUAD(dest->addr), ntohs(dest->port)); 707 IP_VS_DBG_ADDR(svc->af, &dest->addr),
708 ntohs(dest->port));
649 list_del(&dest->n_list); 709 list_del(&dest->n_list);
650 ip_vs_dst_reset(dest); 710 ip_vs_dst_reset(dest);
651 __ip_vs_unbind_svc(dest); 711 __ip_vs_unbind_svc(dest);
@@ -684,18 +744,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
684{ 744{
685 spin_lock_bh(&stats->lock); 745 spin_lock_bh(&stats->lock);
686 746
687 stats->conns = 0; 747 memset(&stats->ustats, 0, sizeof(stats->ustats));
688 stats->inpkts = 0;
689 stats->outpkts = 0;
690 stats->inbytes = 0;
691 stats->outbytes = 0;
692
693 stats->cps = 0;
694 stats->inpps = 0;
695 stats->outpps = 0;
696 stats->inbps = 0;
697 stats->outbps = 0;
698
699 ip_vs_zero_estimator(stats); 748 ip_vs_zero_estimator(stats);
700 749
701 spin_unlock_bh(&stats->lock); 750 spin_unlock_bh(&stats->lock);
@@ -706,7 +755,7 @@ ip_vs_zero_stats(struct ip_vs_stats *stats)
706 */ 755 */
707static void 756static void
708__ip_vs_update_dest(struct ip_vs_service *svc, 757__ip_vs_update_dest(struct ip_vs_service *svc,
709 struct ip_vs_dest *dest, struct ip_vs_dest_user *udest) 758 struct ip_vs_dest *dest, struct ip_vs_dest_user_kern *udest)
710{ 759{
711 int conn_flags; 760 int conn_flags;
712 761
@@ -715,10 +764,18 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
715 conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE; 764 conn_flags = udest->conn_flags | IP_VS_CONN_F_INACTIVE;
716 765
717 /* check if local node and update the flags */ 766 /* check if local node and update the flags */
718 if (inet_addr_type(&init_net, udest->addr) == RTN_LOCAL) { 767#ifdef CONFIG_IP_VS_IPV6
719 conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK) 768 if (svc->af == AF_INET6) {
720 | IP_VS_CONN_F_LOCALNODE; 769 if (__ip_vs_addr_is_local_v6(&udest->addr.in6)) {
721 } 770 conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
771 | IP_VS_CONN_F_LOCALNODE;
772 }
773 } else
774#endif
775 if (inet_addr_type(&init_net, udest->addr.ip) == RTN_LOCAL) {
776 conn_flags = (conn_flags & ~IP_VS_CONN_F_FWD_MASK)
777 | IP_VS_CONN_F_LOCALNODE;
778 }
722 779
723 /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ 780 /* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
724 if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) { 781 if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != 0) {
@@ -759,7 +816,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
759 * Create a destination for the given service 816 * Create a destination for the given service
760 */ 817 */
761static int 818static int
762ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest, 819ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
763 struct ip_vs_dest **dest_p) 820 struct ip_vs_dest **dest_p)
764{ 821{
765 struct ip_vs_dest *dest; 822 struct ip_vs_dest *dest;
@@ -767,9 +824,20 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
767 824
768 EnterFunction(2); 825 EnterFunction(2);
769 826
770 atype = inet_addr_type(&init_net, udest->addr); 827#ifdef CONFIG_IP_VS_IPV6
771 if (atype != RTN_LOCAL && atype != RTN_UNICAST) 828 if (svc->af == AF_INET6) {
772 return -EINVAL; 829 atype = ipv6_addr_type(&udest->addr.in6);
830 if ((!(atype & IPV6_ADDR_UNICAST) ||
831 atype & IPV6_ADDR_LINKLOCAL) &&
832 !__ip_vs_addr_is_local_v6(&udest->addr.in6))
833 return -EINVAL;
834 } else
835#endif
836 {
837 atype = inet_addr_type(&init_net, udest->addr.ip);
838 if (atype != RTN_LOCAL && atype != RTN_UNICAST)
839 return -EINVAL;
840 }
773 841
774 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC); 842 dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
775 if (dest == NULL) { 843 if (dest == NULL) {
@@ -777,11 +845,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
777 return -ENOMEM; 845 return -ENOMEM;
778 } 846 }
779 847
848 dest->af = svc->af;
780 dest->protocol = svc->protocol; 849 dest->protocol = svc->protocol;
781 dest->vaddr = svc->addr; 850 dest->vaddr = svc->addr;
782 dest->vport = svc->port; 851 dest->vport = svc->port;
783 dest->vfwmark = svc->fwmark; 852 dest->vfwmark = svc->fwmark;
784 dest->addr = udest->addr; 853 ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr);
785 dest->port = udest->port; 854 dest->port = udest->port;
786 855
787 atomic_set(&dest->activeconns, 0); 856 atomic_set(&dest->activeconns, 0);
@@ -806,10 +875,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest,
806 * Add a destination into an existing service 875 * Add a destination into an existing service
807 */ 876 */
808static int 877static int
809ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) 878ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
810{ 879{
811 struct ip_vs_dest *dest; 880 struct ip_vs_dest *dest;
812 __be32 daddr = udest->addr; 881 union nf_inet_addr daddr;
813 __be16 dport = udest->port; 882 __be16 dport = udest->port;
814 int ret; 883 int ret;
815 884
@@ -826,10 +895,13 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
826 return -ERANGE; 895 return -ERANGE;
827 } 896 }
828 897
898 ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
899
829 /* 900 /*
830 * Check if the dest already exists in the list 901 * Check if the dest already exists in the list
831 */ 902 */
832 dest = ip_vs_lookup_dest(svc, daddr, dport); 903 dest = ip_vs_lookup_dest(svc, &daddr, dport);
904
833 if (dest != NULL) { 905 if (dest != NULL) {
834 IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n"); 906 IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n");
835 return -EEXIST; 907 return -EEXIST;
@@ -839,15 +911,17 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
839 * Check if the dest already exists in the trash and 911 * Check if the dest already exists in the trash and
840 * is from the same service 912 * is from the same service
841 */ 913 */
842 dest = ip_vs_trash_get_dest(svc, daddr, dport); 914 dest = ip_vs_trash_get_dest(svc, &daddr, dport);
915
843 if (dest != NULL) { 916 if (dest != NULL) {
844 IP_VS_DBG(3, "Get destination %u.%u.%u.%u:%u from trash, " 917 IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
845 "dest->refcnt=%d, service %u/%u.%u.%u.%u:%u\n", 918 "dest->refcnt=%d, service %u/%s:%u\n",
846 NIPQUAD(daddr), ntohs(dport), 919 IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
847 atomic_read(&dest->refcnt), 920 atomic_read(&dest->refcnt),
848 dest->vfwmark, 921 dest->vfwmark,
849 NIPQUAD(dest->vaddr), 922 IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
850 ntohs(dest->vport)); 923 ntohs(dest->vport));
924
851 __ip_vs_update_dest(svc, dest, udest); 925 __ip_vs_update_dest(svc, dest, udest);
852 926
853 /* 927 /*
@@ -868,7 +942,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
868 svc->num_dests++; 942 svc->num_dests++;
869 943
870 /* call the update_service function of its scheduler */ 944 /* call the update_service function of its scheduler */
871 svc->scheduler->update_service(svc); 945 if (svc->scheduler->update_service)
946 svc->scheduler->update_service(svc);
872 947
873 write_unlock_bh(&__ip_vs_svc_lock); 948 write_unlock_bh(&__ip_vs_svc_lock);
874 return 0; 949 return 0;
@@ -898,7 +973,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
898 svc->num_dests++; 973 svc->num_dests++;
899 974
900 /* call the update_service function of its scheduler */ 975 /* call the update_service function of its scheduler */
901 svc->scheduler->update_service(svc); 976 if (svc->scheduler->update_service)
977 svc->scheduler->update_service(svc);
902 978
903 write_unlock_bh(&__ip_vs_svc_lock); 979 write_unlock_bh(&__ip_vs_svc_lock);
904 980
@@ -912,10 +988,10 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
912 * Edit a destination in the given service 988 * Edit a destination in the given service
913 */ 989 */
914static int 990static int
915ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest) 991ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
916{ 992{
917 struct ip_vs_dest *dest; 993 struct ip_vs_dest *dest;
918 __be32 daddr = udest->addr; 994 union nf_inet_addr daddr;
919 __be16 dport = udest->port; 995 __be16 dport = udest->port;
920 996
921 EnterFunction(2); 997 EnterFunction(2);
@@ -931,10 +1007,13 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
931 return -ERANGE; 1007 return -ERANGE;
932 } 1008 }
933 1009
1010 ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
1011
934 /* 1012 /*
935 * Lookup the destination list 1013 * Lookup the destination list
936 */ 1014 */
937 dest = ip_vs_lookup_dest(svc, daddr, dport); 1015 dest = ip_vs_lookup_dest(svc, &daddr, dport);
1016
938 if (dest == NULL) { 1017 if (dest == NULL) {
939 IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n"); 1018 IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n");
940 return -ENOENT; 1019 return -ENOENT;
@@ -948,7 +1027,8 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user *udest)
948 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1); 1027 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 1);
949 1028
950 /* call the update_service, because server weight may be changed */ 1029 /* call the update_service, because server weight may be changed */
951 svc->scheduler->update_service(svc); 1030 if (svc->scheduler->update_service)
1031 svc->scheduler->update_service(svc);
952 1032
953 write_unlock_bh(&__ip_vs_svc_lock); 1033 write_unlock_bh(&__ip_vs_svc_lock);
954 1034
@@ -987,10 +1067,11 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
987 atomic_dec(&dest->svc->refcnt); 1067 atomic_dec(&dest->svc->refcnt);
988 kfree(dest); 1068 kfree(dest);
989 } else { 1069 } else {
990 IP_VS_DBG(3, "Moving dest %u.%u.%u.%u:%u into trash, " 1070 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
991 "dest->refcnt=%d\n", 1071 "dest->refcnt=%d\n",
992 NIPQUAD(dest->addr), ntohs(dest->port), 1072 IP_VS_DBG_ADDR(dest->af, &dest->addr),
993 atomic_read(&dest->refcnt)); 1073 ntohs(dest->port),
1074 atomic_read(&dest->refcnt));
994 list_add(&dest->n_list, &ip_vs_dest_trash); 1075 list_add(&dest->n_list, &ip_vs_dest_trash);
995 atomic_inc(&dest->refcnt); 1076 atomic_inc(&dest->refcnt);
996 } 1077 }
@@ -1011,12 +1092,12 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1011 */ 1092 */
1012 list_del(&dest->n_list); 1093 list_del(&dest->n_list);
1013 svc->num_dests--; 1094 svc->num_dests--;
1014 if (svcupd) { 1095
1015 /* 1096 /*
1016 * Call the update_service function of its scheduler 1097 * Call the update_service function of its scheduler
1017 */ 1098 */
1018 svc->scheduler->update_service(svc); 1099 if (svcupd && svc->scheduler->update_service)
1019 } 1100 svc->scheduler->update_service(svc);
1020} 1101}
1021 1102
1022 1103
@@ -1024,15 +1105,15 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1024 * Delete a destination server in the given service 1105 * Delete a destination server in the given service
1025 */ 1106 */
1026static int 1107static int
1027ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest) 1108ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
1028{ 1109{
1029 struct ip_vs_dest *dest; 1110 struct ip_vs_dest *dest;
1030 __be32 daddr = udest->addr;
1031 __be16 dport = udest->port; 1111 __be16 dport = udest->port;
1032 1112
1033 EnterFunction(2); 1113 EnterFunction(2);
1034 1114
1035 dest = ip_vs_lookup_dest(svc, daddr, dport); 1115 dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
1116
1036 if (dest == NULL) { 1117 if (dest == NULL) {
1037 IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n"); 1118 IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n");
1038 return -ENOENT; 1119 return -ENOENT;
@@ -1067,7 +1148,8 @@ ip_vs_del_dest(struct ip_vs_service *svc,struct ip_vs_dest_user *udest)
1067 * Add a service into the service hash table 1148 * Add a service into the service hash table
1068 */ 1149 */
1069static int 1150static int
1070ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p) 1151ip_vs_add_service(struct ip_vs_service_user_kern *u,
1152 struct ip_vs_service **svc_p)
1071{ 1153{
1072 int ret = 0; 1154 int ret = 0;
1073 struct ip_vs_scheduler *sched = NULL; 1155 struct ip_vs_scheduler *sched = NULL;
@@ -1085,6 +1167,19 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1085 goto out_mod_dec; 1167 goto out_mod_dec;
1086 } 1168 }
1087 1169
1170#ifdef CONFIG_IP_VS_IPV6
1171 if (u->af == AF_INET6) {
1172 if (!sched->supports_ipv6) {
1173 ret = -EAFNOSUPPORT;
1174 goto out_err;
1175 }
1176 if ((u->netmask < 1) || (u->netmask > 128)) {
1177 ret = -EINVAL;
1178 goto out_err;
1179 }
1180 }
1181#endif
1182
1088 svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC); 1183 svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
1089 if (svc == NULL) { 1184 if (svc == NULL) {
1090 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n"); 1185 IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
@@ -1096,8 +1191,9 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1096 atomic_set(&svc->usecnt, 1); 1191 atomic_set(&svc->usecnt, 1);
1097 atomic_set(&svc->refcnt, 0); 1192 atomic_set(&svc->refcnt, 0);
1098 1193
1194 svc->af = u->af;
1099 svc->protocol = u->protocol; 1195 svc->protocol = u->protocol;
1100 svc->addr = u->addr; 1196 ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
1101 svc->port = u->port; 1197 svc->port = u->port;
1102 svc->fwmark = u->fwmark; 1198 svc->fwmark = u->fwmark;
1103 svc->flags = u->flags; 1199 svc->flags = u->flags;
@@ -1121,7 +1217,10 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1121 atomic_inc(&ip_vs_nullsvc_counter); 1217 atomic_inc(&ip_vs_nullsvc_counter);
1122 1218
1123 ip_vs_new_estimator(&svc->stats); 1219 ip_vs_new_estimator(&svc->stats);
1124 ip_vs_num_services++; 1220
1221 /* Count only IPv4 services for old get/setsockopt interface */
1222 if (svc->af == AF_INET)
1223 ip_vs_num_services++;
1125 1224
1126 /* Hash the service into the service table */ 1225 /* Hash the service into the service table */
1127 write_lock_bh(&__ip_vs_svc_lock); 1226 write_lock_bh(&__ip_vs_svc_lock);
@@ -1156,7 +1255,7 @@ ip_vs_add_service(struct ip_vs_service_user *u, struct ip_vs_service **svc_p)
1156 * Edit a service and bind it with a new scheduler 1255 * Edit a service and bind it with a new scheduler
1157 */ 1256 */
1158static int 1257static int
1159ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u) 1258ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1160{ 1259{
1161 struct ip_vs_scheduler *sched, *old_sched; 1260 struct ip_vs_scheduler *sched, *old_sched;
1162 int ret = 0; 1261 int ret = 0;
@@ -1172,6 +1271,19 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
1172 } 1271 }
1173 old_sched = sched; 1272 old_sched = sched;
1174 1273
1274#ifdef CONFIG_IP_VS_IPV6
1275 if (u->af == AF_INET6) {
1276 if (!sched->supports_ipv6) {
1277 ret = -EAFNOSUPPORT;
1278 goto out;
1279 }
1280 if ((u->netmask < 1) || (u->netmask > 128)) {
1281 ret = -EINVAL;
1282 goto out;
1283 }
1284 }
1285#endif
1286
1175 write_lock_bh(&__ip_vs_svc_lock); 1287 write_lock_bh(&__ip_vs_svc_lock);
1176 1288
1177 /* 1289 /*
@@ -1193,7 +1305,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
1193 */ 1305 */
1194 if ((ret = ip_vs_unbind_scheduler(svc))) { 1306 if ((ret = ip_vs_unbind_scheduler(svc))) {
1195 old_sched = sched; 1307 old_sched = sched;
1196 goto out; 1308 goto out_unlock;
1197 } 1309 }
1198 1310
1199 /* 1311 /*
@@ -1212,12 +1324,13 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user *u)
1212 */ 1324 */
1213 ip_vs_bind_scheduler(svc, old_sched); 1325 ip_vs_bind_scheduler(svc, old_sched);
1214 old_sched = sched; 1326 old_sched = sched;
1215 goto out; 1327 goto out_unlock;
1216 } 1328 }
1217 } 1329 }
1218 1330
1219 out: 1331 out_unlock:
1220 write_unlock_bh(&__ip_vs_svc_lock); 1332 write_unlock_bh(&__ip_vs_svc_lock);
1333 out:
1221 1334
1222 if (old_sched) 1335 if (old_sched)
1223 ip_vs_scheduler_put(old_sched); 1336 ip_vs_scheduler_put(old_sched);
@@ -1236,7 +1349,10 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
1236 struct ip_vs_dest *dest, *nxt; 1349 struct ip_vs_dest *dest, *nxt;
1237 struct ip_vs_scheduler *old_sched; 1350 struct ip_vs_scheduler *old_sched;
1238 1351
1239 ip_vs_num_services--; 1352 /* Count only IPv4 services for old get/setsockopt interface */
1353 if (svc->af == AF_INET)
1354 ip_vs_num_services--;
1355
1240 ip_vs_kill_estimator(&svc->stats); 1356 ip_vs_kill_estimator(&svc->stats);
1241 1357
1242 /* Unbind scheduler */ 1358 /* Unbind scheduler */
@@ -1671,6 +1787,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
1671} 1787}
1672 1788
1673static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos) 1789static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
1790__acquires(__ip_vs_svc_lock)
1674{ 1791{
1675 1792
1676 read_lock_bh(&__ip_vs_svc_lock); 1793 read_lock_bh(&__ip_vs_svc_lock);
@@ -1724,6 +1841,7 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1724} 1841}
1725 1842
1726static void ip_vs_info_seq_stop(struct seq_file *seq, void *v) 1843static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
1844__releases(__ip_vs_svc_lock)
1727{ 1845{
1728 read_unlock_bh(&__ip_vs_svc_lock); 1846 read_unlock_bh(&__ip_vs_svc_lock);
1729} 1847}
@@ -1744,15 +1862,25 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1744 const struct ip_vs_iter *iter = seq->private; 1862 const struct ip_vs_iter *iter = seq->private;
1745 const struct ip_vs_dest *dest; 1863 const struct ip_vs_dest *dest;
1746 1864
1747 if (iter->table == ip_vs_svc_table) 1865 if (iter->table == ip_vs_svc_table) {
1748 seq_printf(seq, "%s %08X:%04X %s ", 1866#ifdef CONFIG_IP_VS_IPV6
1749 ip_vs_proto_name(svc->protocol), 1867 if (svc->af == AF_INET6)
1750 ntohl(svc->addr), 1868 seq_printf(seq, "%s [" NIP6_FMT "]:%04X %s ",
1751 ntohs(svc->port), 1869 ip_vs_proto_name(svc->protocol),
1752 svc->scheduler->name); 1870 NIP6(svc->addr.in6),
1753 else 1871 ntohs(svc->port),
1872 svc->scheduler->name);
1873 else
1874#endif
1875 seq_printf(seq, "%s %08X:%04X %s ",
1876 ip_vs_proto_name(svc->protocol),
1877 ntohl(svc->addr.ip),
1878 ntohs(svc->port),
1879 svc->scheduler->name);
1880 } else {
1754 seq_printf(seq, "FWM %08X %s ", 1881 seq_printf(seq, "FWM %08X %s ",
1755 svc->fwmark, svc->scheduler->name); 1882 svc->fwmark, svc->scheduler->name);
1883 }
1756 1884
1757 if (svc->flags & IP_VS_SVC_F_PERSISTENT) 1885 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
1758 seq_printf(seq, "persistent %d %08X\n", 1886 seq_printf(seq, "persistent %d %08X\n",
@@ -1762,13 +1890,29 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1762 seq_putc(seq, '\n'); 1890 seq_putc(seq, '\n');
1763 1891
1764 list_for_each_entry(dest, &svc->destinations, n_list) { 1892 list_for_each_entry(dest, &svc->destinations, n_list) {
1765 seq_printf(seq, 1893#ifdef CONFIG_IP_VS_IPV6
1766 " -> %08X:%04X %-7s %-6d %-10d %-10d\n", 1894 if (dest->af == AF_INET6)
1767 ntohl(dest->addr), ntohs(dest->port), 1895 seq_printf(seq,
1768 ip_vs_fwd_name(atomic_read(&dest->conn_flags)), 1896 " -> [" NIP6_FMT "]:%04X"
1769 atomic_read(&dest->weight), 1897 " %-7s %-6d %-10d %-10d\n",
1770 atomic_read(&dest->activeconns), 1898 NIP6(dest->addr.in6),
1771 atomic_read(&dest->inactconns)); 1899 ntohs(dest->port),
1900 ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
1901 atomic_read(&dest->weight),
1902 atomic_read(&dest->activeconns),
1903 atomic_read(&dest->inactconns));
1904 else
1905#endif
1906 seq_printf(seq,
1907 " -> %08X:%04X "
1908 "%-7s %-6d %-10d %-10d\n",
1909 ntohl(dest->addr.ip),
1910 ntohs(dest->port),
1911 ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
1912 atomic_read(&dest->weight),
1913 atomic_read(&dest->activeconns),
1914 atomic_read(&dest->inactconns));
1915
1772 } 1916 }
1773 } 1917 }
1774 return 0; 1918 return 0;
@@ -1812,20 +1956,20 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
1812 " Conns Packets Packets Bytes Bytes\n"); 1956 " Conns Packets Packets Bytes Bytes\n");
1813 1957
1814 spin_lock_bh(&ip_vs_stats.lock); 1958 spin_lock_bh(&ip_vs_stats.lock);
1815 seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.conns, 1959 seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
1816 ip_vs_stats.inpkts, ip_vs_stats.outpkts, 1960 ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
1817 (unsigned long long) ip_vs_stats.inbytes, 1961 (unsigned long long) ip_vs_stats.ustats.inbytes,
1818 (unsigned long long) ip_vs_stats.outbytes); 1962 (unsigned long long) ip_vs_stats.ustats.outbytes);
1819 1963
1820/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */ 1964/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
1821 seq_puts(seq, 1965 seq_puts(seq,
1822 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n"); 1966 " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
1823 seq_printf(seq,"%8X %8X %8X %16X %16X\n", 1967 seq_printf(seq,"%8X %8X %8X %16X %16X\n",
1824 ip_vs_stats.cps, 1968 ip_vs_stats.ustats.cps,
1825 ip_vs_stats.inpps, 1969 ip_vs_stats.ustats.inpps,
1826 ip_vs_stats.outpps, 1970 ip_vs_stats.ustats.outpps,
1827 ip_vs_stats.inbps, 1971 ip_vs_stats.ustats.inbps,
1828 ip_vs_stats.outbps); 1972 ip_vs_stats.ustats.outbps);
1829 spin_unlock_bh(&ip_vs_stats.lock); 1973 spin_unlock_bh(&ip_vs_stats.lock);
1830 1974
1831 return 0; 1975 return 0;
@@ -1900,14 +2044,44 @@ static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
1900 [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN, 2044 [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN,
1901}; 2045};
1902 2046
2047static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc,
2048 struct ip_vs_service_user *usvc_compat)
2049{
2050 usvc->af = AF_INET;
2051 usvc->protocol = usvc_compat->protocol;
2052 usvc->addr.ip = usvc_compat->addr;
2053 usvc->port = usvc_compat->port;
2054 usvc->fwmark = usvc_compat->fwmark;
2055
2056 /* Deep copy of sched_name is not needed here */
2057 usvc->sched_name = usvc_compat->sched_name;
2058
2059 usvc->flags = usvc_compat->flags;
2060 usvc->timeout = usvc_compat->timeout;
2061 usvc->netmask = usvc_compat->netmask;
2062}
2063
2064static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
2065 struct ip_vs_dest_user *udest_compat)
2066{
2067 udest->addr.ip = udest_compat->addr;
2068 udest->port = udest_compat->port;
2069 udest->conn_flags = udest_compat->conn_flags;
2070 udest->weight = udest_compat->weight;
2071 udest->u_threshold = udest_compat->u_threshold;
2072 udest->l_threshold = udest_compat->l_threshold;
2073}
2074
1903static int 2075static int
1904do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) 2076do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1905{ 2077{
1906 int ret; 2078 int ret;
1907 unsigned char arg[MAX_ARG_LEN]; 2079 unsigned char arg[MAX_ARG_LEN];
1908 struct ip_vs_service_user *usvc; 2080 struct ip_vs_service_user *usvc_compat;
2081 struct ip_vs_service_user_kern usvc;
1909 struct ip_vs_service *svc; 2082 struct ip_vs_service *svc;
1910 struct ip_vs_dest_user *udest; 2083 struct ip_vs_dest_user *udest_compat;
2084 struct ip_vs_dest_user_kern udest;
1911 2085
1912 if (!capable(CAP_NET_ADMIN)) 2086 if (!capable(CAP_NET_ADMIN))
1913 return -EPERM; 2087 return -EPERM;
@@ -1947,35 +2121,40 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1947 goto out_unlock; 2121 goto out_unlock;
1948 } 2122 }
1949 2123
1950 usvc = (struct ip_vs_service_user *)arg; 2124 usvc_compat = (struct ip_vs_service_user *)arg;
1951 udest = (struct ip_vs_dest_user *)(usvc + 1); 2125 udest_compat = (struct ip_vs_dest_user *)(usvc_compat + 1);
2126
2127 /* We only use the new structs internally, so copy userspace compat
2128 * structs to extended internal versions */
2129 ip_vs_copy_usvc_compat(&usvc, usvc_compat);
2130 ip_vs_copy_udest_compat(&udest, udest_compat);
1952 2131
1953 if (cmd == IP_VS_SO_SET_ZERO) { 2132 if (cmd == IP_VS_SO_SET_ZERO) {
1954 /* if no service address is set, zero counters in all */ 2133 /* if no service address is set, zero counters in all */
1955 if (!usvc->fwmark && !usvc->addr && !usvc->port) { 2134 if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
1956 ret = ip_vs_zero_all(); 2135 ret = ip_vs_zero_all();
1957 goto out_unlock; 2136 goto out_unlock;
1958 } 2137 }
1959 } 2138 }
1960 2139
1961 /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */ 2140 /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
1962 if (usvc->protocol!=IPPROTO_TCP && usvc->protocol!=IPPROTO_UDP) { 2141 if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) {
1963 IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n", 2142 IP_VS_ERR("set_ctl: invalid protocol: %d %d.%d.%d.%d:%d %s\n",
1964 usvc->protocol, NIPQUAD(usvc->addr), 2143 usvc.protocol, NIPQUAD(usvc.addr.ip),
1965 ntohs(usvc->port), usvc->sched_name); 2144 ntohs(usvc.port), usvc.sched_name);
1966 ret = -EFAULT; 2145 ret = -EFAULT;
1967 goto out_unlock; 2146 goto out_unlock;
1968 } 2147 }
1969 2148
1970 /* Lookup the exact service by <protocol, addr, port> or fwmark */ 2149 /* Lookup the exact service by <protocol, addr, port> or fwmark */
1971 if (usvc->fwmark == 0) 2150 if (usvc.fwmark == 0)
1972 svc = __ip_vs_service_get(usvc->protocol, 2151 svc = __ip_vs_service_get(usvc.af, usvc.protocol,
1973 usvc->addr, usvc->port); 2152 &usvc.addr, usvc.port);
1974 else 2153 else
1975 svc = __ip_vs_svc_fwm_get(usvc->fwmark); 2154 svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
1976 2155
1977 if (cmd != IP_VS_SO_SET_ADD 2156 if (cmd != IP_VS_SO_SET_ADD
1978 && (svc == NULL || svc->protocol != usvc->protocol)) { 2157 && (svc == NULL || svc->protocol != usvc.protocol)) {
1979 ret = -ESRCH; 2158 ret = -ESRCH;
1980 goto out_unlock; 2159 goto out_unlock;
1981 } 2160 }
@@ -1985,10 +2164,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1985 if (svc != NULL) 2164 if (svc != NULL)
1986 ret = -EEXIST; 2165 ret = -EEXIST;
1987 else 2166 else
1988 ret = ip_vs_add_service(usvc, &svc); 2167 ret = ip_vs_add_service(&usvc, &svc);
1989 break; 2168 break;
1990 case IP_VS_SO_SET_EDIT: 2169 case IP_VS_SO_SET_EDIT:
1991 ret = ip_vs_edit_service(svc, usvc); 2170 ret = ip_vs_edit_service(svc, &usvc);
1992 break; 2171 break;
1993 case IP_VS_SO_SET_DEL: 2172 case IP_VS_SO_SET_DEL:
1994 ret = ip_vs_del_service(svc); 2173 ret = ip_vs_del_service(svc);
@@ -1999,13 +2178,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1999 ret = ip_vs_zero_service(svc); 2178 ret = ip_vs_zero_service(svc);
2000 break; 2179 break;
2001 case IP_VS_SO_SET_ADDDEST: 2180 case IP_VS_SO_SET_ADDDEST:
2002 ret = ip_vs_add_dest(svc, udest); 2181 ret = ip_vs_add_dest(svc, &udest);
2003 break; 2182 break;
2004 case IP_VS_SO_SET_EDITDEST: 2183 case IP_VS_SO_SET_EDITDEST:
2005 ret = ip_vs_edit_dest(svc, udest); 2184 ret = ip_vs_edit_dest(svc, &udest);
2006 break; 2185 break;
2007 case IP_VS_SO_SET_DELDEST: 2186 case IP_VS_SO_SET_DELDEST:
2008 ret = ip_vs_del_dest(svc, udest); 2187 ret = ip_vs_del_dest(svc, &udest);
2009 break; 2188 break;
2010 default: 2189 default:
2011 ret = -EINVAL; 2190 ret = -EINVAL;
@@ -2028,7 +2207,7 @@ static void
2028ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src) 2207ip_vs_copy_stats(struct ip_vs_stats_user *dst, struct ip_vs_stats *src)
2029{ 2208{
2030 spin_lock_bh(&src->lock); 2209 spin_lock_bh(&src->lock);
2031 memcpy(dst, src, (char*)&src->lock - (char*)src); 2210 memcpy(dst, &src->ustats, sizeof(*dst));
2032 spin_unlock_bh(&src->lock); 2211 spin_unlock_bh(&src->lock);
2033} 2212}
2034 2213
@@ -2036,7 +2215,7 @@ static void
2036ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) 2215ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
2037{ 2216{
2038 dst->protocol = src->protocol; 2217 dst->protocol = src->protocol;
2039 dst->addr = src->addr; 2218 dst->addr = src->addr.ip;
2040 dst->port = src->port; 2219 dst->port = src->port;
2041 dst->fwmark = src->fwmark; 2220 dst->fwmark = src->fwmark;
2042 strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); 2221 strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name));
@@ -2058,6 +2237,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
2058 2237
2059 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 2238 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
2060 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { 2239 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
2240 /* Only expose IPv4 entries to old interface */
2241 if (svc->af != AF_INET)
2242 continue;
2243
2061 if (count >= get->num_services) 2244 if (count >= get->num_services)
2062 goto out; 2245 goto out;
2063 memset(&entry, 0, sizeof(entry)); 2246 memset(&entry, 0, sizeof(entry));
@@ -2073,6 +2256,10 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
2073 2256
2074 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 2257 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
2075 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { 2258 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
2259 /* Only expose IPv4 entries to old interface */
2260 if (svc->af != AF_INET)
2261 continue;
2262
2076 if (count >= get->num_services) 2263 if (count >= get->num_services)
2077 goto out; 2264 goto out;
2078 memset(&entry, 0, sizeof(entry)); 2265 memset(&entry, 0, sizeof(entry));
@@ -2094,13 +2281,15 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
2094 struct ip_vs_get_dests __user *uptr) 2281 struct ip_vs_get_dests __user *uptr)
2095{ 2282{
2096 struct ip_vs_service *svc; 2283 struct ip_vs_service *svc;
2284 union nf_inet_addr addr = { .ip = get->addr };
2097 int ret = 0; 2285 int ret = 0;
2098 2286
2099 if (get->fwmark) 2287 if (get->fwmark)
2100 svc = __ip_vs_svc_fwm_get(get->fwmark); 2288 svc = __ip_vs_svc_fwm_get(AF_INET, get->fwmark);
2101 else 2289 else
2102 svc = __ip_vs_service_get(get->protocol, 2290 svc = __ip_vs_service_get(AF_INET, get->protocol, &addr,
2103 get->addr, get->port); 2291 get->port);
2292
2104 if (svc) { 2293 if (svc) {
2105 int count = 0; 2294 int count = 0;
2106 struct ip_vs_dest *dest; 2295 struct ip_vs_dest *dest;
@@ -2110,7 +2299,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
2110 if (count >= get->num_dests) 2299 if (count >= get->num_dests)
2111 break; 2300 break;
2112 2301
2113 entry.addr = dest->addr; 2302 entry.addr = dest->addr.ip;
2114 entry.port = dest->port; 2303 entry.port = dest->port;
2115 entry.conn_flags = atomic_read(&dest->conn_flags); 2304 entry.conn_flags = atomic_read(&dest->conn_flags);
2116 entry.weight = atomic_read(&dest->weight); 2305 entry.weight = atomic_read(&dest->weight);
@@ -2235,13 +2424,15 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2235 { 2424 {
2236 struct ip_vs_service_entry *entry; 2425 struct ip_vs_service_entry *entry;
2237 struct ip_vs_service *svc; 2426 struct ip_vs_service *svc;
2427 union nf_inet_addr addr;
2238 2428
2239 entry = (struct ip_vs_service_entry *)arg; 2429 entry = (struct ip_vs_service_entry *)arg;
2430 addr.ip = entry->addr;
2240 if (entry->fwmark) 2431 if (entry->fwmark)
2241 svc = __ip_vs_svc_fwm_get(entry->fwmark); 2432 svc = __ip_vs_svc_fwm_get(AF_INET, entry->fwmark);
2242 else 2433 else
2243 svc = __ip_vs_service_get(entry->protocol, 2434 svc = __ip_vs_service_get(AF_INET, entry->protocol,
2244 entry->addr, entry->port); 2435 &addr, entry->port);
2245 if (svc) { 2436 if (svc) {
2246 ip_vs_copy_service(entry, svc); 2437 ip_vs_copy_service(entry, svc);
2247 if (copy_to_user(user, entry, sizeof(*entry)) != 0) 2438 if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2320,6 +2511,875 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
2320 .owner = THIS_MODULE, 2511 .owner = THIS_MODULE,
2321}; 2512};
2322 2513
2514/*
2515 * Generic Netlink interface
2516 */
2517
2518/* IPVS genetlink family */
2519static struct genl_family ip_vs_genl_family = {
2520 .id = GENL_ID_GENERATE,
2521 .hdrsize = 0,
2522 .name = IPVS_GENL_NAME,
2523 .version = IPVS_GENL_VERSION,
2524 .maxattr = IPVS_CMD_MAX,
2525};
2526
2527/* Policy used for first-level command attributes */
2528static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
2529 [IPVS_CMD_ATTR_SERVICE] = { .type = NLA_NESTED },
2530 [IPVS_CMD_ATTR_DEST] = { .type = NLA_NESTED },
2531 [IPVS_CMD_ATTR_DAEMON] = { .type = NLA_NESTED },
2532 [IPVS_CMD_ATTR_TIMEOUT_TCP] = { .type = NLA_U32 },
2533 [IPVS_CMD_ATTR_TIMEOUT_TCP_FIN] = { .type = NLA_U32 },
2534 [IPVS_CMD_ATTR_TIMEOUT_UDP] = { .type = NLA_U32 },
2535};
2536
2537/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DAEMON */
2538static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
2539 [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
2540 [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
2541 .len = IP_VS_IFNAME_MAXLEN },
2542 [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
2543};
2544
2545/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
2546static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
2547 [IPVS_SVC_ATTR_AF] = { .type = NLA_U16 },
2548 [IPVS_SVC_ATTR_PROTOCOL] = { .type = NLA_U16 },
2549 [IPVS_SVC_ATTR_ADDR] = { .type = NLA_BINARY,
2550 .len = sizeof(union nf_inet_addr) },
2551 [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
2552 [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
2553 [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
2554 .len = IP_VS_SCHEDNAME_MAXLEN },
2555 [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
2556 .len = sizeof(struct ip_vs_flags) },
2557 [IPVS_SVC_ATTR_TIMEOUT] = { .type = NLA_U32 },
2558 [IPVS_SVC_ATTR_NETMASK] = { .type = NLA_U32 },
2559 [IPVS_SVC_ATTR_STATS] = { .type = NLA_NESTED },
2560};
2561
2562/* Policy used for attributes in nested attribute IPVS_CMD_ATTR_DEST */
2563static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
2564 [IPVS_DEST_ATTR_ADDR] = { .type = NLA_BINARY,
2565 .len = sizeof(union nf_inet_addr) },
2566 [IPVS_DEST_ATTR_PORT] = { .type = NLA_U16 },
2567 [IPVS_DEST_ATTR_FWD_METHOD] = { .type = NLA_U32 },
2568 [IPVS_DEST_ATTR_WEIGHT] = { .type = NLA_U32 },
2569 [IPVS_DEST_ATTR_U_THRESH] = { .type = NLA_U32 },
2570 [IPVS_DEST_ATTR_L_THRESH] = { .type = NLA_U32 },
2571 [IPVS_DEST_ATTR_ACTIVE_CONNS] = { .type = NLA_U32 },
2572 [IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 },
2573 [IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 },
2574 [IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED },
2575};
2576
2577static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
2578 struct ip_vs_stats *stats)
2579{
2580 struct nlattr *nl_stats = nla_nest_start(skb, container_type);
2581 if (!nl_stats)
2582 return -EMSGSIZE;
2583
2584 spin_lock_bh(&stats->lock);
2585
2586 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, stats->ustats.conns);
2587 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, stats->ustats.inpkts);
2588 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, stats->ustats.outpkts);
2589 NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, stats->ustats.inbytes);
2590 NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, stats->ustats.outbytes);
2591 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, stats->ustats.cps);
2592 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, stats->ustats.inpps);
2593 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, stats->ustats.outpps);
2594 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, stats->ustats.inbps);
2595 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, stats->ustats.outbps);
2596
2597 spin_unlock_bh(&stats->lock);
2598
2599 nla_nest_end(skb, nl_stats);
2600
2601 return 0;
2602
2603nla_put_failure:
2604 spin_unlock_bh(&stats->lock);
2605 nla_nest_cancel(skb, nl_stats);
2606 return -EMSGSIZE;
2607}
2608
2609static int ip_vs_genl_fill_service(struct sk_buff *skb,
2610 struct ip_vs_service *svc)
2611{
2612 struct nlattr *nl_service;
2613 struct ip_vs_flags flags = { .flags = svc->flags,
2614 .mask = ~0 };
2615
2616 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
2617 if (!nl_service)
2618 return -EMSGSIZE;
2619
2620 NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af);
2621
2622 if (svc->fwmark) {
2623 NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark);
2624 } else {
2625 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol);
2626 NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr);
2627 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port);
2628 }
2629
2630 NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name);
2631 NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags);
2632 NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ);
2633 NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask);
2634
2635 if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
2636 goto nla_put_failure;
2637
2638 nla_nest_end(skb, nl_service);
2639
2640 return 0;
2641
2642nla_put_failure:
2643 nla_nest_cancel(skb, nl_service);
2644 return -EMSGSIZE;
2645}
2646
2647static int ip_vs_genl_dump_service(struct sk_buff *skb,
2648 struct ip_vs_service *svc,
2649 struct netlink_callback *cb)
2650{
2651 void *hdr;
2652
2653 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2654 &ip_vs_genl_family, NLM_F_MULTI,
2655 IPVS_CMD_NEW_SERVICE);
2656 if (!hdr)
2657 return -EMSGSIZE;
2658
2659 if (ip_vs_genl_fill_service(skb, svc) < 0)
2660 goto nla_put_failure;
2661
2662 return genlmsg_end(skb, hdr);
2663
2664nla_put_failure:
2665 genlmsg_cancel(skb, hdr);
2666 return -EMSGSIZE;
2667}
2668
2669static int ip_vs_genl_dump_services(struct sk_buff *skb,
2670 struct netlink_callback *cb)
2671{
2672 int idx = 0, i;
2673 int start = cb->args[0];
2674 struct ip_vs_service *svc;
2675
2676 mutex_lock(&__ip_vs_mutex);
2677 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2678 list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
2679 if (++idx <= start)
2680 continue;
2681 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
2682 idx--;
2683 goto nla_put_failure;
2684 }
2685 }
2686 }
2687
2688 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2689 list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
2690 if (++idx <= start)
2691 continue;
2692 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
2693 idx--;
2694 goto nla_put_failure;
2695 }
2696 }
2697 }
2698
2699nla_put_failure:
2700 mutex_unlock(&__ip_vs_mutex);
2701 cb->args[0] = idx;
2702
2703 return skb->len;
2704}
2705
2706static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
2707 struct nlattr *nla, int full_entry)
2708{
2709 struct nlattr *attrs[IPVS_SVC_ATTR_MAX + 1];
2710 struct nlattr *nla_af, *nla_port, *nla_fwmark, *nla_protocol, *nla_addr;
2711
2712 /* Parse mandatory identifying service fields first */
2713 if (nla == NULL ||
2714 nla_parse_nested(attrs, IPVS_SVC_ATTR_MAX, nla, ip_vs_svc_policy))
2715 return -EINVAL;
2716
2717 nla_af = attrs[IPVS_SVC_ATTR_AF];
2718 nla_protocol = attrs[IPVS_SVC_ATTR_PROTOCOL];
2719 nla_addr = attrs[IPVS_SVC_ATTR_ADDR];
2720 nla_port = attrs[IPVS_SVC_ATTR_PORT];
2721 nla_fwmark = attrs[IPVS_SVC_ATTR_FWMARK];
2722
2723 if (!(nla_af && (nla_fwmark || (nla_port && nla_protocol && nla_addr))))
2724 return -EINVAL;
2725
2726 usvc->af = nla_get_u16(nla_af);
2727#ifdef CONFIG_IP_VS_IPV6
2728 if (usvc->af != AF_INET && usvc->af != AF_INET6)
2729#else
2730 if (usvc->af != AF_INET)
2731#endif
2732 return -EAFNOSUPPORT;
2733
2734 if (nla_fwmark) {
2735 usvc->protocol = IPPROTO_TCP;
2736 usvc->fwmark = nla_get_u32(nla_fwmark);
2737 } else {
2738 usvc->protocol = nla_get_u16(nla_protocol);
2739 nla_memcpy(&usvc->addr, nla_addr, sizeof(usvc->addr));
2740 usvc->port = nla_get_u16(nla_port);
2741 usvc->fwmark = 0;
2742 }
2743
2744 /* If a full entry was requested, check for the additional fields */
2745 if (full_entry) {
2746 struct nlattr *nla_sched, *nla_flags, *nla_timeout,
2747 *nla_netmask;
2748 struct ip_vs_flags flags;
2749 struct ip_vs_service *svc;
2750
2751 nla_sched = attrs[IPVS_SVC_ATTR_SCHED_NAME];
2752 nla_flags = attrs[IPVS_SVC_ATTR_FLAGS];
2753 nla_timeout = attrs[IPVS_SVC_ATTR_TIMEOUT];
2754 nla_netmask = attrs[IPVS_SVC_ATTR_NETMASK];
2755
2756 if (!(nla_sched && nla_flags && nla_timeout && nla_netmask))
2757 return -EINVAL;
2758
2759 nla_memcpy(&flags, nla_flags, sizeof(flags));
2760
2761 /* prefill flags from service if it already exists */
2762 if (usvc->fwmark)
2763 svc = __ip_vs_svc_fwm_get(usvc->af, usvc->fwmark);
2764 else
2765 svc = __ip_vs_service_get(usvc->af, usvc->protocol,
2766 &usvc->addr, usvc->port);
2767 if (svc) {
2768 usvc->flags = svc->flags;
2769 ip_vs_service_put(svc);
2770 } else
2771 usvc->flags = 0;
2772
2773 /* set new flags from userland */
2774 usvc->flags = (usvc->flags & ~flags.mask) |
2775 (flags.flags & flags.mask);
2776 usvc->sched_name = nla_data(nla_sched);
2777 usvc->timeout = nla_get_u32(nla_timeout);
2778 usvc->netmask = nla_get_u32(nla_netmask);
2779 }
2780
2781 return 0;
2782}
2783
2784static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
2785{
2786 struct ip_vs_service_user_kern usvc;
2787 int ret;
2788
2789 ret = ip_vs_genl_parse_service(&usvc, nla, 0);
2790 if (ret)
2791 return ERR_PTR(ret);
2792
2793 if (usvc.fwmark)
2794 return __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
2795 else
2796 return __ip_vs_service_get(usvc.af, usvc.protocol,
2797 &usvc.addr, usvc.port);
2798}
2799
2800static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
2801{
2802 struct nlattr *nl_dest;
2803
2804 nl_dest = nla_nest_start(skb, IPVS_CMD_ATTR_DEST);
2805 if (!nl_dest)
2806 return -EMSGSIZE;
2807
2808 NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr);
2809 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
2810
2811 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
2812 atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
2813 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
2814 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
2815 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
2816 NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
2817 atomic_read(&dest->activeconns));
2818 NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS,
2819 atomic_read(&dest->inactconns));
2820 NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
2821 atomic_read(&dest->persistconns));
2822
2823 if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
2824 goto nla_put_failure;
2825
2826 nla_nest_end(skb, nl_dest);
2827
2828 return 0;
2829
2830nla_put_failure:
2831 nla_nest_cancel(skb, nl_dest);
2832 return -EMSGSIZE;
2833}
2834
2835static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest,
2836 struct netlink_callback *cb)
2837{
2838 void *hdr;
2839
2840 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2841 &ip_vs_genl_family, NLM_F_MULTI,
2842 IPVS_CMD_NEW_DEST);
2843 if (!hdr)
2844 return -EMSGSIZE;
2845
2846 if (ip_vs_genl_fill_dest(skb, dest) < 0)
2847 goto nla_put_failure;
2848
2849 return genlmsg_end(skb, hdr);
2850
2851nla_put_failure:
2852 genlmsg_cancel(skb, hdr);
2853 return -EMSGSIZE;
2854}
2855
2856static int ip_vs_genl_dump_dests(struct sk_buff *skb,
2857 struct netlink_callback *cb)
2858{
2859 int idx = 0;
2860 int start = cb->args[0];
2861 struct ip_vs_service *svc;
2862 struct ip_vs_dest *dest;
2863 struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
2864
2865 mutex_lock(&__ip_vs_mutex);
2866
2867 /* Try to find the service for which to dump destinations */
2868 if (nlmsg_parse(cb->nlh, GENL_HDRLEN, attrs,
2869 IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
2870 goto out_err;
2871
2872 svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
2873 if (IS_ERR(svc) || svc == NULL)
2874 goto out_err;
2875
2876 /* Dump the destinations */
2877 list_for_each_entry(dest, &svc->destinations, n_list) {
2878 if (++idx <= start)
2879 continue;
2880 if (ip_vs_genl_dump_dest(skb, dest, cb) < 0) {
2881 idx--;
2882 goto nla_put_failure;
2883 }
2884 }
2885
2886nla_put_failure:
2887 cb->args[0] = idx;
2888 ip_vs_service_put(svc);
2889
2890out_err:
2891 mutex_unlock(&__ip_vs_mutex);
2892
2893 return skb->len;
2894}
2895
2896static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
2897 struct nlattr *nla, int full_entry)
2898{
2899 struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
2900 struct nlattr *nla_addr, *nla_port;
2901
2902 /* Parse mandatory identifying destination fields first */
2903 if (nla == NULL ||
2904 nla_parse_nested(attrs, IPVS_DEST_ATTR_MAX, nla, ip_vs_dest_policy))
2905 return -EINVAL;
2906
2907 nla_addr = attrs[IPVS_DEST_ATTR_ADDR];
2908 nla_port = attrs[IPVS_DEST_ATTR_PORT];
2909
2910 if (!(nla_addr && nla_port))
2911 return -EINVAL;
2912
2913 nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
2914 udest->port = nla_get_u16(nla_port);
2915
2916 /* If a full entry was requested, check for the additional fields */
2917 if (full_entry) {
2918 struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
2919 *nla_l_thresh;
2920
2921 nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD];
2922 nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT];
2923 nla_u_thresh = attrs[IPVS_DEST_ATTR_U_THRESH];
2924 nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH];
2925
2926 if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
2927 return -EINVAL;
2928
2929 udest->conn_flags = nla_get_u32(nla_fwd)
2930 & IP_VS_CONN_F_FWD_MASK;
2931 udest->weight = nla_get_u32(nla_weight);
2932 udest->u_threshold = nla_get_u32(nla_u_thresh);
2933 udest->l_threshold = nla_get_u32(nla_l_thresh);
2934 }
2935
2936 return 0;
2937}
2938
2939static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
2940 const char *mcast_ifn, __be32 syncid)
2941{
2942 struct nlattr *nl_daemon;
2943
2944 nl_daemon = nla_nest_start(skb, IPVS_CMD_ATTR_DAEMON);
2945 if (!nl_daemon)
2946 return -EMSGSIZE;
2947
2948 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state);
2949 NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn);
2950 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid);
2951
2952 nla_nest_end(skb, nl_daemon);
2953
2954 return 0;
2955
2956nla_put_failure:
2957 nla_nest_cancel(skb, nl_daemon);
2958 return -EMSGSIZE;
2959}
2960
2961static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __be32 state,
2962 const char *mcast_ifn, __be32 syncid,
2963 struct netlink_callback *cb)
2964{
2965 void *hdr;
2966 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
2967 &ip_vs_genl_family, NLM_F_MULTI,
2968 IPVS_CMD_NEW_DAEMON);
2969 if (!hdr)
2970 return -EMSGSIZE;
2971
2972 if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
2973 goto nla_put_failure;
2974
2975 return genlmsg_end(skb, hdr);
2976
2977nla_put_failure:
2978 genlmsg_cancel(skb, hdr);
2979 return -EMSGSIZE;
2980}
2981
2982static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
2983 struct netlink_callback *cb)
2984{
2985 mutex_lock(&__ip_vs_mutex);
2986 if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
2987 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
2988 ip_vs_master_mcast_ifn,
2989 ip_vs_master_syncid, cb) < 0)
2990 goto nla_put_failure;
2991
2992 cb->args[0] = 1;
2993 }
2994
2995 if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
2996 if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
2997 ip_vs_backup_mcast_ifn,
2998 ip_vs_backup_syncid, cb) < 0)
2999 goto nla_put_failure;
3000
3001 cb->args[1] = 1;
3002 }
3003
3004nla_put_failure:
3005 mutex_unlock(&__ip_vs_mutex);
3006
3007 return skb->len;
3008}
3009
3010static int ip_vs_genl_new_daemon(struct nlattr **attrs)
3011{
3012 if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
3013 attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
3014 attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
3015 return -EINVAL;
3016
3017 return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
3018 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
3019 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
3020}
3021
3022static int ip_vs_genl_del_daemon(struct nlattr **attrs)
3023{
3024 if (!attrs[IPVS_DAEMON_ATTR_STATE])
3025 return -EINVAL;
3026
3027 return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
3028}
3029
3030static int ip_vs_genl_set_config(struct nlattr **attrs)
3031{
3032 struct ip_vs_timeout_user t;
3033
3034 __ip_vs_get_timeouts(&t);
3035
3036 if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
3037 t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
3038
3039 if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN])
3040 t.tcp_fin_timeout =
3041 nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP_FIN]);
3042
3043 if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
3044 t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
3045
3046 return ip_vs_set_timeout(&t);
3047}
3048
3049static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3050{
3051 struct ip_vs_service *svc = NULL;
3052 struct ip_vs_service_user_kern usvc;
3053 struct ip_vs_dest_user_kern udest;
3054 int ret = 0, cmd;
3055 int need_full_svc = 0, need_full_dest = 0;
3056
3057 cmd = info->genlhdr->cmd;
3058
3059 mutex_lock(&__ip_vs_mutex);
3060
3061 if (cmd == IPVS_CMD_FLUSH) {
3062 ret = ip_vs_flush();
3063 goto out;
3064 } else if (cmd == IPVS_CMD_SET_CONFIG) {
3065 ret = ip_vs_genl_set_config(info->attrs);
3066 goto out;
3067 } else if (cmd == IPVS_CMD_NEW_DAEMON ||
3068 cmd == IPVS_CMD_DEL_DAEMON) {
3069
3070 struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
3071
3072 if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
3073 nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
3074 info->attrs[IPVS_CMD_ATTR_DAEMON],
3075 ip_vs_daemon_policy)) {
3076 ret = -EINVAL;
3077 goto out;
3078 }
3079
3080 if (cmd == IPVS_CMD_NEW_DAEMON)
3081 ret = ip_vs_genl_new_daemon(daemon_attrs);
3082 else
3083 ret = ip_vs_genl_del_daemon(daemon_attrs);
3084 goto out;
3085 } else if (cmd == IPVS_CMD_ZERO &&
3086 !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
3087 ret = ip_vs_zero_all();
3088 goto out;
3089 }
3090
3091 /* All following commands require a service argument, so check if we
3092 * received a valid one. We need a full service specification when
3093 * adding / editing a service. Only identifying members otherwise. */
3094 if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
3095 need_full_svc = 1;
3096
3097 ret = ip_vs_genl_parse_service(&usvc,
3098 info->attrs[IPVS_CMD_ATTR_SERVICE],
3099 need_full_svc);
3100 if (ret)
3101 goto out;
3102
3103 /* Lookup the exact service by <protocol, addr, port> or fwmark */
3104 if (usvc.fwmark == 0)
3105 svc = __ip_vs_service_get(usvc.af, usvc.protocol,
3106 &usvc.addr, usvc.port);
3107 else
3108 svc = __ip_vs_svc_fwm_get(usvc.af, usvc.fwmark);
3109
3110 /* Unless we're adding a new service, the service must already exist */
3111 if ((cmd != IPVS_CMD_NEW_SERVICE) && (svc == NULL)) {
3112 ret = -ESRCH;
3113 goto out;
3114 }
3115
3116 /* Destination commands require a valid destination argument. For
3117 * adding / editing a destination, we need a full destination
3118 * specification. */
3119 if (cmd == IPVS_CMD_NEW_DEST || cmd == IPVS_CMD_SET_DEST ||
3120 cmd == IPVS_CMD_DEL_DEST) {
3121 if (cmd != IPVS_CMD_DEL_DEST)
3122 need_full_dest = 1;
3123
3124 ret = ip_vs_genl_parse_dest(&udest,
3125 info->attrs[IPVS_CMD_ATTR_DEST],
3126 need_full_dest);
3127 if (ret)
3128 goto out;
3129 }
3130
3131 switch (cmd) {
3132 case IPVS_CMD_NEW_SERVICE:
3133 if (svc == NULL)
3134 ret = ip_vs_add_service(&usvc, &svc);
3135 else
3136 ret = -EEXIST;
3137 break;
3138 case IPVS_CMD_SET_SERVICE:
3139 ret = ip_vs_edit_service(svc, &usvc);
3140 break;
3141 case IPVS_CMD_DEL_SERVICE:
3142 ret = ip_vs_del_service(svc);
3143 break;
3144 case IPVS_CMD_NEW_DEST:
3145 ret = ip_vs_add_dest(svc, &udest);
3146 break;
3147 case IPVS_CMD_SET_DEST:
3148 ret = ip_vs_edit_dest(svc, &udest);
3149 break;
3150 case IPVS_CMD_DEL_DEST:
3151 ret = ip_vs_del_dest(svc, &udest);
3152 break;
3153 case IPVS_CMD_ZERO:
3154 ret = ip_vs_zero_service(svc);
3155 break;
3156 default:
3157 ret = -EINVAL;
3158 }
3159
3160out:
3161 if (svc)
3162 ip_vs_service_put(svc);
3163 mutex_unlock(&__ip_vs_mutex);
3164
3165 return ret;
3166}
3167
3168static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3169{
3170 struct sk_buff *msg;
3171 void *reply;
3172 int ret, cmd, reply_cmd;
3173
3174 cmd = info->genlhdr->cmd;
3175
3176 if (cmd == IPVS_CMD_GET_SERVICE)
3177 reply_cmd = IPVS_CMD_NEW_SERVICE;
3178 else if (cmd == IPVS_CMD_GET_INFO)
3179 reply_cmd = IPVS_CMD_SET_INFO;
3180 else if (cmd == IPVS_CMD_GET_CONFIG)
3181 reply_cmd = IPVS_CMD_SET_CONFIG;
3182 else {
3183 IP_VS_ERR("unknown Generic Netlink command\n");
3184 return -EINVAL;
3185 }
3186
3187 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3188 if (!msg)
3189 return -ENOMEM;
3190
3191 mutex_lock(&__ip_vs_mutex);
3192
3193 reply = genlmsg_put_reply(msg, info, &ip_vs_genl_family, 0, reply_cmd);
3194 if (reply == NULL)
3195 goto nla_put_failure;
3196
3197 switch (cmd) {
3198 case IPVS_CMD_GET_SERVICE:
3199 {
3200 struct ip_vs_service *svc;
3201
3202 svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
3203 if (IS_ERR(svc)) {
3204 ret = PTR_ERR(svc);
3205 goto out_err;
3206 } else if (svc) {
3207 ret = ip_vs_genl_fill_service(msg, svc);
3208 ip_vs_service_put(svc);
3209 if (ret)
3210 goto nla_put_failure;
3211 } else {
3212 ret = -ESRCH;
3213 goto out_err;
3214 }
3215
3216 break;
3217 }
3218
3219 case IPVS_CMD_GET_CONFIG:
3220 {
3221 struct ip_vs_timeout_user t;
3222
3223 __ip_vs_get_timeouts(&t);
3224#ifdef CONFIG_IP_VS_PROTO_TCP
3225 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
3226 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
3227 t.tcp_fin_timeout);
3228#endif
3229#ifdef CONFIG_IP_VS_PROTO_UDP
3230 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout);
3231#endif
3232
3233 break;
3234 }
3235
3236 case IPVS_CMD_GET_INFO:
3237 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
3238 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3239 IP_VS_CONN_TAB_SIZE);
3240 break;
3241 }
3242
3243 genlmsg_end(msg, reply);
3244 ret = genlmsg_unicast(msg, info->snd_pid);
3245 goto out;
3246
3247nla_put_failure:
3248 IP_VS_ERR("not enough space in Netlink message\n");
3249 ret = -EMSGSIZE;
3250
3251out_err:
3252 nlmsg_free(msg);
3253out:
3254 mutex_unlock(&__ip_vs_mutex);
3255
3256 return ret;
3257}
3258
3259
3260static struct genl_ops ip_vs_genl_ops[] __read_mostly = {
3261 {
3262 .cmd = IPVS_CMD_NEW_SERVICE,
3263 .flags = GENL_ADMIN_PERM,
3264 .policy = ip_vs_cmd_policy,
3265 .doit = ip_vs_genl_set_cmd,
3266 },
3267 {
3268 .cmd = IPVS_CMD_SET_SERVICE,
3269 .flags = GENL_ADMIN_PERM,
3270 .policy = ip_vs_cmd_policy,
3271 .doit = ip_vs_genl_set_cmd,
3272 },
3273 {
3274 .cmd = IPVS_CMD_DEL_SERVICE,
3275 .flags = GENL_ADMIN_PERM,
3276 .policy = ip_vs_cmd_policy,
3277 .doit = ip_vs_genl_set_cmd,
3278 },
3279 {
3280 .cmd = IPVS_CMD_GET_SERVICE,
3281 .flags = GENL_ADMIN_PERM,
3282 .doit = ip_vs_genl_get_cmd,
3283 .dumpit = ip_vs_genl_dump_services,
3284 .policy = ip_vs_cmd_policy,
3285 },
3286 {
3287 .cmd = IPVS_CMD_NEW_DEST,
3288 .flags = GENL_ADMIN_PERM,
3289 .policy = ip_vs_cmd_policy,
3290 .doit = ip_vs_genl_set_cmd,
3291 },
3292 {
3293 .cmd = IPVS_CMD_SET_DEST,
3294 .flags = GENL_ADMIN_PERM,
3295 .policy = ip_vs_cmd_policy,
3296 .doit = ip_vs_genl_set_cmd,
3297 },
3298 {
3299 .cmd = IPVS_CMD_DEL_DEST,
3300 .flags = GENL_ADMIN_PERM,
3301 .policy = ip_vs_cmd_policy,
3302 .doit = ip_vs_genl_set_cmd,
3303 },
3304 {
3305 .cmd = IPVS_CMD_GET_DEST,
3306 .flags = GENL_ADMIN_PERM,
3307 .policy = ip_vs_cmd_policy,
3308 .dumpit = ip_vs_genl_dump_dests,
3309 },
3310 {
3311 .cmd = IPVS_CMD_NEW_DAEMON,
3312 .flags = GENL_ADMIN_PERM,
3313 .policy = ip_vs_cmd_policy,
3314 .doit = ip_vs_genl_set_cmd,
3315 },
3316 {
3317 .cmd = IPVS_CMD_DEL_DAEMON,
3318 .flags = GENL_ADMIN_PERM,
3319 .policy = ip_vs_cmd_policy,
3320 .doit = ip_vs_genl_set_cmd,
3321 },
3322 {
3323 .cmd = IPVS_CMD_GET_DAEMON,
3324 .flags = GENL_ADMIN_PERM,
3325 .dumpit = ip_vs_genl_dump_daemons,
3326 },
3327 {
3328 .cmd = IPVS_CMD_SET_CONFIG,
3329 .flags = GENL_ADMIN_PERM,
3330 .policy = ip_vs_cmd_policy,
3331 .doit = ip_vs_genl_set_cmd,
3332 },
3333 {
3334 .cmd = IPVS_CMD_GET_CONFIG,
3335 .flags = GENL_ADMIN_PERM,
3336 .doit = ip_vs_genl_get_cmd,
3337 },
3338 {
3339 .cmd = IPVS_CMD_GET_INFO,
3340 .flags = GENL_ADMIN_PERM,
3341 .doit = ip_vs_genl_get_cmd,
3342 },
3343 {
3344 .cmd = IPVS_CMD_ZERO,
3345 .flags = GENL_ADMIN_PERM,
3346 .policy = ip_vs_cmd_policy,
3347 .doit = ip_vs_genl_set_cmd,
3348 },
3349 {
3350 .cmd = IPVS_CMD_FLUSH,
3351 .flags = GENL_ADMIN_PERM,
3352 .doit = ip_vs_genl_set_cmd,
3353 },
3354};
3355
3356static int __init ip_vs_genl_register(void)
3357{
3358 int ret, i;
3359
3360 ret = genl_register_family(&ip_vs_genl_family);
3361 if (ret)
3362 return ret;
3363
3364 for (i = 0; i < ARRAY_SIZE(ip_vs_genl_ops); i++) {
3365 ret = genl_register_ops(&ip_vs_genl_family, &ip_vs_genl_ops[i]);
3366 if (ret)
3367 goto err_out;
3368 }
3369 return 0;
3370
3371err_out:
3372 genl_unregister_family(&ip_vs_genl_family);
3373 return ret;
3374}
3375
3376static void ip_vs_genl_unregister(void)
3377{
3378 genl_unregister_family(&ip_vs_genl_family);
3379}
3380
3381/* End of Generic Netlink interface definitions */
3382
2323 3383
2324int __init ip_vs_control_init(void) 3384int __init ip_vs_control_init(void)
2325{ 3385{
@@ -2334,6 +3394,13 @@ int __init ip_vs_control_init(void)
2334 return ret; 3394 return ret;
2335 } 3395 }
2336 3396
3397 ret = ip_vs_genl_register();
3398 if (ret) {
3399 IP_VS_ERR("cannot register Generic Netlink interface.\n");
3400 nf_unregister_sockopt(&ip_vs_sockopts);
3401 return ret;
3402 }
3403
2337 proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops); 3404 proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
2338 proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops); 3405 proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
2339 3406
@@ -2368,6 +3435,7 @@ void ip_vs_control_cleanup(void)
2368 unregister_sysctl_table(sysctl_header); 3435 unregister_sysctl_table(sysctl_header);
2369 proc_net_remove(&init_net, "ip_vs_stats"); 3436 proc_net_remove(&init_net, "ip_vs_stats");
2370 proc_net_remove(&init_net, "ip_vs"); 3437 proc_net_remove(&init_net, "ip_vs");
3438 ip_vs_genl_unregister();
2371 nf_unregister_sockopt(&ip_vs_sockopts); 3439 nf_unregister_sockopt(&ip_vs_sockopts);
2372 LeaveFunction(2); 3440 LeaveFunction(2);
2373} 3441}
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index fa66824d264f..a16943fd72f1 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -218,7 +218,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
218 IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u " 218 IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u "
219 "--> server %u.%u.%u.%u:%d\n", 219 "--> server %u.%u.%u.%u:%d\n",
220 NIPQUAD(iph->daddr), 220 NIPQUAD(iph->daddr),
221 NIPQUAD(dest->addr), 221 NIPQUAD(dest->addr.ip),
222 ntohs(dest->port)); 222 ntohs(dest->port));
223 223
224 return dest; 224 return dest;
@@ -234,6 +234,9 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
234 .refcnt = ATOMIC_INIT(0), 234 .refcnt = ATOMIC_INIT(0),
235 .module = THIS_MODULE, 235 .module = THIS_MODULE,
236 .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), 236 .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
237#ifdef CONFIG_IP_VS_IPV6
238 .supports_ipv6 = 0,
239#endif
237 .init_service = ip_vs_dh_init_svc, 240 .init_service = ip_vs_dh_init_svc,
238 .done_service = ip_vs_dh_done_svc, 241 .done_service = ip_vs_dh_done_svc,
239 .update_service = ip_vs_dh_update_svc, 242 .update_service = ip_vs_dh_update_svc,
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index 5a20f93bd7f9..2eb2860dabb5 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -65,37 +65,37 @@ static void estimation_timer(unsigned long arg)
65 s = container_of(e, struct ip_vs_stats, est); 65 s = container_of(e, struct ip_vs_stats, est);
66 66
67 spin_lock(&s->lock); 67 spin_lock(&s->lock);
68 n_conns = s->conns; 68 n_conns = s->ustats.conns;
69 n_inpkts = s->inpkts; 69 n_inpkts = s->ustats.inpkts;
70 n_outpkts = s->outpkts; 70 n_outpkts = s->ustats.outpkts;
71 n_inbytes = s->inbytes; 71 n_inbytes = s->ustats.inbytes;
72 n_outbytes = s->outbytes; 72 n_outbytes = s->ustats.outbytes;
73 73
74 /* scaled by 2^10, but divided 2 seconds */ 74 /* scaled by 2^10, but divided 2 seconds */
75 rate = (n_conns - e->last_conns)<<9; 75 rate = (n_conns - e->last_conns)<<9;
76 e->last_conns = n_conns; 76 e->last_conns = n_conns;
77 e->cps += ((long)rate - (long)e->cps)>>2; 77 e->cps += ((long)rate - (long)e->cps)>>2;
78 s->cps = (e->cps+0x1FF)>>10; 78 s->ustats.cps = (e->cps+0x1FF)>>10;
79 79
80 rate = (n_inpkts - e->last_inpkts)<<9; 80 rate = (n_inpkts - e->last_inpkts)<<9;
81 e->last_inpkts = n_inpkts; 81 e->last_inpkts = n_inpkts;
82 e->inpps += ((long)rate - (long)e->inpps)>>2; 82 e->inpps += ((long)rate - (long)e->inpps)>>2;
83 s->inpps = (e->inpps+0x1FF)>>10; 83 s->ustats.inpps = (e->inpps+0x1FF)>>10;
84 84
85 rate = (n_outpkts - e->last_outpkts)<<9; 85 rate = (n_outpkts - e->last_outpkts)<<9;
86 e->last_outpkts = n_outpkts; 86 e->last_outpkts = n_outpkts;
87 e->outpps += ((long)rate - (long)e->outpps)>>2; 87 e->outpps += ((long)rate - (long)e->outpps)>>2;
88 s->outpps = (e->outpps+0x1FF)>>10; 88 s->ustats.outpps = (e->outpps+0x1FF)>>10;
89 89
90 rate = (n_inbytes - e->last_inbytes)<<4; 90 rate = (n_inbytes - e->last_inbytes)<<4;
91 e->last_inbytes = n_inbytes; 91 e->last_inbytes = n_inbytes;
92 e->inbps += ((long)rate - (long)e->inbps)>>2; 92 e->inbps += ((long)rate - (long)e->inbps)>>2;
93 s->inbps = (e->inbps+0xF)>>5; 93 s->ustats.inbps = (e->inbps+0xF)>>5;
94 94
95 rate = (n_outbytes - e->last_outbytes)<<4; 95 rate = (n_outbytes - e->last_outbytes)<<4;
96 e->last_outbytes = n_outbytes; 96 e->last_outbytes = n_outbytes;
97 e->outbps += ((long)rate - (long)e->outbps)>>2; 97 e->outbps += ((long)rate - (long)e->outbps)>>2;
98 s->outbps = (e->outbps+0xF)>>5; 98 s->ustats.outbps = (e->outbps+0xF)>>5;
99 spin_unlock(&s->lock); 99 spin_unlock(&s->lock);
100 } 100 }
101 spin_unlock(&est_lock); 101 spin_unlock(&est_lock);
@@ -108,24 +108,22 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
108 108
109 INIT_LIST_HEAD(&est->list); 109 INIT_LIST_HEAD(&est->list);
110 110
111 est->last_conns = stats->conns; 111 est->last_conns = stats->ustats.conns;
112 est->cps = stats->cps<<10; 112 est->cps = stats->ustats.cps<<10;
113 113
114 est->last_inpkts = stats->inpkts; 114 est->last_inpkts = stats->ustats.inpkts;
115 est->inpps = stats->inpps<<10; 115 est->inpps = stats->ustats.inpps<<10;
116 116
117 est->last_outpkts = stats->outpkts; 117 est->last_outpkts = stats->ustats.outpkts;
118 est->outpps = stats->outpps<<10; 118 est->outpps = stats->ustats.outpps<<10;
119 119
120 est->last_inbytes = stats->inbytes; 120 est->last_inbytes = stats->ustats.inbytes;
121 est->inbps = stats->inbps<<5; 121 est->inbps = stats->ustats.inbps<<5;
122 122
123 est->last_outbytes = stats->outbytes; 123 est->last_outbytes = stats->ustats.outbytes;
124 est->outbps = stats->outbps<<5; 124 est->outbps = stats->ustats.outbps<<5;
125 125
126 spin_lock_bh(&est_lock); 126 spin_lock_bh(&est_lock);
127 if (list_empty(&est_list))
128 mod_timer(&est_timer, jiffies + 2 * HZ);
129 list_add(&est->list, &est_list); 127 list_add(&est->list, &est_list);
130 spin_unlock_bh(&est_lock); 128 spin_unlock_bh(&est_lock);
131} 129}
@@ -136,11 +134,6 @@ void ip_vs_kill_estimator(struct ip_vs_stats *stats)
136 134
137 spin_lock_bh(&est_lock); 135 spin_lock_bh(&est_lock);
138 list_del(&est->list); 136 list_del(&est->list);
139 while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
140 spin_unlock_bh(&est_lock);
141 cpu_relax();
142 spin_lock_bh(&est_lock);
143 }
144 spin_unlock_bh(&est_lock); 137 spin_unlock_bh(&est_lock);
145} 138}
146 139
@@ -160,3 +153,14 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
160 est->inbps = 0; 153 est->inbps = 0;
161 est->outbps = 0; 154 est->outbps = 0;
162} 155}
156
157int __init ip_vs_estimator_init(void)
158{
159 mod_timer(&est_timer, jiffies + 2 * HZ);
160 return 0;
161}
162
163void ip_vs_estimator_cleanup(void)
164{
165 del_timer_sync(&est_timer);
166}
diff --git a/net/ipv4/ipvs/ip_vs_ftp.c b/net/ipv4/ipvs/ip_vs_ftp.c
index c1c758e4f733..2e7dbd8b73a4 100644
--- a/net/ipv4/ipvs/ip_vs_ftp.c
+++ b/net/ipv4/ipvs/ip_vs_ftp.c
@@ -140,13 +140,21 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
140 struct tcphdr *th; 140 struct tcphdr *th;
141 char *data, *data_limit; 141 char *data, *data_limit;
142 char *start, *end; 142 char *start, *end;
143 __be32 from; 143 union nf_inet_addr from;
144 __be16 port; 144 __be16 port;
145 struct ip_vs_conn *n_cp; 145 struct ip_vs_conn *n_cp;
146 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ 146 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
147 unsigned buf_len; 147 unsigned buf_len;
148 int ret; 148 int ret;
149 149
150#ifdef CONFIG_IP_VS_IPV6
151 /* This application helper doesn't work with IPv6 yet,
152 * so turn this into a no-op for IPv6 packets
153 */
154 if (cp->af == AF_INET6)
155 return 1;
156#endif
157
150 *diff = 0; 158 *diff = 0;
151 159
152 /* Only useful for established sessions */ 160 /* Only useful for established sessions */
@@ -166,24 +174,25 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
166 if (ip_vs_ftp_get_addrport(data, data_limit, 174 if (ip_vs_ftp_get_addrport(data, data_limit,
167 SERVER_STRING, 175 SERVER_STRING,
168 sizeof(SERVER_STRING)-1, ')', 176 sizeof(SERVER_STRING)-1, ')',
169 &from, &port, 177 &from.ip, &port,
170 &start, &end) != 1) 178 &start, &end) != 1)
171 return 1; 179 return 1;
172 180
173 IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> " 181 IP_VS_DBG(7, "PASV response (%u.%u.%u.%u:%d) -> "
174 "%u.%u.%u.%u:%d detected\n", 182 "%u.%u.%u.%u:%d detected\n",
175 NIPQUAD(from), ntohs(port), NIPQUAD(cp->caddr), 0); 183 NIPQUAD(from.ip), ntohs(port),
184 NIPQUAD(cp->caddr.ip), 0);
176 185
177 /* 186 /*
178 * Now update or create an connection entry for it 187 * Now update or create an connection entry for it
179 */ 188 */
180 n_cp = ip_vs_conn_out_get(iph->protocol, from, port, 189 n_cp = ip_vs_conn_out_get(AF_INET, iph->protocol, &from, port,
181 cp->caddr, 0); 190 &cp->caddr, 0);
182 if (!n_cp) { 191 if (!n_cp) {
183 n_cp = ip_vs_conn_new(IPPROTO_TCP, 192 n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP,
184 cp->caddr, 0, 193 &cp->caddr, 0,
185 cp->vaddr, port, 194 &cp->vaddr, port,
186 from, port, 195 &from, port,
187 IP_VS_CONN_F_NO_CPORT, 196 IP_VS_CONN_F_NO_CPORT,
188 cp->dest); 197 cp->dest);
189 if (!n_cp) 198 if (!n_cp)
@@ -196,9 +205,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
196 /* 205 /*
197 * Replace the old passive address with the new one 206 * Replace the old passive address with the new one
198 */ 207 */
199 from = n_cp->vaddr; 208 from.ip = n_cp->vaddr.ip;
200 port = n_cp->vport; 209 port = n_cp->vport;
201 sprintf(buf,"%d,%d,%d,%d,%d,%d", NIPQUAD(from), 210 sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip),
202 (ntohs(port)>>8)&255, ntohs(port)&255); 211 (ntohs(port)>>8)&255, ntohs(port)&255);
203 buf_len = strlen(buf); 212 buf_len = strlen(buf);
204 213
@@ -243,10 +252,18 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
243 struct tcphdr *th; 252 struct tcphdr *th;
244 char *data, *data_start, *data_limit; 253 char *data, *data_start, *data_limit;
245 char *start, *end; 254 char *start, *end;
246 __be32 to; 255 union nf_inet_addr to;
247 __be16 port; 256 __be16 port;
248 struct ip_vs_conn *n_cp; 257 struct ip_vs_conn *n_cp;
249 258
259#ifdef CONFIG_IP_VS_IPV6
260 /* This application helper doesn't work with IPv6 yet,
261 * so turn this into a no-op for IPv6 packets
262 */
263 if (cp->af == AF_INET6)
264 return 1;
265#endif
266
250 /* no diff required for incoming packets */ 267 /* no diff required for incoming packets */
251 *diff = 0; 268 *diff = 0;
252 269
@@ -291,12 +308,12 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
291 */ 308 */
292 if (ip_vs_ftp_get_addrport(data_start, data_limit, 309 if (ip_vs_ftp_get_addrport(data_start, data_limit,
293 CLIENT_STRING, sizeof(CLIENT_STRING)-1, 310 CLIENT_STRING, sizeof(CLIENT_STRING)-1,
294 '\r', &to, &port, 311 '\r', &to.ip, &port,
295 &start, &end) != 1) 312 &start, &end) != 1)
296 return 1; 313 return 1;
297 314
298 IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n", 315 IP_VS_DBG(7, "PORT %u.%u.%u.%u:%d detected\n",
299 NIPQUAD(to), ntohs(port)); 316 NIPQUAD(to.ip), ntohs(port));
300 317
301 /* Passive mode off */ 318 /* Passive mode off */
302 cp->app_data = NULL; 319 cp->app_data = NULL;
@@ -306,16 +323,16 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
306 */ 323 */
307 IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n", 324 IP_VS_DBG(7, "protocol %s %u.%u.%u.%u:%d %u.%u.%u.%u:%d\n",
308 ip_vs_proto_name(iph->protocol), 325 ip_vs_proto_name(iph->protocol),
309 NIPQUAD(to), ntohs(port), NIPQUAD(cp->vaddr), 0); 326 NIPQUAD(to.ip), ntohs(port), NIPQUAD(cp->vaddr.ip), 0);
310 327
311 n_cp = ip_vs_conn_in_get(iph->protocol, 328 n_cp = ip_vs_conn_in_get(AF_INET, iph->protocol,
312 to, port, 329 &to, port,
313 cp->vaddr, htons(ntohs(cp->vport)-1)); 330 &cp->vaddr, htons(ntohs(cp->vport)-1));
314 if (!n_cp) { 331 if (!n_cp) {
315 n_cp = ip_vs_conn_new(IPPROTO_TCP, 332 n_cp = ip_vs_conn_new(AF_INET, IPPROTO_TCP,
316 to, port, 333 &to, port,
317 cp->vaddr, htons(ntohs(cp->vport)-1), 334 &cp->vaddr, htons(ntohs(cp->vport)-1),
318 cp->daddr, htons(ntohs(cp->dport)-1), 335 &cp->daddr, htons(ntohs(cp->dport)-1),
319 0, 336 0,
320 cp->dest); 337 cp->dest);
321 if (!n_cp) 338 if (!n_cp)
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 7a6a319f544a..6ecef3518cac 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -96,7 +96,6 @@ struct ip_vs_lblc_entry {
96 * IPVS lblc hash table 96 * IPVS lblc hash table
97 */ 97 */
98struct ip_vs_lblc_table { 98struct ip_vs_lblc_table {
99 rwlock_t lock; /* lock for this table */
100 struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ 99 struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
101 atomic_t entries; /* number of entries */ 100 atomic_t entries; /* number of entries */
102 int max_size; /* maximum size of entries */ 101 int max_size; /* maximum size of entries */
@@ -123,31 +122,6 @@ static ctl_table vs_vars_table[] = {
123 122
124static struct ctl_table_header * sysctl_header; 123static struct ctl_table_header * sysctl_header;
125 124
126/*
127 * new/free a ip_vs_lblc_entry, which is a mapping of a destionation
128 * IP address to a server.
129 */
130static inline struct ip_vs_lblc_entry *
131ip_vs_lblc_new(__be32 daddr, struct ip_vs_dest *dest)
132{
133 struct ip_vs_lblc_entry *en;
134
135 en = kmalloc(sizeof(struct ip_vs_lblc_entry), GFP_ATOMIC);
136 if (en == NULL) {
137 IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
138 return NULL;
139 }
140
141 INIT_LIST_HEAD(&en->list);
142 en->addr = daddr;
143
144 atomic_inc(&dest->refcnt);
145 en->dest = dest;
146
147 return en;
148}
149
150
151static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 125static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
152{ 126{
153 list_del(&en->list); 127 list_del(&en->list);
@@ -173,55 +147,66 @@ static inline unsigned ip_vs_lblc_hashkey(__be32 addr)
173 * Hash an entry in the ip_vs_lblc_table. 147 * Hash an entry in the ip_vs_lblc_table.
174 * returns bool success. 148 * returns bool success.
175 */ 149 */
176static int 150static void
177ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) 151ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
178{ 152{
179 unsigned hash; 153 unsigned hash = ip_vs_lblc_hashkey(en->addr);
180
181 if (!list_empty(&en->list)) {
182 IP_VS_ERR("ip_vs_lblc_hash(): request for already hashed, "
183 "called from %p\n", __builtin_return_address(0));
184 return 0;
185 }
186
187 /*
188 * Hash by destination IP address
189 */
190 hash = ip_vs_lblc_hashkey(en->addr);
191 154
192 write_lock(&tbl->lock);
193 list_add(&en->list, &tbl->bucket[hash]); 155 list_add(&en->list, &tbl->bucket[hash]);
194 atomic_inc(&tbl->entries); 156 atomic_inc(&tbl->entries);
195 write_unlock(&tbl->lock);
196
197 return 1;
198} 157}
199 158
200 159
201/* 160/*
202 * Get ip_vs_lblc_entry associated with supplied parameters. 161 * Get ip_vs_lblc_entry associated with supplied parameters. Called under read
162 * lock
203 */ 163 */
204static inline struct ip_vs_lblc_entry * 164static inline struct ip_vs_lblc_entry *
205ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr) 165ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
206{ 166{
207 unsigned hash; 167 unsigned hash = ip_vs_lblc_hashkey(addr);
208 struct ip_vs_lblc_entry *en; 168 struct ip_vs_lblc_entry *en;
209 169
210 hash = ip_vs_lblc_hashkey(addr); 170 list_for_each_entry(en, &tbl->bucket[hash], list)
171 if (en->addr == addr)
172 return en;
211 173
212 read_lock(&tbl->lock); 174 return NULL;
175}
213 176
214 list_for_each_entry(en, &tbl->bucket[hash], list) { 177
215 if (en->addr == addr) { 178/*
216 /* HIT */ 179 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
217 read_unlock(&tbl->lock); 180 * address to a server. Called under write lock.
218 return en; 181 */
182static inline struct ip_vs_lblc_entry *
183ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, __be32 daddr,
184 struct ip_vs_dest *dest)
185{
186 struct ip_vs_lblc_entry *en;
187
188 en = ip_vs_lblc_get(tbl, daddr);
189 if (!en) {
190 en = kmalloc(sizeof(*en), GFP_ATOMIC);
191 if (!en) {
192 IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
193 return NULL;
219 } 194 }
220 }
221 195
222 read_unlock(&tbl->lock); 196 en->addr = daddr;
197 en->lastuse = jiffies;
223 198
224 return NULL; 199 atomic_inc(&dest->refcnt);
200 en->dest = dest;
201
202 ip_vs_lblc_hash(tbl, en);
203 } else if (en->dest != dest) {
204 atomic_dec(&en->dest->refcnt);
205 atomic_inc(&dest->refcnt);
206 en->dest = dest;
207 }
208
209 return en;
225} 210}
226 211
227 212
@@ -230,30 +215,29 @@ ip_vs_lblc_get(struct ip_vs_lblc_table *tbl, __be32 addr)
230 */ 215 */
231static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) 216static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
232{ 217{
233 int i;
234 struct ip_vs_lblc_entry *en, *nxt; 218 struct ip_vs_lblc_entry *en, *nxt;
219 int i;
235 220
236 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 221 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
237 write_lock(&tbl->lock);
238 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 222 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
239 ip_vs_lblc_free(en); 223 ip_vs_lblc_free(en);
240 atomic_dec(&tbl->entries); 224 atomic_dec(&tbl->entries);
241 } 225 }
242 write_unlock(&tbl->lock);
243 } 226 }
244} 227}
245 228
246 229
247static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl) 230static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
248{ 231{
232 struct ip_vs_lblc_table *tbl = svc->sched_data;
233 struct ip_vs_lblc_entry *en, *nxt;
249 unsigned long now = jiffies; 234 unsigned long now = jiffies;
250 int i, j; 235 int i, j;
251 struct ip_vs_lblc_entry *en, *nxt;
252 236
253 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 237 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
254 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 238 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
255 239
256 write_lock(&tbl->lock); 240 write_lock(&svc->sched_lock);
257 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 241 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
258 if (time_before(now, 242 if (time_before(now,
259 en->lastuse + sysctl_ip_vs_lblc_expiration)) 243 en->lastuse + sysctl_ip_vs_lblc_expiration))
@@ -262,7 +246,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
262 ip_vs_lblc_free(en); 246 ip_vs_lblc_free(en);
263 atomic_dec(&tbl->entries); 247 atomic_dec(&tbl->entries);
264 } 248 }
265 write_unlock(&tbl->lock); 249 write_unlock(&svc->sched_lock);
266 } 250 }
267 tbl->rover = j; 251 tbl->rover = j;
268} 252}
@@ -281,17 +265,16 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_lblc_table *tbl)
281 */ 265 */
282static void ip_vs_lblc_check_expire(unsigned long data) 266static void ip_vs_lblc_check_expire(unsigned long data)
283{ 267{
284 struct ip_vs_lblc_table *tbl; 268 struct ip_vs_service *svc = (struct ip_vs_service *) data;
269 struct ip_vs_lblc_table *tbl = svc->sched_data;
285 unsigned long now = jiffies; 270 unsigned long now = jiffies;
286 int goal; 271 int goal;
287 int i, j; 272 int i, j;
288 struct ip_vs_lblc_entry *en, *nxt; 273 struct ip_vs_lblc_entry *en, *nxt;
289 274
290 tbl = (struct ip_vs_lblc_table *)data;
291
292 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 275 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
293 /* do full expiration check */ 276 /* do full expiration check */
294 ip_vs_lblc_full_check(tbl); 277 ip_vs_lblc_full_check(svc);
295 tbl->counter = 1; 278 tbl->counter = 1;
296 goto out; 279 goto out;
297 } 280 }
@@ -308,7 +291,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
308 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 291 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
309 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 292 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
310 293
311 write_lock(&tbl->lock); 294 write_lock(&svc->sched_lock);
312 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 295 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
313 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 296 if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
314 continue; 297 continue;
@@ -317,7 +300,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
317 atomic_dec(&tbl->entries); 300 atomic_dec(&tbl->entries);
318 goal--; 301 goal--;
319 } 302 }
320 write_unlock(&tbl->lock); 303 write_unlock(&svc->sched_lock);
321 if (goal <= 0) 304 if (goal <= 0)
322 break; 305 break;
323 } 306 }
@@ -336,15 +319,14 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
336 /* 319 /*
337 * Allocate the ip_vs_lblc_table for this service 320 * Allocate the ip_vs_lblc_table for this service
338 */ 321 */
339 tbl = kmalloc(sizeof(struct ip_vs_lblc_table), GFP_ATOMIC); 322 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
340 if (tbl == NULL) { 323 if (tbl == NULL) {
341 IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n"); 324 IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
342 return -ENOMEM; 325 return -ENOMEM;
343 } 326 }
344 svc->sched_data = tbl; 327 svc->sched_data = tbl;
345 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for " 328 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
346 "current service\n", 329 "current service\n", sizeof(*tbl));
347 sizeof(struct ip_vs_lblc_table));
348 330
349 /* 331 /*
350 * Initialize the hash buckets 332 * Initialize the hash buckets
@@ -352,7 +334,6 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
352 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 334 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
353 INIT_LIST_HEAD(&tbl->bucket[i]); 335 INIT_LIST_HEAD(&tbl->bucket[i]);
354 } 336 }
355 rwlock_init(&tbl->lock);
356 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; 337 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
357 tbl->rover = 0; 338 tbl->rover = 0;
358 tbl->counter = 1; 339 tbl->counter = 1;
@@ -361,9 +342,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
361 * Hook periodic timer for garbage collection 342 * Hook periodic timer for garbage collection
362 */ 343 */
363 setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire, 344 setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
364 (unsigned long)tbl); 345 (unsigned long)svc);
365 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; 346 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
366 add_timer(&tbl->periodic_timer);
367 347
368 return 0; 348 return 0;
369} 349}
@@ -380,22 +360,16 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
380 ip_vs_lblc_flush(tbl); 360 ip_vs_lblc_flush(tbl);
381 361
382 /* release the table itself */ 362 /* release the table itself */
383 kfree(svc->sched_data); 363 kfree(tbl);
384 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", 364 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
385 sizeof(struct ip_vs_lblc_table)); 365 sizeof(*tbl));
386 366
387 return 0; 367 return 0;
388} 368}
389 369
390 370
391static int ip_vs_lblc_update_svc(struct ip_vs_service *svc)
392{
393 return 0;
394}
395
396
397static inline struct ip_vs_dest * 371static inline struct ip_vs_dest *
398__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) 372__ip_vs_lblc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
399{ 373{
400 struct ip_vs_dest *dest, *least; 374 struct ip_vs_dest *dest, *least;
401 int loh, doh; 375 int loh, doh;
@@ -448,7 +422,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
448 422
449 IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d " 423 IP_VS_DBG(6, "LBLC: server %d.%d.%d.%d:%d "
450 "activeconns %d refcnt %d weight %d overhead %d\n", 424 "activeconns %d refcnt %d weight %d overhead %d\n",
451 NIPQUAD(least->addr), ntohs(least->port), 425 NIPQUAD(least->addr.ip), ntohs(least->port),
452 atomic_read(&least->activeconns), 426 atomic_read(&least->activeconns),
453 atomic_read(&least->refcnt), 427 atomic_read(&least->refcnt),
454 atomic_read(&least->weight), loh); 428 atomic_read(&least->weight), loh);
@@ -484,47 +458,55 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
484static struct ip_vs_dest * 458static struct ip_vs_dest *
485ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 459ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
486{ 460{
487 struct ip_vs_dest *dest; 461 struct ip_vs_lblc_table *tbl = svc->sched_data;
488 struct ip_vs_lblc_table *tbl;
489 struct ip_vs_lblc_entry *en;
490 struct iphdr *iph = ip_hdr(skb); 462 struct iphdr *iph = ip_hdr(skb);
463 struct ip_vs_dest *dest = NULL;
464 struct ip_vs_lblc_entry *en;
491 465
492 IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n"); 466 IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
493 467
494 tbl = (struct ip_vs_lblc_table *)svc->sched_data; 468 /* First look in our cache */
469 read_lock(&svc->sched_lock);
495 en = ip_vs_lblc_get(tbl, iph->daddr); 470 en = ip_vs_lblc_get(tbl, iph->daddr);
496 if (en == NULL) { 471 if (en) {
497 dest = __ip_vs_wlc_schedule(svc, iph); 472 /* We only hold a read lock, but this is atomic */
498 if (dest == NULL) { 473 en->lastuse = jiffies;
499 IP_VS_DBG(1, "no destination available\n"); 474
500 return NULL; 475 /*
501 } 476 * If the destination is not available, i.e. it's in the trash,
502 en = ip_vs_lblc_new(iph->daddr, dest); 477 * we must ignore it, as it may be removed from under our feet,
503 if (en == NULL) { 478 * if someone drops our reference count. Our caller only makes
504 return NULL; 479 * sure that destinations, that are not in the trash, are not
505 } 480 * moved to the trash, while we are scheduling. But anyone can
506 ip_vs_lblc_hash(tbl, en); 481 * free up entries from the trash at any time.
507 } else { 482 */
508 dest = en->dest; 483
509 if (!(dest->flags & IP_VS_DEST_F_AVAILABLE) 484 if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
510 || atomic_read(&dest->weight) <= 0 485 dest = en->dest;
511 || is_overloaded(dest, svc)) { 486 }
512 dest = __ip_vs_wlc_schedule(svc, iph); 487 read_unlock(&svc->sched_lock);
513 if (dest == NULL) { 488
514 IP_VS_DBG(1, "no destination available\n"); 489 /* If the destination has a weight and is not overloaded, use it */
515 return NULL; 490 if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
516 } 491 goto out;
517 atomic_dec(&en->dest->refcnt); 492
518 atomic_inc(&dest->refcnt); 493 /* No cache entry or it is invalid, time to schedule */
519 en->dest = dest; 494 dest = __ip_vs_lblc_schedule(svc, iph);
520 } 495 if (!dest) {
496 IP_VS_DBG(1, "no destination available\n");
497 return NULL;
521 } 498 }
522 en->lastuse = jiffies;
523 499
500 /* If we fail to create a cache entry, we'll just use the valid dest */
501 write_lock(&svc->sched_lock);
502 ip_vs_lblc_new(tbl, iph->daddr, dest);
503 write_unlock(&svc->sched_lock);
504
505out:
524 IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u " 506 IP_VS_DBG(6, "LBLC: destination IP address %u.%u.%u.%u "
525 "--> server %u.%u.%u.%u:%d\n", 507 "--> server %u.%u.%u.%u:%d\n",
526 NIPQUAD(en->addr), 508 NIPQUAD(iph->daddr),
527 NIPQUAD(dest->addr), 509 NIPQUAD(dest->addr.ip),
528 ntohs(dest->port)); 510 ntohs(dest->port));
529 511
530 return dest; 512 return dest;
@@ -540,9 +522,11 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
540 .refcnt = ATOMIC_INIT(0), 522 .refcnt = ATOMIC_INIT(0),
541 .module = THIS_MODULE, 523 .module = THIS_MODULE,
542 .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list), 524 .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
525#ifdef CONFIG_IP_VS_IPV6
526 .supports_ipv6 = 0,
527#endif
543 .init_service = ip_vs_lblc_init_svc, 528 .init_service = ip_vs_lblc_init_svc,
544 .done_service = ip_vs_lblc_done_svc, 529 .done_service = ip_vs_lblc_done_svc,
545 .update_service = ip_vs_lblc_update_svc,
546 .schedule = ip_vs_lblc_schedule, 530 .schedule = ip_vs_lblc_schedule,
547}; 531};
548 532
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index c234e73968a6..1f75ea83bcf8 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -106,7 +106,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
106 return NULL; 106 return NULL;
107 } 107 }
108 108
109 e = kmalloc(sizeof(struct ip_vs_dest_list), GFP_ATOMIC); 109 e = kmalloc(sizeof(*e), GFP_ATOMIC);
110 if (e == NULL) { 110 if (e == NULL) {
111 IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n"); 111 IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
112 return NULL; 112 return NULL;
@@ -116,11 +116,9 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
116 e->dest = dest; 116 e->dest = dest;
117 117
118 /* link it to the list */ 118 /* link it to the list */
119 write_lock(&set->lock);
120 e->next = set->list; 119 e->next = set->list;
121 set->list = e; 120 set->list = e;
122 atomic_inc(&set->size); 121 atomic_inc(&set->size);
123 write_unlock(&set->lock);
124 122
125 set->lastmod = jiffies; 123 set->lastmod = jiffies;
126 return e; 124 return e;
@@ -131,7 +129,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
131{ 129{
132 struct ip_vs_dest_list *e, **ep; 130 struct ip_vs_dest_list *e, **ep;
133 131
134 write_lock(&set->lock);
135 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) { 132 for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
136 if (e->dest == dest) { 133 if (e->dest == dest) {
137 /* HIT */ 134 /* HIT */
@@ -144,7 +141,6 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
144 } 141 }
145 ep = &e->next; 142 ep = &e->next;
146 } 143 }
147 write_unlock(&set->lock);
148} 144}
149 145
150static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set) 146static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
@@ -174,7 +170,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
174 if (set == NULL) 170 if (set == NULL)
175 return NULL; 171 return NULL;
176 172
177 read_lock(&set->lock);
178 /* select the first destination server, whose weight > 0 */ 173 /* select the first destination server, whose weight > 0 */
179 for (e=set->list; e!=NULL; e=e->next) { 174 for (e=set->list; e!=NULL; e=e->next) {
180 least = e->dest; 175 least = e->dest;
@@ -188,7 +183,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
188 goto nextstage; 183 goto nextstage;
189 } 184 }
190 } 185 }
191 read_unlock(&set->lock);
192 return NULL; 186 return NULL;
193 187
194 /* find the destination with the weighted least load */ 188 /* find the destination with the weighted least load */
@@ -207,11 +201,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
207 loh = doh; 201 loh = doh;
208 } 202 }
209 } 203 }
210 read_unlock(&set->lock);
211 204
212 IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d " 205 IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
213 "activeconns %d refcnt %d weight %d overhead %d\n", 206 "activeconns %d refcnt %d weight %d overhead %d\n",
214 NIPQUAD(least->addr), ntohs(least->port), 207 NIPQUAD(least->addr.ip), ntohs(least->port),
215 atomic_read(&least->activeconns), 208 atomic_read(&least->activeconns),
216 atomic_read(&least->refcnt), 209 atomic_read(&least->refcnt),
217 atomic_read(&least->weight), loh); 210 atomic_read(&least->weight), loh);
@@ -229,7 +222,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
229 if (set == NULL) 222 if (set == NULL)
230 return NULL; 223 return NULL;
231 224
232 read_lock(&set->lock);
233 /* select the first destination server, whose weight > 0 */ 225 /* select the first destination server, whose weight > 0 */
234 for (e=set->list; e!=NULL; e=e->next) { 226 for (e=set->list; e!=NULL; e=e->next) {
235 most = e->dest; 227 most = e->dest;
@@ -239,7 +231,6 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
239 goto nextstage; 231 goto nextstage;
240 } 232 }
241 } 233 }
242 read_unlock(&set->lock);
243 return NULL; 234 return NULL;
244 235
245 /* find the destination with the weighted most load */ 236 /* find the destination with the weighted most load */
@@ -256,11 +247,10 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
256 moh = doh; 247 moh = doh;
257 } 248 }
258 } 249 }
259 read_unlock(&set->lock);
260 250
261 IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d " 251 IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
262 "activeconns %d refcnt %d weight %d overhead %d\n", 252 "activeconns %d refcnt %d weight %d overhead %d\n",
263 NIPQUAD(most->addr), ntohs(most->port), 253 NIPQUAD(most->addr.ip), ntohs(most->port),
264 atomic_read(&most->activeconns), 254 atomic_read(&most->activeconns),
265 atomic_read(&most->refcnt), 255 atomic_read(&most->refcnt),
266 atomic_read(&most->weight), moh); 256 atomic_read(&most->weight), moh);
@@ -284,7 +274,6 @@ struct ip_vs_lblcr_entry {
284 * IPVS lblcr hash table 274 * IPVS lblcr hash table
285 */ 275 */
286struct ip_vs_lblcr_table { 276struct ip_vs_lblcr_table {
287 rwlock_t lock; /* lock for this table */
288 struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ 277 struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
289 atomic_t entries; /* number of entries */ 278 atomic_t entries; /* number of entries */
290 int max_size; /* maximum size of entries */ 279 int max_size; /* maximum size of entries */
@@ -311,32 +300,6 @@ static ctl_table vs_vars_table[] = {
311 300
312static struct ctl_table_header * sysctl_header; 301static struct ctl_table_header * sysctl_header;
313 302
314/*
315 * new/free a ip_vs_lblcr_entry, which is a mapping of a destination
316 * IP address to a server.
317 */
318static inline struct ip_vs_lblcr_entry *ip_vs_lblcr_new(__be32 daddr)
319{
320 struct ip_vs_lblcr_entry *en;
321
322 en = kmalloc(sizeof(struct ip_vs_lblcr_entry), GFP_ATOMIC);
323 if (en == NULL) {
324 IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
325 return NULL;
326 }
327
328 INIT_LIST_HEAD(&en->list);
329 en->addr = daddr;
330
331 /* initilize its dest set */
332 atomic_set(&(en->set.size), 0);
333 en->set.list = NULL;
334 rwlock_init(&en->set.lock);
335
336 return en;
337}
338
339
340static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) 303static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
341{ 304{
342 list_del(&en->list); 305 list_del(&en->list);
@@ -358,55 +321,68 @@ static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
358 * Hash an entry in the ip_vs_lblcr_table. 321 * Hash an entry in the ip_vs_lblcr_table.
359 * returns bool success. 322 * returns bool success.
360 */ 323 */
361static int 324static void
362ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) 325ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
363{ 326{
364 unsigned hash; 327 unsigned hash = ip_vs_lblcr_hashkey(en->addr);
365 328
366 if (!list_empty(&en->list)) {
367 IP_VS_ERR("ip_vs_lblcr_hash(): request for already hashed, "
368 "called from %p\n", __builtin_return_address(0));
369 return 0;
370 }
371
372 /*
373 * Hash by destination IP address
374 */
375 hash = ip_vs_lblcr_hashkey(en->addr);
376
377 write_lock(&tbl->lock);
378 list_add(&en->list, &tbl->bucket[hash]); 329 list_add(&en->list, &tbl->bucket[hash]);
379 atomic_inc(&tbl->entries); 330 atomic_inc(&tbl->entries);
380 write_unlock(&tbl->lock);
381
382 return 1;
383} 331}
384 332
385 333
386/* 334/*
387 * Get ip_vs_lblcr_entry associated with supplied parameters. 335 * Get ip_vs_lblcr_entry associated with supplied parameters. Called under
336 * read lock.
388 */ 337 */
389static inline struct ip_vs_lblcr_entry * 338static inline struct ip_vs_lblcr_entry *
390ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr) 339ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
391{ 340{
392 unsigned hash; 341 unsigned hash = ip_vs_lblcr_hashkey(addr);
393 struct ip_vs_lblcr_entry *en; 342 struct ip_vs_lblcr_entry *en;
394 343
395 hash = ip_vs_lblcr_hashkey(addr); 344 list_for_each_entry(en, &tbl->bucket[hash], list)
345 if (en->addr == addr)
346 return en;
396 347
397 read_lock(&tbl->lock); 348 return NULL;
349}
398 350
399 list_for_each_entry(en, &tbl->bucket[hash], list) { 351
400 if (en->addr == addr) { 352/*
401 /* HIT */ 353 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
402 read_unlock(&tbl->lock); 354 * IP address to a server. Called under write lock.
403 return en; 355 */
356static inline struct ip_vs_lblcr_entry *
357ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, __be32 daddr,
358 struct ip_vs_dest *dest)
359{
360 struct ip_vs_lblcr_entry *en;
361
362 en = ip_vs_lblcr_get(tbl, daddr);
363 if (!en) {
364 en = kmalloc(sizeof(*en), GFP_ATOMIC);
365 if (!en) {
366 IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
367 return NULL;
404 } 368 }
369
370 en->addr = daddr;
371 en->lastuse = jiffies;
372
373 /* initilize its dest set */
374 atomic_set(&(en->set.size), 0);
375 en->set.list = NULL;
376 rwlock_init(&en->set.lock);
377
378 ip_vs_lblcr_hash(tbl, en);
405 } 379 }
406 380
407 read_unlock(&tbl->lock); 381 write_lock(&en->set.lock);
382 ip_vs_dest_set_insert(&en->set, dest);
383 write_unlock(&en->set.lock);
408 384
409 return NULL; 385 return en;
410} 386}
411 387
412 388
@@ -418,19 +394,18 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
418 int i; 394 int i;
419 struct ip_vs_lblcr_entry *en, *nxt; 395 struct ip_vs_lblcr_entry *en, *nxt;
420 396
397 /* No locking required, only called during cleanup. */
421 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 398 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
422 write_lock(&tbl->lock);
423 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 399 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
424 ip_vs_lblcr_free(en); 400 ip_vs_lblcr_free(en);
425 atomic_dec(&tbl->entries);
426 } 401 }
427 write_unlock(&tbl->lock);
428 } 402 }
429} 403}
430 404
431 405
432static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl) 406static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
433{ 407{
408 struct ip_vs_lblcr_table *tbl = svc->sched_data;
434 unsigned long now = jiffies; 409 unsigned long now = jiffies;
435 int i, j; 410 int i, j;
436 struct ip_vs_lblcr_entry *en, *nxt; 411 struct ip_vs_lblcr_entry *en, *nxt;
@@ -438,7 +413,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
438 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 413 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
439 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 414 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
440 415
441 write_lock(&tbl->lock); 416 write_lock(&svc->sched_lock);
442 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 417 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
443 if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration, 418 if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
444 now)) 419 now))
@@ -447,7 +422,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
447 ip_vs_lblcr_free(en); 422 ip_vs_lblcr_free(en);
448 atomic_dec(&tbl->entries); 423 atomic_dec(&tbl->entries);
449 } 424 }
450 write_unlock(&tbl->lock); 425 write_unlock(&svc->sched_lock);
451 } 426 }
452 tbl->rover = j; 427 tbl->rover = j;
453} 428}
@@ -466,17 +441,16 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_lblcr_table *tbl)
466 */ 441 */
467static void ip_vs_lblcr_check_expire(unsigned long data) 442static void ip_vs_lblcr_check_expire(unsigned long data)
468{ 443{
469 struct ip_vs_lblcr_table *tbl; 444 struct ip_vs_service *svc = (struct ip_vs_service *) data;
445 struct ip_vs_lblcr_table *tbl = svc->sched_data;
470 unsigned long now = jiffies; 446 unsigned long now = jiffies;
471 int goal; 447 int goal;
472 int i, j; 448 int i, j;
473 struct ip_vs_lblcr_entry *en, *nxt; 449 struct ip_vs_lblcr_entry *en, *nxt;
474 450
475 tbl = (struct ip_vs_lblcr_table *)data;
476
477 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 451 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
478 /* do full expiration check */ 452 /* do full expiration check */
479 ip_vs_lblcr_full_check(tbl); 453 ip_vs_lblcr_full_check(svc);
480 tbl->counter = 1; 454 tbl->counter = 1;
481 goto out; 455 goto out;
482 } 456 }
@@ -493,7 +467,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
493 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 467 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
494 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 468 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
495 469
496 write_lock(&tbl->lock); 470 write_lock(&svc->sched_lock);
497 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 471 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
498 if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) 472 if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
499 continue; 473 continue;
@@ -502,7 +476,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
502 atomic_dec(&tbl->entries); 476 atomic_dec(&tbl->entries);
503 goal--; 477 goal--;
504 } 478 }
505 write_unlock(&tbl->lock); 479 write_unlock(&svc->sched_lock);
506 if (goal <= 0) 480 if (goal <= 0)
507 break; 481 break;
508 } 482 }
@@ -520,15 +494,14 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
520 /* 494 /*
521 * Allocate the ip_vs_lblcr_table for this service 495 * Allocate the ip_vs_lblcr_table for this service
522 */ 496 */
523 tbl = kmalloc(sizeof(struct ip_vs_lblcr_table), GFP_ATOMIC); 497 tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
524 if (tbl == NULL) { 498 if (tbl == NULL) {
525 IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n"); 499 IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
526 return -ENOMEM; 500 return -ENOMEM;
527 } 501 }
528 svc->sched_data = tbl; 502 svc->sched_data = tbl;
529 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for " 503 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
530 "current service\n", 504 "current service\n", sizeof(*tbl));
531 sizeof(struct ip_vs_lblcr_table));
532 505
533 /* 506 /*
534 * Initialize the hash buckets 507 * Initialize the hash buckets
@@ -536,7 +509,6 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
536 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 509 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
537 INIT_LIST_HEAD(&tbl->bucket[i]); 510 INIT_LIST_HEAD(&tbl->bucket[i]);
538 } 511 }
539 rwlock_init(&tbl->lock);
540 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; 512 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
541 tbl->rover = 0; 513 tbl->rover = 0;
542 tbl->counter = 1; 514 tbl->counter = 1;
@@ -545,9 +517,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
545 * Hook periodic timer for garbage collection 517 * Hook periodic timer for garbage collection
546 */ 518 */
547 setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire, 519 setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
548 (unsigned long)tbl); 520 (unsigned long)svc);
549 tbl->periodic_timer.expires = jiffies+CHECK_EXPIRE_INTERVAL; 521 mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);
550 add_timer(&tbl->periodic_timer);
551 522
552 return 0; 523 return 0;
553} 524}
@@ -564,22 +535,16 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
564 ip_vs_lblcr_flush(tbl); 535 ip_vs_lblcr_flush(tbl);
565 536
566 /* release the table itself */ 537 /* release the table itself */
567 kfree(svc->sched_data); 538 kfree(tbl);
568 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", 539 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
569 sizeof(struct ip_vs_lblcr_table)); 540 sizeof(*tbl));
570 541
571 return 0; 542 return 0;
572} 543}
573 544
574 545
575static int ip_vs_lblcr_update_svc(struct ip_vs_service *svc)
576{
577 return 0;
578}
579
580
581static inline struct ip_vs_dest * 546static inline struct ip_vs_dest *
582__ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph) 547__ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph)
583{ 548{
584 struct ip_vs_dest *dest, *least; 549 struct ip_vs_dest *dest, *least;
585 int loh, doh; 550 int loh, doh;
@@ -633,7 +598,7 @@ __ip_vs_wlc_schedule(struct ip_vs_service *svc, struct iphdr *iph)
633 598
634 IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d " 599 IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
635 "activeconns %d refcnt %d weight %d overhead %d\n", 600 "activeconns %d refcnt %d weight %d overhead %d\n",
636 NIPQUAD(least->addr), ntohs(least->port), 601 NIPQUAD(least->addr.ip), ntohs(least->port),
637 atomic_read(&least->activeconns), 602 atomic_read(&least->activeconns),
638 atomic_read(&least->refcnt), 603 atomic_read(&least->refcnt),
639 atomic_read(&least->weight), loh); 604 atomic_read(&least->weight), loh);
@@ -669,51 +634,79 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
669static struct ip_vs_dest * 634static struct ip_vs_dest *
670ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 635ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
671{ 636{
672 struct ip_vs_dest *dest; 637 struct ip_vs_lblcr_table *tbl = svc->sched_data;
673 struct ip_vs_lblcr_table *tbl;
674 struct ip_vs_lblcr_entry *en;
675 struct iphdr *iph = ip_hdr(skb); 638 struct iphdr *iph = ip_hdr(skb);
639 struct ip_vs_dest *dest = NULL;
640 struct ip_vs_lblcr_entry *en;
676 641
677 IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n"); 642 IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
678 643
679 tbl = (struct ip_vs_lblcr_table *)svc->sched_data; 644 /* First look in our cache */
645 read_lock(&svc->sched_lock);
680 en = ip_vs_lblcr_get(tbl, iph->daddr); 646 en = ip_vs_lblcr_get(tbl, iph->daddr);
681 if (en == NULL) { 647 if (en) {
682 dest = __ip_vs_wlc_schedule(svc, iph); 648 /* We only hold a read lock, but this is atomic */
683 if (dest == NULL) { 649 en->lastuse = jiffies;
684 IP_VS_DBG(1, "no destination available\n"); 650
685 return NULL; 651 /* Get the least loaded destination */
686 } 652 read_lock(&en->set.lock);
687 en = ip_vs_lblcr_new(iph->daddr);
688 if (en == NULL) {
689 return NULL;
690 }
691 ip_vs_dest_set_insert(&en->set, dest);
692 ip_vs_lblcr_hash(tbl, en);
693 } else {
694 dest = ip_vs_dest_set_min(&en->set); 653 dest = ip_vs_dest_set_min(&en->set);
695 if (!dest || is_overloaded(dest, svc)) { 654 read_unlock(&en->set.lock);
696 dest = __ip_vs_wlc_schedule(svc, iph); 655
697 if (dest == NULL) { 656 /* More than one destination + enough time passed by, cleanup */
698 IP_VS_DBG(1, "no destination available\n");
699 return NULL;
700 }
701 ip_vs_dest_set_insert(&en->set, dest);
702 }
703 if (atomic_read(&en->set.size) > 1 && 657 if (atomic_read(&en->set.size) > 1 &&
704 jiffies-en->set.lastmod > sysctl_ip_vs_lblcr_expiration) { 658 time_after(jiffies, en->set.lastmod +
659 sysctl_ip_vs_lblcr_expiration)) {
705 struct ip_vs_dest *m; 660 struct ip_vs_dest *m;
661
662 write_lock(&en->set.lock);
706 m = ip_vs_dest_set_max(&en->set); 663 m = ip_vs_dest_set_max(&en->set);
707 if (m) 664 if (m)
708 ip_vs_dest_set_erase(&en->set, m); 665 ip_vs_dest_set_erase(&en->set, m);
666 write_unlock(&en->set.lock);
709 } 667 }
668
669 /* If the destination is not overloaded, use it */
670 if (dest && !is_overloaded(dest, svc)) {
671 read_unlock(&svc->sched_lock);
672 goto out;
673 }
674
675 /* The cache entry is invalid, time to schedule */
676 dest = __ip_vs_lblcr_schedule(svc, iph);
677 if (!dest) {
678 IP_VS_DBG(1, "no destination available\n");
679 read_unlock(&svc->sched_lock);
680 return NULL;
681 }
682
683 /* Update our cache entry */
684 write_lock(&en->set.lock);
685 ip_vs_dest_set_insert(&en->set, dest);
686 write_unlock(&en->set.lock);
687 }
688 read_unlock(&svc->sched_lock);
689
690 if (dest)
691 goto out;
692
693 /* No cache entry, time to schedule */
694 dest = __ip_vs_lblcr_schedule(svc, iph);
695 if (!dest) {
696 IP_VS_DBG(1, "no destination available\n");
697 return NULL;
710 } 698 }
711 en->lastuse = jiffies;
712 699
700 /* If we fail to create a cache entry, we'll just use the valid dest */
701 write_lock(&svc->sched_lock);
702 ip_vs_lblcr_new(tbl, iph->daddr, dest);
703 write_unlock(&svc->sched_lock);
704
705out:
713 IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u " 706 IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
714 "--> server %u.%u.%u.%u:%d\n", 707 "--> server %u.%u.%u.%u:%d\n",
715 NIPQUAD(en->addr), 708 NIPQUAD(iph->daddr),
716 NIPQUAD(dest->addr), 709 NIPQUAD(dest->addr.ip),
717 ntohs(dest->port)); 710 ntohs(dest->port));
718 711
719 return dest; 712 return dest;
@@ -729,9 +722,11 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
729 .refcnt = ATOMIC_INIT(0), 722 .refcnt = ATOMIC_INIT(0),
730 .module = THIS_MODULE, 723 .module = THIS_MODULE,
731 .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list), 724 .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
725#ifdef CONFIG_IP_VS_IPV6
726 .supports_ipv6 = 0,
727#endif
732 .init_service = ip_vs_lblcr_init_svc, 728 .init_service = ip_vs_lblcr_init_svc,
733 .done_service = ip_vs_lblcr_done_svc, 729 .done_service = ip_vs_lblcr_done_svc,
734 .update_service = ip_vs_lblcr_update_svc,
735 .schedule = ip_vs_lblcr_schedule, 730 .schedule = ip_vs_lblcr_schedule,
736}; 731};
737 732
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ebcdbf75ac65..b69f808ac461 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -20,24 +20,6 @@
20#include <net/ip_vs.h> 20#include <net/ip_vs.h>
21 21
22 22
23static int ip_vs_lc_init_svc(struct ip_vs_service *svc)
24{
25 return 0;
26}
27
28
29static int ip_vs_lc_done_svc(struct ip_vs_service *svc)
30{
31 return 0;
32}
33
34
35static int ip_vs_lc_update_svc(struct ip_vs_service *svc)
36{
37 return 0;
38}
39
40
41static inline unsigned int 23static inline unsigned int
42ip_vs_lc_dest_overhead(struct ip_vs_dest *dest) 24ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
43{ 25{
@@ -85,10 +67,10 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
85 } 67 }
86 68
87 if (least) 69 if (least)
88 IP_VS_DBG(6, "LC: server %u.%u.%u.%u:%u activeconns %d inactconns %d\n", 70 IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
89 NIPQUAD(least->addr), ntohs(least->port), 71 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
90 atomic_read(&least->activeconns), 72 atomic_read(&least->activeconns),
91 atomic_read(&least->inactconns)); 73 atomic_read(&least->inactconns));
92 74
93 return least; 75 return least;
94} 76}
@@ -99,9 +81,9 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
99 .refcnt = ATOMIC_INIT(0), 81 .refcnt = ATOMIC_INIT(0),
100 .module = THIS_MODULE, 82 .module = THIS_MODULE,
101 .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list), 83 .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
102 .init_service = ip_vs_lc_init_svc, 84#ifdef CONFIG_IP_VS_IPV6
103 .done_service = ip_vs_lc_done_svc, 85 .supports_ipv6 = 1,
104 .update_service = ip_vs_lc_update_svc, 86#endif
105 .schedule = ip_vs_lc_schedule, 87 .schedule = ip_vs_lc_schedule,
106}; 88};
107 89
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index 92f3a6770031..9a2d8033f08f 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -37,27 +37,6 @@
37#include <net/ip_vs.h> 37#include <net/ip_vs.h>
38 38
39 39
40static int
41ip_vs_nq_init_svc(struct ip_vs_service *svc)
42{
43 return 0;
44}
45
46
47static int
48ip_vs_nq_done_svc(struct ip_vs_service *svc)
49{
50 return 0;
51}
52
53
54static int
55ip_vs_nq_update_svc(struct ip_vs_service *svc)
56{
57 return 0;
58}
59
60
61static inline unsigned int 40static inline unsigned int
62ip_vs_nq_dest_overhead(struct ip_vs_dest *dest) 41ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
63{ 42{
@@ -120,12 +99,12 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
120 return NULL; 99 return NULL;
121 100
122 out: 101 out:
123 IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u " 102 IP_VS_DBG_BUF(6, "NQ: server %s:%u "
124 "activeconns %d refcnt %d weight %d overhead %d\n", 103 "activeconns %d refcnt %d weight %d overhead %d\n",
125 NIPQUAD(least->addr), ntohs(least->port), 104 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
126 atomic_read(&least->activeconns), 105 atomic_read(&least->activeconns),
127 atomic_read(&least->refcnt), 106 atomic_read(&least->refcnt),
128 atomic_read(&least->weight), loh); 107 atomic_read(&least->weight), loh);
129 108
130 return least; 109 return least;
131} 110}
@@ -137,9 +116,9 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
137 .refcnt = ATOMIC_INIT(0), 116 .refcnt = ATOMIC_INIT(0),
138 .module = THIS_MODULE, 117 .module = THIS_MODULE,
139 .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list), 118 .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
140 .init_service = ip_vs_nq_init_svc, 119#ifdef CONFIG_IP_VS_IPV6
141 .done_service = ip_vs_nq_done_svc, 120 .supports_ipv6 = 1,
142 .update_service = ip_vs_nq_update_svc, 121#endif
143 .schedule = ip_vs_nq_schedule, 122 .schedule = ip_vs_nq_schedule,
144}; 123};
145 124
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 6099a88fc200..0791f9e08feb 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -151,11 +151,11 @@ const char * ip_vs_state_name(__u16 proto, int state)
151} 151}
152 152
153 153
154void 154static void
155ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, 155ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
156 const struct sk_buff *skb, 156 const struct sk_buff *skb,
157 int offset, 157 int offset,
158 const char *msg) 158 const char *msg)
159{ 159{
160 char buf[128]; 160 char buf[128];
161 struct iphdr _iph, *ih; 161 struct iphdr _iph, *ih;
@@ -189,6 +189,61 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
189 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); 189 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
190} 190}
191 191
192#ifdef CONFIG_IP_VS_IPV6
193static void
194ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
195 const struct sk_buff *skb,
196 int offset,
197 const char *msg)
198{
199 char buf[192];
200 struct ipv6hdr _iph, *ih;
201
202 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
203 if (ih == NULL)
204 sprintf(buf, "%s TRUNCATED", pp->name);
205 else if (ih->nexthdr == IPPROTO_FRAGMENT)
206 sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT " frag",
207 pp->name, NIP6(ih->saddr),
208 NIP6(ih->daddr));
209 else {
210 __be16 _ports[2], *pptr;
211
212 pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr),
213 sizeof(_ports), _ports);
214 if (pptr == NULL)
215 sprintf(buf, "%s TRUNCATED " NIP6_FMT "->" NIP6_FMT,
216 pp->name,
217 NIP6(ih->saddr),
218 NIP6(ih->daddr));
219 else
220 sprintf(buf, "%s " NIP6_FMT ":%u->" NIP6_FMT ":%u",
221 pp->name,
222 NIP6(ih->saddr),
223 ntohs(pptr[0]),
224 NIP6(ih->daddr),
225 ntohs(pptr[1]));
226 }
227
228 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
229}
230#endif
231
232
233void
234ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
235 const struct sk_buff *skb,
236 int offset,
237 const char *msg)
238{
239#ifdef CONFIG_IP_VS_IPV6
240 if (skb->protocol == htons(ETH_P_IPV6))
241 ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
242 else
243#endif
244 ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
245}
246
192 247
193int __init ip_vs_protocol_init(void) 248int __init ip_vs_protocol_init(void)
194{ 249{
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c
deleted file mode 100644
index 73e0ea87c1f5..000000000000
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ /dev/null
@@ -1,178 +0,0 @@
1/*
2 * ip_vs_proto_ah.c: AH IPSec load balancing support for IPVS
3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation;
10 *
11 */
12
13#include <linux/in.h>
14#include <linux/ip.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netfilter.h>
18#include <linux/netfilter_ipv4.h>
19
20#include <net/ip_vs.h>
21
22
23/* TODO:
24
25struct isakmp_hdr {
26 __u8 icookie[8];
27 __u8 rcookie[8];
28 __u8 np;
29 __u8 version;
30 __u8 xchgtype;
31 __u8 flags;
32 __u32 msgid;
33 __u32 length;
34};
35
36*/
37
38#define PORT_ISAKMP 500
39
40
41static struct ip_vs_conn *
42ah_conn_in_get(const struct sk_buff *skb,
43 struct ip_vs_protocol *pp,
44 const struct iphdr *iph,
45 unsigned int proto_off,
46 int inverse)
47{
48 struct ip_vs_conn *cp;
49
50 if (likely(!inverse)) {
51 cp = ip_vs_conn_in_get(IPPROTO_UDP,
52 iph->saddr,
53 htons(PORT_ISAKMP),
54 iph->daddr,
55 htons(PORT_ISAKMP));
56 } else {
57 cp = ip_vs_conn_in_get(IPPROTO_UDP,
58 iph->daddr,
59 htons(PORT_ISAKMP),
60 iph->saddr,
61 htons(PORT_ISAKMP));
62 }
63
64 if (!cp) {
65 /*
66 * We are not sure if the packet is from our
67 * service, so our conn_schedule hook should return NF_ACCEPT
68 */
69 IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
70 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
71 inverse ? "ICMP+" : "",
72 pp->name,
73 NIPQUAD(iph->saddr),
74 NIPQUAD(iph->daddr));
75 }
76
77 return cp;
78}
79
80
81static struct ip_vs_conn *
82ah_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
83 const struct iphdr *iph, unsigned int proto_off, int inverse)
84{
85 struct ip_vs_conn *cp;
86
87 if (likely(!inverse)) {
88 cp = ip_vs_conn_out_get(IPPROTO_UDP,
89 iph->saddr,
90 htons(PORT_ISAKMP),
91 iph->daddr,
92 htons(PORT_ISAKMP));
93 } else {
94 cp = ip_vs_conn_out_get(IPPROTO_UDP,
95 iph->daddr,
96 htons(PORT_ISAKMP),
97 iph->saddr,
98 htons(PORT_ISAKMP));
99 }
100
101 if (!cp) {
102 IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
103 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
104 inverse ? "ICMP+" : "",
105 pp->name,
106 NIPQUAD(iph->saddr),
107 NIPQUAD(iph->daddr));
108 }
109
110 return cp;
111}
112
113
114static int
115ah_conn_schedule(struct sk_buff *skb,
116 struct ip_vs_protocol *pp,
117 int *verdict, struct ip_vs_conn **cpp)
118{
119 /*
120 * AH is only related traffic. Pass the packet to IP stack.
121 */
122 *verdict = NF_ACCEPT;
123 return 0;
124}
125
126
127static void
128ah_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
129 int offset, const char *msg)
130{
131 char buf[256];
132 struct iphdr _iph, *ih;
133
134 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
135 if (ih == NULL)
136 sprintf(buf, "%s TRUNCATED", pp->name);
137 else
138 sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
139 pp->name, NIPQUAD(ih->saddr),
140 NIPQUAD(ih->daddr));
141
142 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
143}
144
145
146static void ah_init(struct ip_vs_protocol *pp)
147{
148 /* nothing to do now */
149}
150
151
152static void ah_exit(struct ip_vs_protocol *pp)
153{
154 /* nothing to do now */
155}
156
157
158struct ip_vs_protocol ip_vs_protocol_ah = {
159 .name = "AH",
160 .protocol = IPPROTO_AH,
161 .num_states = 1,
162 .dont_defrag = 1,
163 .init = ah_init,
164 .exit = ah_exit,
165 .conn_schedule = ah_conn_schedule,
166 .conn_in_get = ah_conn_in_get,
167 .conn_out_get = ah_conn_out_get,
168 .snat_handler = NULL,
169 .dnat_handler = NULL,
170 .csum_check = NULL,
171 .state_transition = NULL,
172 .register_app = NULL,
173 .unregister_app = NULL,
174 .app_conn_bind = NULL,
175 .debug_packet = ah_debug_packet,
176 .timeout_change = NULL, /* ISAKMP */
177 .set_state_timeout = NULL,
178};
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah_esp.c b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
new file mode 100644
index 000000000000..80ab0c8e5b4a
--- /dev/null
+++ b/net/ipv4/ipvs/ip_vs_proto_ah_esp.c
@@ -0,0 +1,235 @@
1/*
2 * ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS
3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation;
10 *
11 */
12
13#include <linux/in.h>
14#include <linux/ip.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netfilter.h>
18#include <linux/netfilter_ipv4.h>
19
20#include <net/ip_vs.h>
21
22
23/* TODO:
24
25struct isakmp_hdr {
26 __u8 icookie[8];
27 __u8 rcookie[8];
28 __u8 np;
29 __u8 version;
30 __u8 xchgtype;
31 __u8 flags;
32 __u32 msgid;
33 __u32 length;
34};
35
36*/
37
38#define PORT_ISAKMP 500
39
40
41static struct ip_vs_conn *
42ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
43 const struct ip_vs_iphdr *iph, unsigned int proto_off,
44 int inverse)
45{
46 struct ip_vs_conn *cp;
47
48 if (likely(!inverse)) {
49 cp = ip_vs_conn_in_get(af, IPPROTO_UDP,
50 &iph->saddr,
51 htons(PORT_ISAKMP),
52 &iph->daddr,
53 htons(PORT_ISAKMP));
54 } else {
55 cp = ip_vs_conn_in_get(af, IPPROTO_UDP,
56 &iph->daddr,
57 htons(PORT_ISAKMP),
58 &iph->saddr,
59 htons(PORT_ISAKMP));
60 }
61
62 if (!cp) {
63 /*
64 * We are not sure if the packet is from our
65 * service, so our conn_schedule hook should return NF_ACCEPT
66 */
67 IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
68 "%s%s %s->%s\n",
69 inverse ? "ICMP+" : "",
70 pp->name,
71 IP_VS_DBG_ADDR(af, &iph->saddr),
72 IP_VS_DBG_ADDR(af, &iph->daddr));
73 }
74
75 return cp;
76}
77
78
79static struct ip_vs_conn *
80ah_esp_conn_out_get(int af, const struct sk_buff *skb,
81 struct ip_vs_protocol *pp,
82 const struct ip_vs_iphdr *iph,
83 unsigned int proto_off,
84 int inverse)
85{
86 struct ip_vs_conn *cp;
87
88 if (likely(!inverse)) {
89 cp = ip_vs_conn_out_get(af, IPPROTO_UDP,
90 &iph->saddr,
91 htons(PORT_ISAKMP),
92 &iph->daddr,
93 htons(PORT_ISAKMP));
94 } else {
95 cp = ip_vs_conn_out_get(af, IPPROTO_UDP,
96 &iph->daddr,
97 htons(PORT_ISAKMP),
98 &iph->saddr,
99 htons(PORT_ISAKMP));
100 }
101
102 if (!cp) {
103 IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
104 "%s%s %s->%s\n",
105 inverse ? "ICMP+" : "",
106 pp->name,
107 IP_VS_DBG_ADDR(af, &iph->saddr),
108 IP_VS_DBG_ADDR(af, &iph->daddr));
109 }
110
111 return cp;
112}
113
114
115static int
116ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
117 int *verdict, struct ip_vs_conn **cpp)
118{
119 /*
120 * AH/ESP is only related traffic. Pass the packet to IP stack.
121 */
122 *verdict = NF_ACCEPT;
123 return 0;
124}
125
126
127static void
128ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb,
129 int offset, const char *msg)
130{
131 char buf[256];
132 struct iphdr _iph, *ih;
133
134 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
135 if (ih == NULL)
136 sprintf(buf, "%s TRUNCATED", pp->name);
137 else
138 sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
139 pp->name, NIPQUAD(ih->saddr),
140 NIPQUAD(ih->daddr));
141
142 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
143}
144
145#ifdef CONFIG_IP_VS_IPV6
146static void
147ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb,
148 int offset, const char *msg)
149{
150 char buf[256];
151 struct ipv6hdr _iph, *ih;
152
153 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
154 if (ih == NULL)
155 sprintf(buf, "%s TRUNCATED", pp->name);
156 else
157 sprintf(buf, "%s " NIP6_FMT "->" NIP6_FMT,
158 pp->name, NIP6(ih->saddr),
159 NIP6(ih->daddr));
160
161 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
162}
163#endif
164
165static void
166ah_esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
167 int offset, const char *msg)
168{
169#ifdef CONFIG_IP_VS_IPV6
170 if (skb->protocol == htons(ETH_P_IPV6))
171 ah_esp_debug_packet_v6(pp, skb, offset, msg);
172 else
173#endif
174 ah_esp_debug_packet_v4(pp, skb, offset, msg);
175}
176
177
178static void ah_esp_init(struct ip_vs_protocol *pp)
179{
180 /* nothing to do now */
181}
182
183
184static void ah_esp_exit(struct ip_vs_protocol *pp)
185{
186 /* nothing to do now */
187}
188
189
190#ifdef CONFIG_IP_VS_PROTO_AH
191struct ip_vs_protocol ip_vs_protocol_ah = {
192 .name = "AH",
193 .protocol = IPPROTO_AH,
194 .num_states = 1,
195 .dont_defrag = 1,
196 .init = ah_esp_init,
197 .exit = ah_esp_exit,
198 .conn_schedule = ah_esp_conn_schedule,
199 .conn_in_get = ah_esp_conn_in_get,
200 .conn_out_get = ah_esp_conn_out_get,
201 .snat_handler = NULL,
202 .dnat_handler = NULL,
203 .csum_check = NULL,
204 .state_transition = NULL,
205 .register_app = NULL,
206 .unregister_app = NULL,
207 .app_conn_bind = NULL,
208 .debug_packet = ah_esp_debug_packet,
209 .timeout_change = NULL, /* ISAKMP */
210 .set_state_timeout = NULL,
211};
212#endif
213
214#ifdef CONFIG_IP_VS_PROTO_ESP
215struct ip_vs_protocol ip_vs_protocol_esp = {
216 .name = "ESP",
217 .protocol = IPPROTO_ESP,
218 .num_states = 1,
219 .dont_defrag = 1,
220 .init = ah_esp_init,
221 .exit = ah_esp_exit,
222 .conn_schedule = ah_esp_conn_schedule,
223 .conn_in_get = ah_esp_conn_in_get,
224 .conn_out_get = ah_esp_conn_out_get,
225 .snat_handler = NULL,
226 .dnat_handler = NULL,
227 .csum_check = NULL,
228 .state_transition = NULL,
229 .register_app = NULL,
230 .unregister_app = NULL,
231 .app_conn_bind = NULL,
232 .debug_packet = ah_esp_debug_packet,
233 .timeout_change = NULL, /* ISAKMP */
234};
235#endif
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
deleted file mode 100644
index 21d70c8ffa54..000000000000
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * ip_vs_proto_esp.c: ESP IPSec load balancing support for IPVS
3 *
4 * Authors: Julian Anastasov <ja@ssi.bg>, February 2002
5 * Wensong Zhang <wensong@linuxvirtualserver.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation;
10 *
11 */
12
13#include <linux/in.h>
14#include <linux/ip.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/netfilter.h>
18#include <linux/netfilter_ipv4.h>
19
20#include <net/ip_vs.h>
21
22
23/* TODO:
24
25struct isakmp_hdr {
26 __u8 icookie[8];
27 __u8 rcookie[8];
28 __u8 np;
29 __u8 version;
30 __u8 xchgtype;
31 __u8 flags;
32 __u32 msgid;
33 __u32 length;
34};
35
36*/
37
38#define PORT_ISAKMP 500
39
40
41static struct ip_vs_conn *
42esp_conn_in_get(const struct sk_buff *skb,
43 struct ip_vs_protocol *pp,
44 const struct iphdr *iph,
45 unsigned int proto_off,
46 int inverse)
47{
48 struct ip_vs_conn *cp;
49
50 if (likely(!inverse)) {
51 cp = ip_vs_conn_in_get(IPPROTO_UDP,
52 iph->saddr,
53 htons(PORT_ISAKMP),
54 iph->daddr,
55 htons(PORT_ISAKMP));
56 } else {
57 cp = ip_vs_conn_in_get(IPPROTO_UDP,
58 iph->daddr,
59 htons(PORT_ISAKMP),
60 iph->saddr,
61 htons(PORT_ISAKMP));
62 }
63
64 if (!cp) {
65 /*
66 * We are not sure if the packet is from our
67 * service, so our conn_schedule hook should return NF_ACCEPT
68 */
69 IP_VS_DBG(12, "Unknown ISAKMP entry for outin packet "
70 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
71 inverse ? "ICMP+" : "",
72 pp->name,
73 NIPQUAD(iph->saddr),
74 NIPQUAD(iph->daddr));
75 }
76
77 return cp;
78}
79
80
81static struct ip_vs_conn *
82esp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
83 const struct iphdr *iph, unsigned int proto_off, int inverse)
84{
85 struct ip_vs_conn *cp;
86
87 if (likely(!inverse)) {
88 cp = ip_vs_conn_out_get(IPPROTO_UDP,
89 iph->saddr,
90 htons(PORT_ISAKMP),
91 iph->daddr,
92 htons(PORT_ISAKMP));
93 } else {
94 cp = ip_vs_conn_out_get(IPPROTO_UDP,
95 iph->daddr,
96 htons(PORT_ISAKMP),
97 iph->saddr,
98 htons(PORT_ISAKMP));
99 }
100
101 if (!cp) {
102 IP_VS_DBG(12, "Unknown ISAKMP entry for inout packet "
103 "%s%s %u.%u.%u.%u->%u.%u.%u.%u\n",
104 inverse ? "ICMP+" : "",
105 pp->name,
106 NIPQUAD(iph->saddr),
107 NIPQUAD(iph->daddr));
108 }
109
110 return cp;
111}
112
113
114static int
115esp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
116 int *verdict, struct ip_vs_conn **cpp)
117{
118 /*
119 * ESP is only related traffic. Pass the packet to IP stack.
120 */
121 *verdict = NF_ACCEPT;
122 return 0;
123}
124
125
126static void
127esp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb,
128 int offset, const char *msg)
129{
130 char buf[256];
131 struct iphdr _iph, *ih;
132
133 ih = skb_header_pointer(skb, offset, sizeof(_iph), &_iph);
134 if (ih == NULL)
135 sprintf(buf, "%s TRUNCATED", pp->name);
136 else
137 sprintf(buf, "%s %u.%u.%u.%u->%u.%u.%u.%u",
138 pp->name, NIPQUAD(ih->saddr),
139 NIPQUAD(ih->daddr));
140
141 printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
142}
143
144
145static void esp_init(struct ip_vs_protocol *pp)
146{
147 /* nothing to do now */
148}
149
150
151static void esp_exit(struct ip_vs_protocol *pp)
152{
153 /* nothing to do now */
154}
155
156
157struct ip_vs_protocol ip_vs_protocol_esp = {
158 .name = "ESP",
159 .protocol = IPPROTO_ESP,
160 .num_states = 1,
161 .dont_defrag = 1,
162 .init = esp_init,
163 .exit = esp_exit,
164 .conn_schedule = esp_conn_schedule,
165 .conn_in_get = esp_conn_in_get,
166 .conn_out_get = esp_conn_out_get,
167 .snat_handler = NULL,
168 .dnat_handler = NULL,
169 .csum_check = NULL,
170 .state_transition = NULL,
171 .register_app = NULL,
172 .unregister_app = NULL,
173 .app_conn_bind = NULL,
174 .debug_packet = esp_debug_packet,
175 .timeout_change = NULL, /* ISAKMP */
176};
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index d0ea467986a0..dd4566ea2bff 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -18,6 +18,7 @@
18#include <linux/tcp.h> /* for tcphdr */ 18#include <linux/tcp.h> /* for tcphdr */
19#include <net/ip.h> 19#include <net/ip.h>
20#include <net/tcp.h> /* for csum_tcpudp_magic */ 20#include <net/tcp.h> /* for csum_tcpudp_magic */
21#include <net/ip6_checksum.h>
21#include <linux/netfilter.h> 22#include <linux/netfilter.h>
22#include <linux/netfilter_ipv4.h> 23#include <linux/netfilter_ipv4.h>
23 24
@@ -25,8 +26,9 @@
25 26
26 27
27static struct ip_vs_conn * 28static struct ip_vs_conn *
28tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 29tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
29 const struct iphdr *iph, unsigned int proto_off, int inverse) 30 const struct ip_vs_iphdr *iph, unsigned int proto_off,
31 int inverse)
30{ 32{
31 __be16 _ports[2], *pptr; 33 __be16 _ports[2], *pptr;
32 34
@@ -35,19 +37,20 @@ tcp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
35 return NULL; 37 return NULL;
36 38
37 if (likely(!inverse)) { 39 if (likely(!inverse)) {
38 return ip_vs_conn_in_get(iph->protocol, 40 return ip_vs_conn_in_get(af, iph->protocol,
39 iph->saddr, pptr[0], 41 &iph->saddr, pptr[0],
40 iph->daddr, pptr[1]); 42 &iph->daddr, pptr[1]);
41 } else { 43 } else {
42 return ip_vs_conn_in_get(iph->protocol, 44 return ip_vs_conn_in_get(af, iph->protocol,
43 iph->daddr, pptr[1], 45 &iph->daddr, pptr[1],
44 iph->saddr, pptr[0]); 46 &iph->saddr, pptr[0]);
45 } 47 }
46} 48}
47 49
48static struct ip_vs_conn * 50static struct ip_vs_conn *
49tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 51tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
50 const struct iphdr *iph, unsigned int proto_off, int inverse) 52 const struct ip_vs_iphdr *iph, unsigned int proto_off,
53 int inverse)
51{ 54{
52 __be16 _ports[2], *pptr; 55 __be16 _ports[2], *pptr;
53 56
@@ -56,34 +59,36 @@ tcp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
56 return NULL; 59 return NULL;
57 60
58 if (likely(!inverse)) { 61 if (likely(!inverse)) {
59 return ip_vs_conn_out_get(iph->protocol, 62 return ip_vs_conn_out_get(af, iph->protocol,
60 iph->saddr, pptr[0], 63 &iph->saddr, pptr[0],
61 iph->daddr, pptr[1]); 64 &iph->daddr, pptr[1]);
62 } else { 65 } else {
63 return ip_vs_conn_out_get(iph->protocol, 66 return ip_vs_conn_out_get(af, iph->protocol,
64 iph->daddr, pptr[1], 67 &iph->daddr, pptr[1],
65 iph->saddr, pptr[0]); 68 &iph->saddr, pptr[0]);
66 } 69 }
67} 70}
68 71
69 72
70static int 73static int
71tcp_conn_schedule(struct sk_buff *skb, 74tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
72 struct ip_vs_protocol *pp,
73 int *verdict, struct ip_vs_conn **cpp) 75 int *verdict, struct ip_vs_conn **cpp)
74{ 76{
75 struct ip_vs_service *svc; 77 struct ip_vs_service *svc;
76 struct tcphdr _tcph, *th; 78 struct tcphdr _tcph, *th;
79 struct ip_vs_iphdr iph;
77 80
78 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 81 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
82
83 th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph);
79 if (th == NULL) { 84 if (th == NULL) {
80 *verdict = NF_DROP; 85 *verdict = NF_DROP;
81 return 0; 86 return 0;
82 } 87 }
83 88
84 if (th->syn && 89 if (th->syn &&
85 (svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, 90 (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
86 ip_hdr(skb)->daddr, th->dest))) { 91 th->dest))) {
87 if (ip_vs_todrop()) { 92 if (ip_vs_todrop()) {
88 /* 93 /*
89 * It seems that we are very loaded. 94 * It seems that we are very loaded.
@@ -110,22 +115,62 @@ tcp_conn_schedule(struct sk_buff *skb,
110 115
111 116
112static inline void 117static inline void
113tcp_fast_csum_update(struct tcphdr *tcph, __be32 oldip, __be32 newip, 118tcp_fast_csum_update(int af, struct tcphdr *tcph,
119 const union nf_inet_addr *oldip,
120 const union nf_inet_addr *newip,
114 __be16 oldport, __be16 newport) 121 __be16 oldport, __be16 newport)
115{ 122{
123#ifdef CONFIG_IP_VS_IPV6
124 if (af == AF_INET6)
125 tcph->check =
126 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
127 ip_vs_check_diff2(oldport, newport,
128 ~csum_unfold(tcph->check))));
129 else
130#endif
116 tcph->check = 131 tcph->check =
117 csum_fold(ip_vs_check_diff4(oldip, newip, 132 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
118 ip_vs_check_diff2(oldport, newport, 133 ip_vs_check_diff2(oldport, newport,
119 ~csum_unfold(tcph->check)))); 134 ~csum_unfold(tcph->check))));
120} 135}
121 136
122 137
138static inline void
139tcp_partial_csum_update(int af, struct tcphdr *tcph,
140 const union nf_inet_addr *oldip,
141 const union nf_inet_addr *newip,
142 __be16 oldlen, __be16 newlen)
143{
144#ifdef CONFIG_IP_VS_IPV6
145 if (af == AF_INET6)
146 tcph->check =
147 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
148 ip_vs_check_diff2(oldlen, newlen,
149 ~csum_unfold(tcph->check))));
150 else
151#endif
152 tcph->check =
153 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
154 ip_vs_check_diff2(oldlen, newlen,
155 ~csum_unfold(tcph->check))));
156}
157
158
123static int 159static int
124tcp_snat_handler(struct sk_buff *skb, 160tcp_snat_handler(struct sk_buff *skb,
125 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 161 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
126{ 162{
127 struct tcphdr *tcph; 163 struct tcphdr *tcph;
128 const unsigned int tcphoff = ip_hdrlen(skb); 164 unsigned int tcphoff;
165 int oldlen;
166
167#ifdef CONFIG_IP_VS_IPV6
168 if (cp->af == AF_INET6)
169 tcphoff = sizeof(struct ipv6hdr);
170 else
171#endif
172 tcphoff = ip_hdrlen(skb);
173 oldlen = skb->len - tcphoff;
129 174
130 /* csum_check requires unshared skb */ 175 /* csum_check requires unshared skb */
131 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) 176 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
@@ -133,7 +178,7 @@ tcp_snat_handler(struct sk_buff *skb,
133 178
134 if (unlikely(cp->app != NULL)) { 179 if (unlikely(cp->app != NULL)) {
135 /* Some checks before mangling */ 180 /* Some checks before mangling */
136 if (pp->csum_check && !pp->csum_check(skb, pp)) 181 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
137 return 0; 182 return 0;
138 183
139 /* Call application helper if needed */ 184 /* Call application helper if needed */
@@ -141,13 +186,17 @@ tcp_snat_handler(struct sk_buff *skb,
141 return 0; 186 return 0;
142 } 187 }
143 188
144 tcph = (void *)ip_hdr(skb) + tcphoff; 189 tcph = (void *)skb_network_header(skb) + tcphoff;
145 tcph->source = cp->vport; 190 tcph->source = cp->vport;
146 191
147 /* Adjust TCP checksums */ 192 /* Adjust TCP checksums */
148 if (!cp->app) { 193 if (skb->ip_summed == CHECKSUM_PARTIAL) {
194 tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
195 htonl(oldlen),
196 htonl(skb->len - tcphoff));
197 } else if (!cp->app) {
149 /* Only port and addr are changed, do fast csum update */ 198 /* Only port and addr are changed, do fast csum update */
150 tcp_fast_csum_update(tcph, cp->daddr, cp->vaddr, 199 tcp_fast_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
151 cp->dport, cp->vport); 200 cp->dport, cp->vport);
152 if (skb->ip_summed == CHECKSUM_COMPLETE) 201 if (skb->ip_summed == CHECKSUM_COMPLETE)
153 skb->ip_summed = CHECKSUM_NONE; 202 skb->ip_summed = CHECKSUM_NONE;
@@ -155,9 +204,20 @@ tcp_snat_handler(struct sk_buff *skb,
155 /* full checksum calculation */ 204 /* full checksum calculation */
156 tcph->check = 0; 205 tcph->check = 0;
157 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); 206 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
158 tcph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, 207#ifdef CONFIG_IP_VS_IPV6
159 skb->len - tcphoff, 208 if (cp->af == AF_INET6)
160 cp->protocol, skb->csum); 209 tcph->check = csum_ipv6_magic(&cp->vaddr.in6,
210 &cp->caddr.in6,
211 skb->len - tcphoff,
212 cp->protocol, skb->csum);
213 else
214#endif
215 tcph->check = csum_tcpudp_magic(cp->vaddr.ip,
216 cp->caddr.ip,
217 skb->len - tcphoff,
218 cp->protocol,
219 skb->csum);
220
161 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", 221 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
162 pp->name, tcph->check, 222 pp->name, tcph->check,
163 (char*)&(tcph->check) - (char*)tcph); 223 (char*)&(tcph->check) - (char*)tcph);
@@ -171,7 +231,16 @@ tcp_dnat_handler(struct sk_buff *skb,
171 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 231 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
172{ 232{
173 struct tcphdr *tcph; 233 struct tcphdr *tcph;
174 const unsigned int tcphoff = ip_hdrlen(skb); 234 unsigned int tcphoff;
235 int oldlen;
236
237#ifdef CONFIG_IP_VS_IPV6
238 if (cp->af == AF_INET6)
239 tcphoff = sizeof(struct ipv6hdr);
240 else
241#endif
242 tcphoff = ip_hdrlen(skb);
243 oldlen = skb->len - tcphoff;
175 244
176 /* csum_check requires unshared skb */ 245 /* csum_check requires unshared skb */
177 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph))) 246 if (!skb_make_writable(skb, tcphoff+sizeof(*tcph)))
@@ -179,7 +248,7 @@ tcp_dnat_handler(struct sk_buff *skb,
179 248
180 if (unlikely(cp->app != NULL)) { 249 if (unlikely(cp->app != NULL)) {
181 /* Some checks before mangling */ 250 /* Some checks before mangling */
182 if (pp->csum_check && !pp->csum_check(skb, pp)) 251 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
183 return 0; 252 return 0;
184 253
185 /* 254 /*
@@ -190,15 +259,19 @@ tcp_dnat_handler(struct sk_buff *skb,
190 return 0; 259 return 0;
191 } 260 }
192 261
193 tcph = (void *)ip_hdr(skb) + tcphoff; 262 tcph = (void *)skb_network_header(skb) + tcphoff;
194 tcph->dest = cp->dport; 263 tcph->dest = cp->dport;
195 264
196 /* 265 /*
197 * Adjust TCP checksums 266 * Adjust TCP checksums
198 */ 267 */
199 if (!cp->app) { 268 if (skb->ip_summed == CHECKSUM_PARTIAL) {
269 tcp_partial_csum_update(cp->af, tcph, &cp->daddr, &cp->vaddr,
270 htonl(oldlen),
271 htonl(skb->len - tcphoff));
272 } else if (!cp->app) {
200 /* Only port and addr are changed, do fast csum update */ 273 /* Only port and addr are changed, do fast csum update */
201 tcp_fast_csum_update(tcph, cp->vaddr, cp->daddr, 274 tcp_fast_csum_update(cp->af, tcph, &cp->vaddr, &cp->daddr,
202 cp->vport, cp->dport); 275 cp->vport, cp->dport);
203 if (skb->ip_summed == CHECKSUM_COMPLETE) 276 if (skb->ip_summed == CHECKSUM_COMPLETE)
204 skb->ip_summed = CHECKSUM_NONE; 277 skb->ip_summed = CHECKSUM_NONE;
@@ -206,9 +279,19 @@ tcp_dnat_handler(struct sk_buff *skb,
206 /* full checksum calculation */ 279 /* full checksum calculation */
207 tcph->check = 0; 280 tcph->check = 0;
208 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); 281 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
209 tcph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, 282#ifdef CONFIG_IP_VS_IPV6
210 skb->len - tcphoff, 283 if (cp->af == AF_INET6)
211 cp->protocol, skb->csum); 284 tcph->check = csum_ipv6_magic(&cp->caddr.in6,
285 &cp->daddr.in6,
286 skb->len - tcphoff,
287 cp->protocol, skb->csum);
288 else
289#endif
290 tcph->check = csum_tcpudp_magic(cp->caddr.ip,
291 cp->daddr.ip,
292 skb->len - tcphoff,
293 cp->protocol,
294 skb->csum);
212 skb->ip_summed = CHECKSUM_UNNECESSARY; 295 skb->ip_summed = CHECKSUM_UNNECESSARY;
213 } 296 }
214 return 1; 297 return 1;
@@ -216,21 +299,43 @@ tcp_dnat_handler(struct sk_buff *skb,
216 299
217 300
218static int 301static int
219tcp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) 302tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
220{ 303{
221 const unsigned int tcphoff = ip_hdrlen(skb); 304 unsigned int tcphoff;
305
306#ifdef CONFIG_IP_VS_IPV6
307 if (af == AF_INET6)
308 tcphoff = sizeof(struct ipv6hdr);
309 else
310#endif
311 tcphoff = ip_hdrlen(skb);
222 312
223 switch (skb->ip_summed) { 313 switch (skb->ip_summed) {
224 case CHECKSUM_NONE: 314 case CHECKSUM_NONE:
225 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); 315 skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
226 case CHECKSUM_COMPLETE: 316 case CHECKSUM_COMPLETE:
227 if (csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, 317#ifdef CONFIG_IP_VS_IPV6
228 skb->len - tcphoff, 318 if (af == AF_INET6) {
229 ip_hdr(skb)->protocol, skb->csum)) { 319 if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
230 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 320 &ipv6_hdr(skb)->daddr,
231 "Failed checksum for"); 321 skb->len - tcphoff,
232 return 0; 322 ipv6_hdr(skb)->nexthdr,
233 } 323 skb->csum)) {
324 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
325 "Failed checksum for");
326 return 0;
327 }
328 } else
329#endif
330 if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
331 ip_hdr(skb)->daddr,
332 skb->len - tcphoff,
333 ip_hdr(skb)->protocol,
334 skb->csum)) {
335 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
336 "Failed checksum for");
337 return 0;
338 }
234 break; 339 break;
235 default: 340 default:
236 /* No need to checksum. */ 341 /* No need to checksum. */
@@ -419,19 +524,23 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
419 if (new_state != cp->state) { 524 if (new_state != cp->state) {
420 struct ip_vs_dest *dest = cp->dest; 525 struct ip_vs_dest *dest = cp->dest;
421 526
422 IP_VS_DBG(8, "%s %s [%c%c%c%c] %u.%u.%u.%u:%d->" 527 IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
423 "%u.%u.%u.%u:%d state: %s->%s conn->refcnt:%d\n", 528 "%s:%d state: %s->%s conn->refcnt:%d\n",
424 pp->name, 529 pp->name,
425 (state_off==TCP_DIR_OUTPUT)?"output ":"input ", 530 ((state_off == TCP_DIR_OUTPUT) ?
426 th->syn? 'S' : '.', 531 "output " : "input "),
427 th->fin? 'F' : '.', 532 th->syn ? 'S' : '.',
428 th->ack? 'A' : '.', 533 th->fin ? 'F' : '.',
429 th->rst? 'R' : '.', 534 th->ack ? 'A' : '.',
430 NIPQUAD(cp->daddr), ntohs(cp->dport), 535 th->rst ? 'R' : '.',
431 NIPQUAD(cp->caddr), ntohs(cp->cport), 536 IP_VS_DBG_ADDR(cp->af, &cp->daddr),
432 tcp_state_name(cp->state), 537 ntohs(cp->dport),
433 tcp_state_name(new_state), 538 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
434 atomic_read(&cp->refcnt)); 539 ntohs(cp->cport),
540 tcp_state_name(cp->state),
541 tcp_state_name(new_state),
542 atomic_read(&cp->refcnt));
543
435 if (dest) { 544 if (dest) {
436 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) && 545 if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
437 (new_state != IP_VS_TCP_S_ESTABLISHED)) { 546 (new_state != IP_VS_TCP_S_ESTABLISHED)) {
@@ -461,7 +570,13 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
461{ 570{
462 struct tcphdr _tcph, *th; 571 struct tcphdr _tcph, *th;
463 572
464 th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_tcph), &_tcph); 573#ifdef CONFIG_IP_VS_IPV6
574 int ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
575#else
576 int ihl = ip_hdrlen(skb);
577#endif
578
579 th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
465 if (th == NULL) 580 if (th == NULL)
466 return 0; 581 return 0;
467 582
@@ -546,12 +661,15 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
546 break; 661 break;
547 spin_unlock(&tcp_app_lock); 662 spin_unlock(&tcp_app_lock);
548 663
549 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" 664 IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
550 "%u.%u.%u.%u:%u to app %s on port %u\n", 665 "%s:%u to app %s on port %u\n",
551 __func__, 666 __func__,
552 NIPQUAD(cp->caddr), ntohs(cp->cport), 667 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
553 NIPQUAD(cp->vaddr), ntohs(cp->vport), 668 ntohs(cp->cport),
554 inc->name, ntohs(inc->port)); 669 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
670 ntohs(cp->vport),
671 inc->name, ntohs(inc->port));
672
555 cp->app = inc; 673 cp->app = inc;
556 if (inc->init_conn) 674 if (inc->init_conn)
557 result = inc->init_conn(inc, cp); 675 result = inc->init_conn(inc, cp);
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index c6be5d56823f..6eb6039d6343 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -22,10 +22,12 @@
22 22
23#include <net/ip_vs.h> 23#include <net/ip_vs.h>
24#include <net/ip.h> 24#include <net/ip.h>
25#include <net/ip6_checksum.h>
25 26
26static struct ip_vs_conn * 27static struct ip_vs_conn *
27udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 28udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
28 const struct iphdr *iph, unsigned int proto_off, int inverse) 29 const struct ip_vs_iphdr *iph, unsigned int proto_off,
30 int inverse)
29{ 31{
30 struct ip_vs_conn *cp; 32 struct ip_vs_conn *cp;
31 __be16 _ports[2], *pptr; 33 __be16 _ports[2], *pptr;
@@ -35,13 +37,13 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
35 return NULL; 37 return NULL;
36 38
37 if (likely(!inverse)) { 39 if (likely(!inverse)) {
38 cp = ip_vs_conn_in_get(iph->protocol, 40 cp = ip_vs_conn_in_get(af, iph->protocol,
39 iph->saddr, pptr[0], 41 &iph->saddr, pptr[0],
40 iph->daddr, pptr[1]); 42 &iph->daddr, pptr[1]);
41 } else { 43 } else {
42 cp = ip_vs_conn_in_get(iph->protocol, 44 cp = ip_vs_conn_in_get(af, iph->protocol,
43 iph->daddr, pptr[1], 45 &iph->daddr, pptr[1],
44 iph->saddr, pptr[0]); 46 &iph->saddr, pptr[0]);
45 } 47 }
46 48
47 return cp; 49 return cp;
@@ -49,25 +51,25 @@ udp_conn_in_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
49 51
50 52
51static struct ip_vs_conn * 53static struct ip_vs_conn *
52udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp, 54udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
53 const struct iphdr *iph, unsigned int proto_off, int inverse) 55 const struct ip_vs_iphdr *iph, unsigned int proto_off,
56 int inverse)
54{ 57{
55 struct ip_vs_conn *cp; 58 struct ip_vs_conn *cp;
56 __be16 _ports[2], *pptr; 59 __be16 _ports[2], *pptr;
57 60
58 pptr = skb_header_pointer(skb, ip_hdrlen(skb), 61 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
59 sizeof(_ports), _ports);
60 if (pptr == NULL) 62 if (pptr == NULL)
61 return NULL; 63 return NULL;
62 64
63 if (likely(!inverse)) { 65 if (likely(!inverse)) {
64 cp = ip_vs_conn_out_get(iph->protocol, 66 cp = ip_vs_conn_out_get(af, iph->protocol,
65 iph->saddr, pptr[0], 67 &iph->saddr, pptr[0],
66 iph->daddr, pptr[1]); 68 &iph->daddr, pptr[1]);
67 } else { 69 } else {
68 cp = ip_vs_conn_out_get(iph->protocol, 70 cp = ip_vs_conn_out_get(af, iph->protocol,
69 iph->daddr, pptr[1], 71 &iph->daddr, pptr[1],
70 iph->saddr, pptr[0]); 72 &iph->saddr, pptr[0]);
71 } 73 }
72 74
73 return cp; 75 return cp;
@@ -75,21 +77,24 @@ udp_conn_out_get(const struct sk_buff *skb, struct ip_vs_protocol *pp,
75 77
76 78
77static int 79static int
78udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp, 80udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
79 int *verdict, struct ip_vs_conn **cpp) 81 int *verdict, struct ip_vs_conn **cpp)
80{ 82{
81 struct ip_vs_service *svc; 83 struct ip_vs_service *svc;
82 struct udphdr _udph, *uh; 84 struct udphdr _udph, *uh;
85 struct ip_vs_iphdr iph;
86
87 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
83 88
84 uh = skb_header_pointer(skb, ip_hdrlen(skb), 89 uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
85 sizeof(_udph), &_udph);
86 if (uh == NULL) { 90 if (uh == NULL) {
87 *verdict = NF_DROP; 91 *verdict = NF_DROP;
88 return 0; 92 return 0;
89 } 93 }
90 94
91 if ((svc = ip_vs_service_get(skb->mark, ip_hdr(skb)->protocol, 95 svc = ip_vs_service_get(af, skb->mark, iph.protocol,
92 ip_hdr(skb)->daddr, uh->dest))) { 96 &iph.daddr, uh->dest);
97 if (svc) {
93 if (ip_vs_todrop()) { 98 if (ip_vs_todrop()) {
94 /* 99 /*
95 * It seems that we are very loaded. 100 * It seems that we are very loaded.
@@ -116,23 +121,63 @@ udp_conn_schedule(struct sk_buff *skb, struct ip_vs_protocol *pp,
116 121
117 122
118static inline void 123static inline void
119udp_fast_csum_update(struct udphdr *uhdr, __be32 oldip, __be32 newip, 124udp_fast_csum_update(int af, struct udphdr *uhdr,
125 const union nf_inet_addr *oldip,
126 const union nf_inet_addr *newip,
120 __be16 oldport, __be16 newport) 127 __be16 oldport, __be16 newport)
121{ 128{
122 uhdr->check = 129#ifdef CONFIG_IP_VS_IPV6
123 csum_fold(ip_vs_check_diff4(oldip, newip, 130 if (af == AF_INET6)
124 ip_vs_check_diff2(oldport, newport, 131 uhdr->check =
125 ~csum_unfold(uhdr->check)))); 132 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
133 ip_vs_check_diff2(oldport, newport,
134 ~csum_unfold(uhdr->check))));
135 else
136#endif
137 uhdr->check =
138 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
139 ip_vs_check_diff2(oldport, newport,
140 ~csum_unfold(uhdr->check))));
126 if (!uhdr->check) 141 if (!uhdr->check)
127 uhdr->check = CSUM_MANGLED_0; 142 uhdr->check = CSUM_MANGLED_0;
128} 143}
129 144
145static inline void
146udp_partial_csum_update(int af, struct udphdr *uhdr,
147 const union nf_inet_addr *oldip,
148 const union nf_inet_addr *newip,
149 __be16 oldlen, __be16 newlen)
150{
151#ifdef CONFIG_IP_VS_IPV6
152 if (af == AF_INET6)
153 uhdr->check =
154 csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
155 ip_vs_check_diff2(oldlen, newlen,
156 ~csum_unfold(uhdr->check))));
157 else
158#endif
159 uhdr->check =
160 csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
161 ip_vs_check_diff2(oldlen, newlen,
162 ~csum_unfold(uhdr->check))));
163}
164
165
130static int 166static int
131udp_snat_handler(struct sk_buff *skb, 167udp_snat_handler(struct sk_buff *skb,
132 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 168 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
133{ 169{
134 struct udphdr *udph; 170 struct udphdr *udph;
135 const unsigned int udphoff = ip_hdrlen(skb); 171 unsigned int udphoff;
172 int oldlen;
173
174#ifdef CONFIG_IP_VS_IPV6
175 if (cp->af == AF_INET6)
176 udphoff = sizeof(struct ipv6hdr);
177 else
178#endif
179 udphoff = ip_hdrlen(skb);
180 oldlen = skb->len - udphoff;
136 181
137 /* csum_check requires unshared skb */ 182 /* csum_check requires unshared skb */
138 if (!skb_make_writable(skb, udphoff+sizeof(*udph))) 183 if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
@@ -140,7 +185,7 @@ udp_snat_handler(struct sk_buff *skb,
140 185
141 if (unlikely(cp->app != NULL)) { 186 if (unlikely(cp->app != NULL)) {
142 /* Some checks before mangling */ 187 /* Some checks before mangling */
143 if (pp->csum_check && !pp->csum_check(skb, pp)) 188 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
144 return 0; 189 return 0;
145 190
146 /* 191 /*
@@ -150,15 +195,19 @@ udp_snat_handler(struct sk_buff *skb,
150 return 0; 195 return 0;
151 } 196 }
152 197
153 udph = (void *)ip_hdr(skb) + udphoff; 198 udph = (void *)skb_network_header(skb) + udphoff;
154 udph->source = cp->vport; 199 udph->source = cp->vport;
155 200
156 /* 201 /*
157 * Adjust UDP checksums 202 * Adjust UDP checksums
158 */ 203 */
159 if (!cp->app && (udph->check != 0)) { 204 if (skb->ip_summed == CHECKSUM_PARTIAL) {
205 udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
206 htonl(oldlen),
207 htonl(skb->len - udphoff));
208 } else if (!cp->app && (udph->check != 0)) {
160 /* Only port and addr are changed, do fast csum update */ 209 /* Only port and addr are changed, do fast csum update */
161 udp_fast_csum_update(udph, cp->daddr, cp->vaddr, 210 udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
162 cp->dport, cp->vport); 211 cp->dport, cp->vport);
163 if (skb->ip_summed == CHECKSUM_COMPLETE) 212 if (skb->ip_summed == CHECKSUM_COMPLETE)
164 skb->ip_summed = CHECKSUM_NONE; 213 skb->ip_summed = CHECKSUM_NONE;
@@ -166,9 +215,19 @@ udp_snat_handler(struct sk_buff *skb,
166 /* full checksum calculation */ 215 /* full checksum calculation */
167 udph->check = 0; 216 udph->check = 0;
168 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); 217 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
169 udph->check = csum_tcpudp_magic(cp->vaddr, cp->caddr, 218#ifdef CONFIG_IP_VS_IPV6
170 skb->len - udphoff, 219 if (cp->af == AF_INET6)
171 cp->protocol, skb->csum); 220 udph->check = csum_ipv6_magic(&cp->vaddr.in6,
221 &cp->caddr.in6,
222 skb->len - udphoff,
223 cp->protocol, skb->csum);
224 else
225#endif
226 udph->check = csum_tcpudp_magic(cp->vaddr.ip,
227 cp->caddr.ip,
228 skb->len - udphoff,
229 cp->protocol,
230 skb->csum);
172 if (udph->check == 0) 231 if (udph->check == 0)
173 udph->check = CSUM_MANGLED_0; 232 udph->check = CSUM_MANGLED_0;
174 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n", 233 IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
@@ -184,7 +243,16 @@ udp_dnat_handler(struct sk_buff *skb,
184 struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 243 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
185{ 244{
186 struct udphdr *udph; 245 struct udphdr *udph;
187 unsigned int udphoff = ip_hdrlen(skb); 246 unsigned int udphoff;
247 int oldlen;
248
249#ifdef CONFIG_IP_VS_IPV6
250 if (cp->af == AF_INET6)
251 udphoff = sizeof(struct ipv6hdr);
252 else
253#endif
254 udphoff = ip_hdrlen(skb);
255 oldlen = skb->len - udphoff;
188 256
189 /* csum_check requires unshared skb */ 257 /* csum_check requires unshared skb */
190 if (!skb_make_writable(skb, udphoff+sizeof(*udph))) 258 if (!skb_make_writable(skb, udphoff+sizeof(*udph)))
@@ -192,7 +260,7 @@ udp_dnat_handler(struct sk_buff *skb,
192 260
193 if (unlikely(cp->app != NULL)) { 261 if (unlikely(cp->app != NULL)) {
194 /* Some checks before mangling */ 262 /* Some checks before mangling */
195 if (pp->csum_check && !pp->csum_check(skb, pp)) 263 if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
196 return 0; 264 return 0;
197 265
198 /* 266 /*
@@ -203,15 +271,19 @@ udp_dnat_handler(struct sk_buff *skb,
203 return 0; 271 return 0;
204 } 272 }
205 273
206 udph = (void *)ip_hdr(skb) + udphoff; 274 udph = (void *)skb_network_header(skb) + udphoff;
207 udph->dest = cp->dport; 275 udph->dest = cp->dport;
208 276
209 /* 277 /*
210 * Adjust UDP checksums 278 * Adjust UDP checksums
211 */ 279 */
212 if (!cp->app && (udph->check != 0)) { 280 if (skb->ip_summed == CHECKSUM_PARTIAL) {
281 udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
282 htonl(oldlen),
283 htonl(skb->len - udphoff));
284 } else if (!cp->app && (udph->check != 0)) {
213 /* Only port and addr are changed, do fast csum update */ 285 /* Only port and addr are changed, do fast csum update */
214 udp_fast_csum_update(udph, cp->vaddr, cp->daddr, 286 udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
215 cp->vport, cp->dport); 287 cp->vport, cp->dport);
216 if (skb->ip_summed == CHECKSUM_COMPLETE) 288 if (skb->ip_summed == CHECKSUM_COMPLETE)
217 skb->ip_summed = CHECKSUM_NONE; 289 skb->ip_summed = CHECKSUM_NONE;
@@ -219,9 +291,19 @@ udp_dnat_handler(struct sk_buff *skb,
219 /* full checksum calculation */ 291 /* full checksum calculation */
220 udph->check = 0; 292 udph->check = 0;
221 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); 293 skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
222 udph->check = csum_tcpudp_magic(cp->caddr, cp->daddr, 294#ifdef CONFIG_IP_VS_IPV6
223 skb->len - udphoff, 295 if (cp->af == AF_INET6)
224 cp->protocol, skb->csum); 296 udph->check = csum_ipv6_magic(&cp->caddr.in6,
297 &cp->daddr.in6,
298 skb->len - udphoff,
299 cp->protocol, skb->csum);
300 else
301#endif
302 udph->check = csum_tcpudp_magic(cp->caddr.ip,
303 cp->daddr.ip,
304 skb->len - udphoff,
305 cp->protocol,
306 skb->csum);
225 if (udph->check == 0) 307 if (udph->check == 0)
226 udph->check = CSUM_MANGLED_0; 308 udph->check = CSUM_MANGLED_0;
227 skb->ip_summed = CHECKSUM_UNNECESSARY; 309 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -231,10 +313,17 @@ udp_dnat_handler(struct sk_buff *skb,
231 313
232 314
233static int 315static int
234udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp) 316udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
235{ 317{
236 struct udphdr _udph, *uh; 318 struct udphdr _udph, *uh;
237 const unsigned int udphoff = ip_hdrlen(skb); 319 unsigned int udphoff;
320
321#ifdef CONFIG_IP_VS_IPV6
322 if (af == AF_INET6)
323 udphoff = sizeof(struct ipv6hdr);
324 else
325#endif
326 udphoff = ip_hdrlen(skb);
238 327
239 uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph); 328 uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
240 if (uh == NULL) 329 if (uh == NULL)
@@ -246,15 +335,28 @@ udp_csum_check(struct sk_buff *skb, struct ip_vs_protocol *pp)
246 skb->csum = skb_checksum(skb, udphoff, 335 skb->csum = skb_checksum(skb, udphoff,
247 skb->len - udphoff, 0); 336 skb->len - udphoff, 0);
248 case CHECKSUM_COMPLETE: 337 case CHECKSUM_COMPLETE:
249 if (csum_tcpudp_magic(ip_hdr(skb)->saddr, 338#ifdef CONFIG_IP_VS_IPV6
250 ip_hdr(skb)->daddr, 339 if (af == AF_INET6) {
251 skb->len - udphoff, 340 if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
252 ip_hdr(skb)->protocol, 341 &ipv6_hdr(skb)->daddr,
253 skb->csum)) { 342 skb->len - udphoff,
254 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 343 ipv6_hdr(skb)->nexthdr,
255 "Failed checksum for"); 344 skb->csum)) {
256 return 0; 345 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
257 } 346 "Failed checksum for");
347 return 0;
348 }
349 } else
350#endif
351 if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
352 ip_hdr(skb)->daddr,
353 skb->len - udphoff,
354 ip_hdr(skb)->protocol,
355 skb->csum)) {
356 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
357 "Failed checksum for");
358 return 0;
359 }
258 break; 360 break;
259 default: 361 default:
260 /* No need to checksum. */ 362 /* No need to checksum. */
@@ -340,12 +442,15 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
340 break; 442 break;
341 spin_unlock(&udp_app_lock); 443 spin_unlock(&udp_app_lock);
342 444
343 IP_VS_DBG(9, "%s: Binding conn %u.%u.%u.%u:%u->" 445 IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
344 "%u.%u.%u.%u:%u to app %s on port %u\n", 446 "%s:%u to app %s on port %u\n",
345 __func__, 447 __func__,
346 NIPQUAD(cp->caddr), ntohs(cp->cport), 448 IP_VS_DBG_ADDR(cp->af, &cp->caddr),
347 NIPQUAD(cp->vaddr), ntohs(cp->vport), 449 ntohs(cp->cport),
348 inc->name, ntohs(inc->port)); 450 IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
451 ntohs(cp->vport),
452 inc->name, ntohs(inc->port));
453
349 cp->app = inc; 454 cp->app = inc;
350 if (inc->init_conn) 455 if (inc->init_conn)
351 result = inc->init_conn(inc, cp); 456 result = inc->init_conn(inc, cp);
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index 358110d17e59..a22195f68ac4 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -32,12 +32,6 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc)
32} 32}
33 33
34 34
35static int ip_vs_rr_done_svc(struct ip_vs_service *svc)
36{
37 return 0;
38}
39
40
41static int ip_vs_rr_update_svc(struct ip_vs_service *svc) 35static int ip_vs_rr_update_svc(struct ip_vs_service *svc)
42{ 36{
43 svc->sched_data = &svc->destinations; 37 svc->sched_data = &svc->destinations;
@@ -80,11 +74,11 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
80 out: 74 out:
81 svc->sched_data = q; 75 svc->sched_data = q;
82 write_unlock(&svc->sched_lock); 76 write_unlock(&svc->sched_lock);
83 IP_VS_DBG(6, "RR: server %u.%u.%u.%u:%u " 77 IP_VS_DBG_BUF(6, "RR: server %s:%u "
84 "activeconns %d refcnt %d weight %d\n", 78 "activeconns %d refcnt %d weight %d\n",
85 NIPQUAD(dest->addr), ntohs(dest->port), 79 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
86 atomic_read(&dest->activeconns), 80 atomic_read(&dest->activeconns),
87 atomic_read(&dest->refcnt), atomic_read(&dest->weight)); 81 atomic_read(&dest->refcnt), atomic_read(&dest->weight));
88 82
89 return dest; 83 return dest;
90} 84}
@@ -95,8 +89,10 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
95 .refcnt = ATOMIC_INIT(0), 89 .refcnt = ATOMIC_INIT(0),
96 .module = THIS_MODULE, 90 .module = THIS_MODULE,
97 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), 91 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
92#ifdef CONFIG_IP_VS_IPV6
93 .supports_ipv6 = 1,
94#endif
98 .init_service = ip_vs_rr_init_svc, 95 .init_service = ip_vs_rr_init_svc,
99 .done_service = ip_vs_rr_done_svc,
100 .update_service = ip_vs_rr_update_svc, 96 .update_service = ip_vs_rr_update_svc,
101 .schedule = ip_vs_rr_schedule, 97 .schedule = ip_vs_rr_schedule,
102}; 98};
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 77663d84cbd1..7d2f22f04b83 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -41,27 +41,6 @@
41#include <net/ip_vs.h> 41#include <net/ip_vs.h>
42 42
43 43
44static int
45ip_vs_sed_init_svc(struct ip_vs_service *svc)
46{
47 return 0;
48}
49
50
51static int
52ip_vs_sed_done_svc(struct ip_vs_service *svc)
53{
54 return 0;
55}
56
57
58static int
59ip_vs_sed_update_svc(struct ip_vs_service *svc)
60{
61 return 0;
62}
63
64
65static inline unsigned int 44static inline unsigned int
66ip_vs_sed_dest_overhead(struct ip_vs_dest *dest) 45ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
67{ 46{
@@ -122,12 +101,12 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
122 } 101 }
123 } 102 }
124 103
125 IP_VS_DBG(6, "SED: server %u.%u.%u.%u:%u " 104 IP_VS_DBG_BUF(6, "SED: server %s:%u "
126 "activeconns %d refcnt %d weight %d overhead %d\n", 105 "activeconns %d refcnt %d weight %d overhead %d\n",
127 NIPQUAD(least->addr), ntohs(least->port), 106 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
128 atomic_read(&least->activeconns), 107 atomic_read(&least->activeconns),
129 atomic_read(&least->refcnt), 108 atomic_read(&least->refcnt),
130 atomic_read(&least->weight), loh); 109 atomic_read(&least->weight), loh);
131 110
132 return least; 111 return least;
133} 112}
@@ -139,9 +118,9 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
139 .refcnt = ATOMIC_INIT(0), 118 .refcnt = ATOMIC_INIT(0),
140 .module = THIS_MODULE, 119 .module = THIS_MODULE,
141 .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), 120 .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
142 .init_service = ip_vs_sed_init_svc, 121#ifdef CONFIG_IP_VS_IPV6
143 .done_service = ip_vs_sed_done_svc, 122 .supports_ipv6 = 1,
144 .update_service = ip_vs_sed_update_svc, 123#endif
145 .schedule = ip_vs_sed_schedule, 124 .schedule = ip_vs_sed_schedule,
146}; 125};
147 126
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index 7b979e228056..1d96de27fefd 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -215,7 +215,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
215 IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u " 215 IP_VS_DBG(6, "SH: source IP address %u.%u.%u.%u "
216 "--> server %u.%u.%u.%u:%d\n", 216 "--> server %u.%u.%u.%u:%d\n",
217 NIPQUAD(iph->saddr), 217 NIPQUAD(iph->saddr),
218 NIPQUAD(dest->addr), 218 NIPQUAD(dest->addr.ip),
219 ntohs(dest->port)); 219 ntohs(dest->port));
220 220
221 return dest; 221 return dest;
@@ -231,6 +231,9 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
231 .refcnt = ATOMIC_INIT(0), 231 .refcnt = ATOMIC_INIT(0),
232 .module = THIS_MODULE, 232 .module = THIS_MODULE,
233 .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), 233 .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
234#ifdef CONFIG_IP_VS_IPV6
235 .supports_ipv6 = 0,
236#endif
234 .init_service = ip_vs_sh_init_svc, 237 .init_service = ip_vs_sh_init_svc,
235 .done_service = ip_vs_sh_done_svc, 238 .done_service = ip_vs_sh_done_svc,
236 .update_service = ip_vs_sh_update_svc, 239 .update_service = ip_vs_sh_update_svc,
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index a652da2c3200..28237a5f62e2 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -256,9 +256,9 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
256 s->cport = cp->cport; 256 s->cport = cp->cport;
257 s->vport = cp->vport; 257 s->vport = cp->vport;
258 s->dport = cp->dport; 258 s->dport = cp->dport;
259 s->caddr = cp->caddr; 259 s->caddr = cp->caddr.ip;
260 s->vaddr = cp->vaddr; 260 s->vaddr = cp->vaddr.ip;
261 s->daddr = cp->daddr; 261 s->daddr = cp->daddr.ip;
262 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); 262 s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED);
263 s->state = htons(cp->state); 263 s->state = htons(cp->state);
264 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { 264 if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
@@ -366,21 +366,28 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
366 } 366 }
367 367
368 if (!(flags & IP_VS_CONN_F_TEMPLATE)) 368 if (!(flags & IP_VS_CONN_F_TEMPLATE))
369 cp = ip_vs_conn_in_get(s->protocol, 369 cp = ip_vs_conn_in_get(AF_INET, s->protocol,
370 s->caddr, s->cport, 370 (union nf_inet_addr *)&s->caddr,
371 s->vaddr, s->vport); 371 s->cport,
372 (union nf_inet_addr *)&s->vaddr,
373 s->vport);
372 else 374 else
373 cp = ip_vs_ct_in_get(s->protocol, 375 cp = ip_vs_ct_in_get(AF_INET, s->protocol,
374 s->caddr, s->cport, 376 (union nf_inet_addr *)&s->caddr,
375 s->vaddr, s->vport); 377 s->cport,
378 (union nf_inet_addr *)&s->vaddr,
379 s->vport);
376 if (!cp) { 380 if (!cp) {
377 /* 381 /*
378 * Find the appropriate destination for the connection. 382 * Find the appropriate destination for the connection.
379 * If it is not found the connection will remain unbound 383 * If it is not found the connection will remain unbound
380 * but still handled. 384 * but still handled.
381 */ 385 */
382 dest = ip_vs_find_dest(s->daddr, s->dport, 386 dest = ip_vs_find_dest(AF_INET,
383 s->vaddr, s->vport, 387 (union nf_inet_addr *)&s->daddr,
388 s->dport,
389 (union nf_inet_addr *)&s->vaddr,
390 s->vport,
384 s->protocol); 391 s->protocol);
385 /* Set the approprite ativity flag */ 392 /* Set the approprite ativity flag */
386 if (s->protocol == IPPROTO_TCP) { 393 if (s->protocol == IPPROTO_TCP) {
@@ -389,10 +396,13 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
389 else 396 else
390 flags &= ~IP_VS_CONN_F_INACTIVE; 397 flags &= ~IP_VS_CONN_F_INACTIVE;
391 } 398 }
392 cp = ip_vs_conn_new(s->protocol, 399 cp = ip_vs_conn_new(AF_INET, s->protocol,
393 s->caddr, s->cport, 400 (union nf_inet_addr *)&s->caddr,
394 s->vaddr, s->vport, 401 s->cport,
395 s->daddr, s->dport, 402 (union nf_inet_addr *)&s->vaddr,
403 s->vport,
404 (union nf_inet_addr *)&s->daddr,
405 s->dport,
396 flags, dest); 406 flags, dest);
397 if (dest) 407 if (dest)
398 atomic_dec(&dest->refcnt); 408 atomic_dec(&dest->refcnt);
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 9b0ef86bb1f7..8c596e712599 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -25,27 +25,6 @@
25#include <net/ip_vs.h> 25#include <net/ip_vs.h>
26 26
27 27
28static int
29ip_vs_wlc_init_svc(struct ip_vs_service *svc)
30{
31 return 0;
32}
33
34
35static int
36ip_vs_wlc_done_svc(struct ip_vs_service *svc)
37{
38 return 0;
39}
40
41
42static int
43ip_vs_wlc_update_svc(struct ip_vs_service *svc)
44{
45 return 0;
46}
47
48
49static inline unsigned int 28static inline unsigned int
50ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest) 29ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
51{ 30{
@@ -110,12 +89,12 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
110 } 89 }
111 } 90 }
112 91
113 IP_VS_DBG(6, "WLC: server %u.%u.%u.%u:%u " 92 IP_VS_DBG_BUF(6, "WLC: server %s:%u "
114 "activeconns %d refcnt %d weight %d overhead %d\n", 93 "activeconns %d refcnt %d weight %d overhead %d\n",
115 NIPQUAD(least->addr), ntohs(least->port), 94 IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
116 atomic_read(&least->activeconns), 95 atomic_read(&least->activeconns),
117 atomic_read(&least->refcnt), 96 atomic_read(&least->refcnt),
118 atomic_read(&least->weight), loh); 97 atomic_read(&least->weight), loh);
119 98
120 return least; 99 return least;
121} 100}
@@ -127,9 +106,9 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
127 .refcnt = ATOMIC_INIT(0), 106 .refcnt = ATOMIC_INIT(0),
128 .module = THIS_MODULE, 107 .module = THIS_MODULE,
129 .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list), 108 .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
130 .init_service = ip_vs_wlc_init_svc, 109#ifdef CONFIG_IP_VS_IPV6
131 .done_service = ip_vs_wlc_done_svc, 110 .supports_ipv6 = 1,
132 .update_service = ip_vs_wlc_update_svc, 111#endif
133 .schedule = ip_vs_wlc_schedule, 112 .schedule = ip_vs_wlc_schedule,
134}; 113};
135 114
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 0d86a79b87b5..7ea92fed50bf 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -195,12 +195,12 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
195 } 195 }
196 } 196 }
197 197
198 IP_VS_DBG(6, "WRR: server %u.%u.%u.%u:%u " 198 IP_VS_DBG_BUF(6, "WRR: server %s:%u "
199 "activeconns %d refcnt %d weight %d\n", 199 "activeconns %d refcnt %d weight %d\n",
200 NIPQUAD(dest->addr), ntohs(dest->port), 200 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
201 atomic_read(&dest->activeconns), 201 atomic_read(&dest->activeconns),
202 atomic_read(&dest->refcnt), 202 atomic_read(&dest->refcnt),
203 atomic_read(&dest->weight)); 203 atomic_read(&dest->weight));
204 204
205 out: 205 out:
206 write_unlock(&svc->sched_lock); 206 write_unlock(&svc->sched_lock);
@@ -213,6 +213,9 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
213 .refcnt = ATOMIC_INIT(0), 213 .refcnt = ATOMIC_INIT(0),
214 .module = THIS_MODULE, 214 .module = THIS_MODULE,
215 .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), 215 .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
216#ifdef CONFIG_IP_VS_IPV6
217 .supports_ipv6 = 1,
218#endif
216 .init_service = ip_vs_wrr_init_svc, 219 .init_service = ip_vs_wrr_init_svc,
217 .done_service = ip_vs_wrr_done_svc, 220 .done_service = ip_vs_wrr_done_svc,
218 .update_service = ip_vs_wrr_update_svc, 221 .update_service = ip_vs_wrr_update_svc,
diff --git a/net/ipv4/ipvs/ip_vs_xmit.c b/net/ipv4/ipvs/ip_vs_xmit.c
index 9892d4aca42e..02ddc2b3ce2e 100644
--- a/net/ipv4/ipvs/ip_vs_xmit.c
+++ b/net/ipv4/ipvs/ip_vs_xmit.c
@@ -20,6 +20,9 @@
20#include <net/udp.h> 20#include <net/udp.h>
21#include <net/icmp.h> /* for icmp_send */ 21#include <net/icmp.h> /* for icmp_send */
22#include <net/route.h> /* for ip_route_output */ 22#include <net/route.h> /* for ip_route_output */
23#include <net/ipv6.h>
24#include <net/ip6_route.h>
25#include <linux/icmpv6.h>
23#include <linux/netfilter.h> 26#include <linux/netfilter.h>
24#include <linux/netfilter_ipv4.h> 27#include <linux/netfilter_ipv4.h>
25 28
@@ -47,7 +50,8 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos, u32 cookie)
47 50
48 if (!dst) 51 if (!dst)
49 return NULL; 52 return NULL;
50 if ((dst->obsolete || rtos != dest->dst_rtos) && 53 if ((dst->obsolete
54 || (dest->af == AF_INET && rtos != dest->dst_rtos)) &&
51 dst->ops->check(dst, cookie) == NULL) { 55 dst->ops->check(dst, cookie) == NULL) {
52 dest->dst_cache = NULL; 56 dest->dst_cache = NULL;
53 dst_release(dst); 57 dst_release(dst);
@@ -71,7 +75,7 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
71 .oif = 0, 75 .oif = 0,
72 .nl_u = { 76 .nl_u = {
73 .ip4_u = { 77 .ip4_u = {
74 .daddr = dest->addr, 78 .daddr = dest->addr.ip,
75 .saddr = 0, 79 .saddr = 0,
76 .tos = rtos, } }, 80 .tos = rtos, } },
77 }; 81 };
@@ -80,12 +84,12 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
80 spin_unlock(&dest->dst_lock); 84 spin_unlock(&dest->dst_lock);
81 IP_VS_DBG_RL("ip_route_output error, " 85 IP_VS_DBG_RL("ip_route_output error, "
82 "dest: %u.%u.%u.%u\n", 86 "dest: %u.%u.%u.%u\n",
83 NIPQUAD(dest->addr)); 87 NIPQUAD(dest->addr.ip));
84 return NULL; 88 return NULL;
85 } 89 }
86 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); 90 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst));
87 IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n", 91 IP_VS_DBG(10, "new dst %u.%u.%u.%u, refcnt=%d, rtos=%X\n",
88 NIPQUAD(dest->addr), 92 NIPQUAD(dest->addr.ip),
89 atomic_read(&rt->u.dst.__refcnt), rtos); 93 atomic_read(&rt->u.dst.__refcnt), rtos);
90 } 94 }
91 spin_unlock(&dest->dst_lock); 95 spin_unlock(&dest->dst_lock);
@@ -94,14 +98,14 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
94 .oif = 0, 98 .oif = 0,
95 .nl_u = { 99 .nl_u = {
96 .ip4_u = { 100 .ip4_u = {
97 .daddr = cp->daddr, 101 .daddr = cp->daddr.ip,
98 .saddr = 0, 102 .saddr = 0,
99 .tos = rtos, } }, 103 .tos = rtos, } },
100 }; 104 };
101 105
102 if (ip_route_output_key(&init_net, &rt, &fl)) { 106 if (ip_route_output_key(&init_net, &rt, &fl)) {
103 IP_VS_DBG_RL("ip_route_output error, dest: " 107 IP_VS_DBG_RL("ip_route_output error, dest: "
104 "%u.%u.%u.%u\n", NIPQUAD(cp->daddr)); 108 "%u.%u.%u.%u\n", NIPQUAD(cp->daddr.ip));
105 return NULL; 109 return NULL;
106 } 110 }
107 } 111 }
@@ -109,6 +113,70 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
109 return rt; 113 return rt;
110} 114}
111 115
116#ifdef CONFIG_IP_VS_IPV6
117static struct rt6_info *
118__ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
119{
120 struct rt6_info *rt; /* Route to the other host */
121 struct ip_vs_dest *dest = cp->dest;
122
123 if (dest) {
124 spin_lock(&dest->dst_lock);
125 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0, 0);
126 if (!rt) {
127 struct flowi fl = {
128 .oif = 0,
129 .nl_u = {
130 .ip6_u = {
131 .daddr = dest->addr.in6,
132 .saddr = {
133 .s6_addr32 =
134 { 0, 0, 0, 0 },
135 },
136 },
137 },
138 };
139
140 rt = (struct rt6_info *)ip6_route_output(&init_net,
141 NULL, &fl);
142 if (!rt) {
143 spin_unlock(&dest->dst_lock);
144 IP_VS_DBG_RL("ip6_route_output error, "
145 "dest: " NIP6_FMT "\n",
146 NIP6(dest->addr.in6));
147 return NULL;
148 }
149 __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst));
150 IP_VS_DBG(10, "new dst " NIP6_FMT ", refcnt=%d\n",
151 NIP6(dest->addr.in6),
152 atomic_read(&rt->u.dst.__refcnt));
153 }
154 spin_unlock(&dest->dst_lock);
155 } else {
156 struct flowi fl = {
157 .oif = 0,
158 .nl_u = {
159 .ip6_u = {
160 .daddr = cp->daddr.in6,
161 .saddr = {
162 .s6_addr32 = { 0, 0, 0, 0 },
163 },
164 },
165 },
166 };
167
168 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
169 if (!rt) {
170 IP_VS_DBG_RL("ip6_route_output error, dest: "
171 NIP6_FMT "\n", NIP6(cp->daddr.in6));
172 return NULL;
173 }
174 }
175
176 return rt;
177}
178#endif
179
112 180
113/* 181/*
114 * Release dest->dst_cache before a dest is removed 182 * Release dest->dst_cache before a dest is removed
@@ -123,11 +191,11 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
123 dst_release(old_dst); 191 dst_release(old_dst);
124} 192}
125 193
126#define IP_VS_XMIT(skb, rt) \ 194#define IP_VS_XMIT(pf, skb, rt) \
127do { \ 195do { \
128 (skb)->ipvs_property = 1; \ 196 (skb)->ipvs_property = 1; \
129 skb_forward_csum(skb); \ 197 skb_forward_csum(skb); \
130 NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, (skb), NULL, \ 198 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
131 (rt)->u.dst.dev, dst_output); \ 199 (rt)->u.dst.dev, dst_output); \
132} while (0) 200} while (0)
133 201
@@ -200,7 +268,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
200 /* Another hack: avoid icmp_send in ip_fragment */ 268 /* Another hack: avoid icmp_send in ip_fragment */
201 skb->local_df = 1; 269 skb->local_df = 1;
202 270
203 IP_VS_XMIT(skb, rt); 271 IP_VS_XMIT(PF_INET, skb, rt);
204 272
205 LeaveFunction(10); 273 LeaveFunction(10);
206 return NF_STOLEN; 274 return NF_STOLEN;
@@ -213,6 +281,70 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
213 return NF_STOLEN; 281 return NF_STOLEN;
214} 282}
215 283
284#ifdef CONFIG_IP_VS_IPV6
285int
286ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
287 struct ip_vs_protocol *pp)
288{
289 struct rt6_info *rt; /* Route to the other host */
290 struct ipv6hdr *iph = ipv6_hdr(skb);
291 int mtu;
292 struct flowi fl = {
293 .oif = 0,
294 .nl_u = {
295 .ip6_u = {
296 .daddr = iph->daddr,
297 .saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
298 };
299
300 EnterFunction(10);
301
302 rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
303 if (!rt) {
304 IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, "
305 "dest: " NIP6_FMT "\n", NIP6(iph->daddr));
306 goto tx_error_icmp;
307 }
308
309 /* MTU checking */
310 mtu = dst_mtu(&rt->u.dst);
311 if (skb->len > mtu) {
312 dst_release(&rt->u.dst);
313 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
314 IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n");
315 goto tx_error;
316 }
317
318 /*
319 * Call ip_send_check because we are not sure it is called
320 * after ip_defrag. Is copy-on-write needed?
321 */
322 skb = skb_share_check(skb, GFP_ATOMIC);
323 if (unlikely(skb == NULL)) {
324 dst_release(&rt->u.dst);
325 return NF_STOLEN;
326 }
327
328 /* drop old route */
329 dst_release(skb->dst);
330 skb->dst = &rt->u.dst;
331
332 /* Another hack: avoid icmp_send in ip_fragment */
333 skb->local_df = 1;
334
335 IP_VS_XMIT(PF_INET6, skb, rt);
336
337 LeaveFunction(10);
338 return NF_STOLEN;
339
340 tx_error_icmp:
341 dst_link_failure(skb);
342 tx_error:
343 kfree_skb(skb);
344 LeaveFunction(10);
345 return NF_STOLEN;
346}
347#endif
216 348
217/* 349/*
218 * NAT transmitter (only for outside-to-inside nat forwarding) 350 * NAT transmitter (only for outside-to-inside nat forwarding)
@@ -264,7 +396,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
264 /* mangle the packet */ 396 /* mangle the packet */
265 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 397 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
266 goto tx_error; 398 goto tx_error;
267 ip_hdr(skb)->daddr = cp->daddr; 399 ip_hdr(skb)->daddr = cp->daddr.ip;
268 ip_send_check(ip_hdr(skb)); 400 ip_send_check(ip_hdr(skb));
269 401
270 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); 402 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
@@ -276,7 +408,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
276 /* Another hack: avoid icmp_send in ip_fragment */ 408 /* Another hack: avoid icmp_send in ip_fragment */
277 skb->local_df = 1; 409 skb->local_df = 1;
278 410
279 IP_VS_XMIT(skb, rt); 411 IP_VS_XMIT(PF_INET, skb, rt);
280 412
281 LeaveFunction(10); 413 LeaveFunction(10);
282 return NF_STOLEN; 414 return NF_STOLEN;
@@ -292,6 +424,83 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
292 goto tx_error; 424 goto tx_error;
293} 425}
294 426
427#ifdef CONFIG_IP_VS_IPV6
428int
429ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
430 struct ip_vs_protocol *pp)
431{
432 struct rt6_info *rt; /* Route to the other host */
433 int mtu;
434
435 EnterFunction(10);
436
437 /* check if it is a connection of no-client-port */
438 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
439 __be16 _pt, *p;
440 p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
441 sizeof(_pt), &_pt);
442 if (p == NULL)
443 goto tx_error;
444 ip_vs_conn_fill_cport(cp, *p);
445 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
446 }
447
448 rt = __ip_vs_get_out_rt_v6(cp);
449 if (!rt)
450 goto tx_error_icmp;
451
452 /* MTU checking */
453 mtu = dst_mtu(&rt->u.dst);
454 if (skb->len > mtu) {
455 dst_release(&rt->u.dst);
456 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
457 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
458 "ip_vs_nat_xmit_v6(): frag needed for");
459 goto tx_error;
460 }
461
462 /* copy-on-write the packet before mangling it */
463 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
464 goto tx_error_put;
465
466 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
467 goto tx_error_put;
468
469 /* drop old route */
470 dst_release(skb->dst);
471 skb->dst = &rt->u.dst;
472
473 /* mangle the packet */
474 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
475 goto tx_error;
476 ipv6_hdr(skb)->daddr = cp->daddr.in6;
477
478 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
479
480 /* FIXME: when application helper enlarges the packet and the length
481 is larger than the MTU of outgoing device, there will be still
482 MTU problem. */
483
484 /* Another hack: avoid icmp_send in ip_fragment */
485 skb->local_df = 1;
486
487 IP_VS_XMIT(PF_INET6, skb, rt);
488
489 LeaveFunction(10);
490 return NF_STOLEN;
491
492tx_error_icmp:
493 dst_link_failure(skb);
494tx_error:
495 LeaveFunction(10);
496 kfree_skb(skb);
497 return NF_STOLEN;
498tx_error_put:
499 dst_release(&rt->u.dst);
500 goto tx_error;
501}
502#endif
503
295 504
296/* 505/*
297 * IP Tunneling transmitter 506 * IP Tunneling transmitter
@@ -423,6 +632,112 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
423 return NF_STOLEN; 632 return NF_STOLEN;
424} 633}
425 634
635#ifdef CONFIG_IP_VS_IPV6
636int
637ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
638 struct ip_vs_protocol *pp)
639{
640 struct rt6_info *rt; /* Route to the other host */
641 struct net_device *tdev; /* Device to other host */
642 struct ipv6hdr *old_iph = ipv6_hdr(skb);
643 sk_buff_data_t old_transport_header = skb->transport_header;
644 struct ipv6hdr *iph; /* Our new IP header */
645 unsigned int max_headroom; /* The extra header space needed */
646 int mtu;
647
648 EnterFunction(10);
649
650 if (skb->protocol != htons(ETH_P_IPV6)) {
651 IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, "
652 "ETH_P_IPV6: %d, skb protocol: %d\n",
653 htons(ETH_P_IPV6), skb->protocol);
654 goto tx_error;
655 }
656
657 rt = __ip_vs_get_out_rt_v6(cp);
658 if (!rt)
659 goto tx_error_icmp;
660
661 tdev = rt->u.dst.dev;
662
663 mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr);
664 /* TODO IPv6: do we need this check in IPv6? */
665 if (mtu < 1280) {
666 dst_release(&rt->u.dst);
667 IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n");
668 goto tx_error;
669 }
670 if (skb->dst)
671 skb->dst->ops->update_pmtu(skb->dst, mtu);
672
673 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
674 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
675 dst_release(&rt->u.dst);
676 IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n");
677 goto tx_error;
678 }
679
680 /*
681 * Okay, now see if we can stuff it in the buffer as-is.
682 */
683 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
684
685 if (skb_headroom(skb) < max_headroom
686 || skb_cloned(skb) || skb_shared(skb)) {
687 struct sk_buff *new_skb =
688 skb_realloc_headroom(skb, max_headroom);
689 if (!new_skb) {
690 dst_release(&rt->u.dst);
691 kfree_skb(skb);
692 IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n");
693 return NF_STOLEN;
694 }
695 kfree_skb(skb);
696 skb = new_skb;
697 old_iph = ipv6_hdr(skb);
698 }
699
700 skb->transport_header = old_transport_header;
701
702 skb_push(skb, sizeof(struct ipv6hdr));
703 skb_reset_network_header(skb);
704 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
705
706 /* drop old route */
707 dst_release(skb->dst);
708 skb->dst = &rt->u.dst;
709
710 /*
711 * Push down and install the IPIP header.
712 */
713 iph = ipv6_hdr(skb);
714 iph->version = 6;
715 iph->nexthdr = IPPROTO_IPV6;
716 iph->payload_len = old_iph->payload_len + sizeof(old_iph);
717 iph->priority = old_iph->priority;
718 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
719 iph->daddr = rt->rt6i_dst.addr;
720 iph->saddr = cp->vaddr.in6; /* rt->rt6i_src.addr; */
721 iph->hop_limit = old_iph->hop_limit;
722
723 /* Another hack: avoid icmp_send in ip_fragment */
724 skb->local_df = 1;
725
726 ip6_local_out(skb);
727
728 LeaveFunction(10);
729
730 return NF_STOLEN;
731
732tx_error_icmp:
733 dst_link_failure(skb);
734tx_error:
735 kfree_skb(skb);
736 LeaveFunction(10);
737 return NF_STOLEN;
738}
739#endif
740
426 741
427/* 742/*
428 * Direct Routing transmitter 743 * Direct Routing transmitter
@@ -467,7 +782,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
467 /* Another hack: avoid icmp_send in ip_fragment */ 782 /* Another hack: avoid icmp_send in ip_fragment */
468 skb->local_df = 1; 783 skb->local_df = 1;
469 784
470 IP_VS_XMIT(skb, rt); 785 IP_VS_XMIT(PF_INET, skb, rt);
471 786
472 LeaveFunction(10); 787 LeaveFunction(10);
473 return NF_STOLEN; 788 return NF_STOLEN;
@@ -480,6 +795,60 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
480 return NF_STOLEN; 795 return NF_STOLEN;
481} 796}
482 797
798#ifdef CONFIG_IP_VS_IPV6
799int
800ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
801 struct ip_vs_protocol *pp)
802{
803 struct rt6_info *rt; /* Route to the other host */
804 int mtu;
805
806 EnterFunction(10);
807
808 rt = __ip_vs_get_out_rt_v6(cp);
809 if (!rt)
810 goto tx_error_icmp;
811
812 /* MTU checking */
813 mtu = dst_mtu(&rt->u.dst);
814 if (skb->len > mtu) {
815 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
816 dst_release(&rt->u.dst);
817 IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n");
818 goto tx_error;
819 }
820
821 /*
822 * Call ip_send_check because we are not sure it is called
823 * after ip_defrag. Is copy-on-write needed?
824 */
825 skb = skb_share_check(skb, GFP_ATOMIC);
826 if (unlikely(skb == NULL)) {
827 dst_release(&rt->u.dst);
828 return NF_STOLEN;
829 }
830
831 /* drop old route */
832 dst_release(skb->dst);
833 skb->dst = &rt->u.dst;
834
835 /* Another hack: avoid icmp_send in ip_fragment */
836 skb->local_df = 1;
837
838 IP_VS_XMIT(PF_INET6, skb, rt);
839
840 LeaveFunction(10);
841 return NF_STOLEN;
842
843tx_error_icmp:
844 dst_link_failure(skb);
845tx_error:
846 kfree_skb(skb);
847 LeaveFunction(10);
848 return NF_STOLEN;
849}
850#endif
851
483 852
484/* 853/*
485 * ICMP packet transmitter 854 * ICMP packet transmitter
@@ -540,7 +909,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
540 /* Another hack: avoid icmp_send in ip_fragment */ 909 /* Another hack: avoid icmp_send in ip_fragment */
541 skb->local_df = 1; 910 skb->local_df = 1;
542 911
543 IP_VS_XMIT(skb, rt); 912 IP_VS_XMIT(PF_INET, skb, rt);
544 913
545 rc = NF_STOLEN; 914 rc = NF_STOLEN;
546 goto out; 915 goto out;
@@ -557,3 +926,79 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
557 ip_rt_put(rt); 926 ip_rt_put(rt);
558 goto tx_error; 927 goto tx_error;
559} 928}
929
930#ifdef CONFIG_IP_VS_IPV6
931int
932ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
933 struct ip_vs_protocol *pp, int offset)
934{
935 struct rt6_info *rt; /* Route to the other host */
936 int mtu;
937 int rc;
938
939 EnterFunction(10);
940
941 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
942 forwarded directly here, because there is no need to
943 translate address/port back */
944 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
945 if (cp->packet_xmit)
946 rc = cp->packet_xmit(skb, cp, pp);
947 else
948 rc = NF_ACCEPT;
949 /* do not touch skb anymore */
950 atomic_inc(&cp->in_pkts);
951 goto out;
952 }
953
954 /*
955 * mangle and send the packet here (only for VS/NAT)
956 */
957
958 rt = __ip_vs_get_out_rt_v6(cp);
959 if (!rt)
960 goto tx_error_icmp;
961
962 /* MTU checking */
963 mtu = dst_mtu(&rt->u.dst);
964 if (skb->len > mtu) {
965 dst_release(&rt->u.dst);
966 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
967 IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
968 goto tx_error;
969 }
970
971 /* copy-on-write the packet before mangling it */
972 if (!skb_make_writable(skb, offset))
973 goto tx_error_put;
974
975 if (skb_cow(skb, rt->u.dst.dev->hard_header_len))
976 goto tx_error_put;
977
978 /* drop the old route when skb is not shared */
979 dst_release(skb->dst);
980 skb->dst = &rt->u.dst;
981
982 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
983
984 /* Another hack: avoid icmp_send in ip_fragment */
985 skb->local_df = 1;
986
987 IP_VS_XMIT(PF_INET6, skb, rt);
988
989 rc = NF_STOLEN;
990 goto out;
991
992tx_error_icmp:
993 dst_link_failure(skb);
994tx_error:
995 dev_kfree_skb(skb);
996 rc = NF_STOLEN;
997out:
998 LeaveFunction(10);
999 return rc;
1000tx_error_put:
1001 dst_release(&rt->u.dst);
1002 goto tx_error;
1003}
1004#endif
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6ee5354c9aa1..f62187bb6d08 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -282,6 +282,8 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
282 struct rtable *r = NULL; 282 struct rtable *r = NULL;
283 283
284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) { 284 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
285 if (!rt_hash_table[st->bucket].chain)
286 continue;
285 rcu_read_lock_bh(); 287 rcu_read_lock_bh();
286 r = rcu_dereference(rt_hash_table[st->bucket].chain); 288 r = rcu_dereference(rt_hash_table[st->bucket].chain);
287 while (r) { 289 while (r) {
@@ -299,11 +301,14 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
299 struct rtable *r) 301 struct rtable *r)
300{ 302{
301 struct rt_cache_iter_state *st = seq->private; 303 struct rt_cache_iter_state *st = seq->private;
304
302 r = r->u.dst.rt_next; 305 r = r->u.dst.rt_next;
303 while (!r) { 306 while (!r) {
304 rcu_read_unlock_bh(); 307 rcu_read_unlock_bh();
305 if (--st->bucket < 0) 308 do {
306 break; 309 if (--st->bucket < 0)
310 return NULL;
311 } while (!rt_hash_table[st->bucket].chain);
307 rcu_read_lock_bh(); 312 rcu_read_lock_bh();
308 r = rt_hash_table[st->bucket].chain; 313 r = rt_hash_table[st->bucket].chain;
309 } 314 }
@@ -2840,7 +2845,9 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2840 if (s_h < 0) 2845 if (s_h < 0)
2841 s_h = 0; 2846 s_h = 0;
2842 s_idx = idx = cb->args[1]; 2847 s_idx = idx = cb->args[1];
2843 for (h = s_h; h <= rt_hash_mask; h++) { 2848 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2849 if (!rt_hash_table[h].chain)
2850 continue;
2844 rcu_read_lock_bh(); 2851 rcu_read_lock_bh();
2845 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt; 2852 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2846 rt = rcu_dereference(rt->u.dst.rt_next), idx++) { 2853 rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
@@ -2859,7 +2866,6 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2859 dst_release(xchg(&skb->dst, NULL)); 2866 dst_release(xchg(&skb->dst, NULL));
2860 } 2867 }
2861 rcu_read_unlock_bh(); 2868 rcu_read_unlock_bh();
2862 s_idx = 0;
2863 } 2869 }
2864 2870
2865done: 2871done:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 67ccce2a96bd..3b76bce769dd 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -979,6 +979,39 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
979 } 979 }
980} 980}
981 981
982/* This must be called before lost_out is incremented */
983static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
984{
985 if ((tp->retransmit_skb_hint == NULL) ||
986 before(TCP_SKB_CB(skb)->seq,
987 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
988 tp->retransmit_skb_hint = skb;
989
990 if (!tp->lost_out ||
991 after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
992 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
993}
994
995static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
996{
997 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
998 tcp_verify_retransmit_hint(tp, skb);
999
1000 tp->lost_out += tcp_skb_pcount(skb);
1001 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1002 }
1003}
1004
1005void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
1006{
1007 tcp_verify_retransmit_hint(tp, skb);
1008
1009 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1010 tp->lost_out += tcp_skb_pcount(skb);
1011 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1012 }
1013}
1014
982/* This procedure tags the retransmission queue when SACKs arrive. 1015/* This procedure tags the retransmission queue when SACKs arrive.
983 * 1016 *
984 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). 1017 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
@@ -1155,13 +1188,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
1155 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1188 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1156 tp->retrans_out -= tcp_skb_pcount(skb); 1189 tp->retrans_out -= tcp_skb_pcount(skb);
1157 1190
1158 /* clear lost hint */ 1191 tcp_skb_mark_lost_uncond_verify(tp, skb);
1159 tp->retransmit_skb_hint = NULL;
1160
1161 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1162 tp->lost_out += tcp_skb_pcount(skb);
1163 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1164 }
1165 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT); 1192 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
1166 } else { 1193 } else {
1167 if (before(ack_seq, new_low_seq)) 1194 if (before(ack_seq, new_low_seq))
@@ -1271,9 +1298,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1271 ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); 1298 ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1272 tp->lost_out -= tcp_skb_pcount(skb); 1299 tp->lost_out -= tcp_skb_pcount(skb);
1273 tp->retrans_out -= tcp_skb_pcount(skb); 1300 tp->retrans_out -= tcp_skb_pcount(skb);
1274
1275 /* clear lost hint */
1276 tp->retransmit_skb_hint = NULL;
1277 } 1301 }
1278 } else { 1302 } else {
1279 if (!(sacked & TCPCB_RETRANS)) { 1303 if (!(sacked & TCPCB_RETRANS)) {
@@ -1292,9 +1316,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1292 if (sacked & TCPCB_LOST) { 1316 if (sacked & TCPCB_LOST) {
1293 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; 1317 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1294 tp->lost_out -= tcp_skb_pcount(skb); 1318 tp->lost_out -= tcp_skb_pcount(skb);
1295
1296 /* clear lost hint */
1297 tp->retransmit_skb_hint = NULL;
1298 } 1319 }
1299 } 1320 }
1300 1321
@@ -1324,7 +1345,6 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1324 if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) { 1345 if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) {
1325 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1346 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1326 tp->retrans_out -= tcp_skb_pcount(skb); 1347 tp->retrans_out -= tcp_skb_pcount(skb);
1327 tp->retransmit_skb_hint = NULL;
1328 } 1348 }
1329 1349
1330 return flag; 1350 return flag;
@@ -1726,6 +1746,8 @@ int tcp_use_frto(struct sock *sk)
1726 return 0; 1746 return 0;
1727 1747
1728 skb = tcp_write_queue_head(sk); 1748 skb = tcp_write_queue_head(sk);
1749 if (tcp_skb_is_last(sk, skb))
1750 return 1;
1729 skb = tcp_write_queue_next(sk, skb); /* Skips head */ 1751 skb = tcp_write_queue_next(sk, skb); /* Skips head */
1730 tcp_for_write_queue_from(skb, sk) { 1752 tcp_for_write_queue_from(skb, sk) {
1731 if (skb == tcp_send_head(sk)) 1753 if (skb == tcp_send_head(sk))
@@ -1867,6 +1889,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
1867 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { 1889 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1868 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1890 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1869 tp->lost_out += tcp_skb_pcount(skb); 1891 tp->lost_out += tcp_skb_pcount(skb);
1892 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
1870 } 1893 }
1871 } 1894 }
1872 tcp_verify_left_out(tp); 1895 tcp_verify_left_out(tp);
@@ -1883,7 +1906,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
1883 tp->high_seq = tp->snd_nxt; 1906 tp->high_seq = tp->snd_nxt;
1884 TCP_ECN_queue_cwr(tp); 1907 TCP_ECN_queue_cwr(tp);
1885 1908
1886 tcp_clear_retrans_hints_partial(tp); 1909 tcp_clear_all_retrans_hints(tp);
1887} 1910}
1888 1911
1889static void tcp_clear_retrans_partial(struct tcp_sock *tp) 1912static void tcp_clear_retrans_partial(struct tcp_sock *tp)
@@ -1934,12 +1957,11 @@ void tcp_enter_loss(struct sock *sk, int how)
1934 /* Push undo marker, if it was plain RTO and nothing 1957 /* Push undo marker, if it was plain RTO and nothing
1935 * was retransmitted. */ 1958 * was retransmitted. */
1936 tp->undo_marker = tp->snd_una; 1959 tp->undo_marker = tp->snd_una;
1937 tcp_clear_retrans_hints_partial(tp);
1938 } else { 1960 } else {
1939 tp->sacked_out = 0; 1961 tp->sacked_out = 0;
1940 tp->fackets_out = 0; 1962 tp->fackets_out = 0;
1941 tcp_clear_all_retrans_hints(tp);
1942 } 1963 }
1964 tcp_clear_all_retrans_hints(tp);
1943 1965
1944 tcp_for_write_queue(skb, sk) { 1966 tcp_for_write_queue(skb, sk) {
1945 if (skb == tcp_send_head(sk)) 1967 if (skb == tcp_send_head(sk))
@@ -1952,6 +1974,7 @@ void tcp_enter_loss(struct sock *sk, int how)
1952 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; 1974 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
1953 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1975 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1954 tp->lost_out += tcp_skb_pcount(skb); 1976 tp->lost_out += tcp_skb_pcount(skb);
1977 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
1955 } 1978 }
1956 } 1979 }
1957 tcp_verify_left_out(tp); 1980 tcp_verify_left_out(tp);
@@ -2157,19 +2180,6 @@ static int tcp_time_to_recover(struct sock *sk)
2157 return 0; 2180 return 0;
2158} 2181}
2159 2182
2160/* RFC: This is from the original, I doubt that this is necessary at all:
2161 * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
2162 * retransmitted past LOST markings in the first place? I'm not fully sure
2163 * about undo and end of connection cases, which can cause R without L?
2164 */
2165static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
2166{
2167 if ((tp->retransmit_skb_hint != NULL) &&
2168 before(TCP_SKB_CB(skb)->seq,
2169 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
2170 tp->retransmit_skb_hint = NULL;
2171}
2172
2173/* Mark head of queue up as lost. With RFC3517 SACK, the packets is 2183/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
2174 * is against sacked "cnt", otherwise it's against facked "cnt" 2184 * is against sacked "cnt", otherwise it's against facked "cnt"
2175 */ 2185 */
@@ -2217,11 +2227,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
2217 cnt = packets; 2227 cnt = packets;
2218 } 2228 }
2219 2229
2220 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 2230 tcp_skb_mark_lost(tp, skb);
2221 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
2222 tp->lost_out += tcp_skb_pcount(skb);
2223 tcp_verify_retransmit_hint(tp, skb);
2224 }
2225 } 2231 }
2226 tcp_verify_left_out(tp); 2232 tcp_verify_left_out(tp);
2227} 2233}
@@ -2263,11 +2269,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2263 if (!tcp_skb_timedout(sk, skb)) 2269 if (!tcp_skb_timedout(sk, skb))
2264 break; 2270 break;
2265 2271
2266 if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) { 2272 tcp_skb_mark_lost(tp, skb);
2267 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
2268 tp->lost_out += tcp_skb_pcount(skb);
2269 tcp_verify_retransmit_hint(tp, skb);
2270 }
2271 } 2273 }
2272 2274
2273 tp->scoreboard_skb_hint = skb; 2275 tp->scoreboard_skb_hint = skb;
@@ -2378,10 +2380,6 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
2378 } 2380 }
2379 tcp_moderate_cwnd(tp); 2381 tcp_moderate_cwnd(tp);
2380 tp->snd_cwnd_stamp = tcp_time_stamp; 2382 tp->snd_cwnd_stamp = tcp_time_stamp;
2381
2382 /* There is something screwy going on with the retrans hints after
2383 an undo */
2384 tcp_clear_all_retrans_hints(tp);
2385} 2383}
2386 2384
2387static inline int tcp_may_undo(struct tcp_sock *tp) 2385static inline int tcp_may_undo(struct tcp_sock *tp)
@@ -2848,6 +2846,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2848 int flag = 0; 2846 int flag = 0;
2849 u32 pkts_acked = 0; 2847 u32 pkts_acked = 0;
2850 u32 reord = tp->packets_out; 2848 u32 reord = tp->packets_out;
2849 u32 prior_sacked = tp->sacked_out;
2851 s32 seq_rtt = -1; 2850 s32 seq_rtt = -1;
2852 s32 ca_seq_rtt = -1; 2851 s32 ca_seq_rtt = -1;
2853 ktime_t last_ackt = net_invalid_timestamp(); 2852 ktime_t last_ackt = net_invalid_timestamp();
@@ -2929,7 +2928,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2929 2928
2930 tcp_unlink_write_queue(skb, sk); 2929 tcp_unlink_write_queue(skb, sk);
2931 sk_wmem_free_skb(sk, skb); 2930 sk_wmem_free_skb(sk, skb);
2932 tcp_clear_all_retrans_hints(tp); 2931 tp->scoreboard_skb_hint = NULL;
2932 if (skb == tp->retransmit_skb_hint)
2933 tp->retransmit_skb_hint = NULL;
2934 if (skb == tp->lost_skb_hint)
2935 tp->lost_skb_hint = NULL;
2933 } 2936 }
2934 2937
2935 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) 2938 if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
@@ -2948,6 +2951,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets)
2948 /* Non-retransmitted hole got filled? That's reordering */ 2951 /* Non-retransmitted hole got filled? That's reordering */
2949 if (reord < prior_fackets) 2952 if (reord < prior_fackets)
2950 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 2953 tcp_update_reordering(sk, tp->fackets_out - reord, 0);
2954
2955 /* No need to care for underflows here because
2956 * the lost_skb_hint gets NULLed if we're past it
2957 * (or something non-trivial happened)
2958 */
2959 if (tcp_is_fack(tp))
2960 tp->lost_cnt_hint -= pkts_acked;
2961 else
2962 tp->lost_cnt_hint -= prior_sacked - tp->sacked_out;
2951 } 2963 }
2952 2964
2953 tp->fackets_out -= min(pkts_acked, tp->fackets_out); 2965 tp->fackets_out -= min(pkts_acked, tp->fackets_out);
@@ -3442,6 +3454,22 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3442 } 3454 }
3443} 3455}
3444 3456
3457static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
3458{
3459 __be32 *ptr = (__be32 *)(th + 1);
3460
3461 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3462 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3463 tp->rx_opt.saw_tstamp = 1;
3464 ++ptr;
3465 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3466 ++ptr;
3467 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3468 return 1;
3469 }
3470 return 0;
3471}
3472
3445/* Fast parse options. This hopes to only see timestamps. 3473/* Fast parse options. This hopes to only see timestamps.
3446 * If it is wrong it falls back on tcp_parse_options(). 3474 * If it is wrong it falls back on tcp_parse_options().
3447 */ 3475 */
@@ -3453,16 +3481,8 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3453 return 0; 3481 return 0;
3454 } else if (tp->rx_opt.tstamp_ok && 3482 } else if (tp->rx_opt.tstamp_ok &&
3455 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) { 3483 th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
3456 __be32 *ptr = (__be32 *)(th + 1); 3484 if (tcp_parse_aligned_timestamp(tp, th))
3457 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3458 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3459 tp->rx_opt.saw_tstamp = 1;
3460 ++ptr;
3461 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3462 ++ptr;
3463 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3464 return 1; 3485 return 1;
3465 }
3466 } 3486 }
3467 tcp_parse_options(skb, &tp->rx_opt, 1); 3487 tcp_parse_options(skb, &tp->rx_opt, 1);
3468 return 1; 3488 return 1;
@@ -4138,7 +4158,7 @@ drop:
4138 skb1 = skb1->prev; 4158 skb1 = skb1->prev;
4139 } 4159 }
4140 } 4160 }
4141 __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue); 4161 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4142 4162
4143 /* And clean segments covered by new one as whole. */ 4163 /* And clean segments covered by new one as whole. */
4144 while ((skb1 = skb->next) != 4164 while ((skb1 = skb->next) !=
@@ -4161,6 +4181,18 @@ add_sack:
4161 } 4181 }
4162} 4182}
4163 4183
4184static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4185 struct sk_buff_head *list)
4186{
4187 struct sk_buff *next = skb->next;
4188
4189 __skb_unlink(skb, list);
4190 __kfree_skb(skb);
4191 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4192
4193 return next;
4194}
4195
4164/* Collapse contiguous sequence of skbs head..tail with 4196/* Collapse contiguous sequence of skbs head..tail with
4165 * sequence numbers start..end. 4197 * sequence numbers start..end.
4166 * Segments with FIN/SYN are not collapsed (only because this 4198 * Segments with FIN/SYN are not collapsed (only because this
@@ -4178,11 +4210,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4178 for (skb = head; skb != tail;) { 4210 for (skb = head; skb != tail;) {
4179 /* No new bits? It is possible on ofo queue. */ 4211 /* No new bits? It is possible on ofo queue. */
4180 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4212 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4181 struct sk_buff *next = skb->next; 4213 skb = tcp_collapse_one(sk, skb, list);
4182 __skb_unlink(skb, list);
4183 __kfree_skb(skb);
4184 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4185 skb = next;
4186 continue; 4214 continue;
4187 } 4215 }
4188 4216
@@ -4228,7 +4256,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4228 memcpy(nskb->head, skb->head, header); 4256 memcpy(nskb->head, skb->head, header);
4229 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 4257 memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
4230 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; 4258 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
4231 __skb_insert(nskb, skb->prev, skb, list); 4259 __skb_queue_before(list, skb, nskb);
4232 skb_set_owner_r(nskb, sk); 4260 skb_set_owner_r(nskb, sk);
4233 4261
4234 /* Copy data, releasing collapsed skbs. */ 4262 /* Copy data, releasing collapsed skbs. */
@@ -4246,11 +4274,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4246 start += size; 4274 start += size;
4247 } 4275 }
4248 if (!before(start, TCP_SKB_CB(skb)->end_seq)) { 4276 if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4249 struct sk_buff *next = skb->next; 4277 skb = tcp_collapse_one(sk, skb, list);
4250 __skb_unlink(skb, list);
4251 __kfree_skb(skb);
4252 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4253 skb = next;
4254 if (skb == tail || 4278 if (skb == tail ||
4255 tcp_hdr(skb)->syn || 4279 tcp_hdr(skb)->syn ||
4256 tcp_hdr(skb)->fin) 4280 tcp_hdr(skb)->fin)
@@ -4691,6 +4715,67 @@ out:
4691} 4715}
4692#endif /* CONFIG_NET_DMA */ 4716#endif /* CONFIG_NET_DMA */
4693 4717
4718/* Does PAWS and seqno based validation of an incoming segment, flags will
4719 * play significant role here.
4720 */
4721static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
4722 struct tcphdr *th, int syn_inerr)
4723{
4724 struct tcp_sock *tp = tcp_sk(sk);
4725
4726 /* RFC1323: H1. Apply PAWS check first. */
4727 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4728 tcp_paws_discard(sk, skb)) {
4729 if (!th->rst) {
4730 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4731 tcp_send_dupack(sk, skb);
4732 goto discard;
4733 }
4734 /* Reset is accepted even if it did not pass PAWS. */
4735 }
4736
4737 /* Step 1: check sequence number */
4738 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
4739 /* RFC793, page 37: "In all states except SYN-SENT, all reset
4740 * (RST) segments are validated by checking their SEQ-fields."
4741 * And page 69: "If an incoming segment is not acceptable,
4742 * an acknowledgment should be sent in reply (unless the RST
4743 * bit is set, if so drop the segment and return)".
4744 */
4745 if (!th->rst)
4746 tcp_send_dupack(sk, skb);
4747 goto discard;
4748 }
4749
4750 /* Step 2: check RST bit */
4751 if (th->rst) {
4752 tcp_reset(sk);
4753 goto discard;
4754 }
4755
4756 /* ts_recent update must be made after we are sure that the packet
4757 * is in window.
4758 */
4759 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4760
4761 /* step 3: check security and precedence [ignored] */
4762
4763 /* step 4: Check for a SYN in window. */
4764 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4765 if (syn_inerr)
4766 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4767 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4768 tcp_reset(sk);
4769 return -1;
4770 }
4771
4772 return 1;
4773
4774discard:
4775 __kfree_skb(skb);
4776 return 0;
4777}
4778
4694/* 4779/*
4695 * TCP receive function for the ESTABLISHED state. 4780 * TCP receive function for the ESTABLISHED state.
4696 * 4781 *
@@ -4718,6 +4803,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4718 struct tcphdr *th, unsigned len) 4803 struct tcphdr *th, unsigned len)
4719{ 4804{
4720 struct tcp_sock *tp = tcp_sk(sk); 4805 struct tcp_sock *tp = tcp_sk(sk);
4806 int res;
4721 4807
4722 /* 4808 /*
4723 * Header prediction. 4809 * Header prediction.
@@ -4756,19 +4842,10 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
4756 4842
4757 /* Check timestamp */ 4843 /* Check timestamp */
4758 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { 4844 if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
4759 __be32 *ptr = (__be32 *)(th + 1);
4760
4761 /* No? Slow path! */ 4845 /* No? Slow path! */
4762 if (*ptr != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 4846 if (!tcp_parse_aligned_timestamp(tp, th))
4763 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
4764 goto slow_path; 4847 goto slow_path;
4765 4848
4766 tp->rx_opt.saw_tstamp = 1;
4767 ++ptr;
4768 tp->rx_opt.rcv_tsval = ntohl(*ptr);
4769 ++ptr;
4770 tp->rx_opt.rcv_tsecr = ntohl(*ptr);
4771
4772 /* If PAWS failed, check it more carefully in slow path */ 4849 /* If PAWS failed, check it more carefully in slow path */
4773 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) 4850 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
4774 goto slow_path; 4851 goto slow_path;
@@ -4899,51 +4976,12 @@ slow_path:
4899 goto csum_error; 4976 goto csum_error;
4900 4977
4901 /* 4978 /*
4902 * RFC1323: H1. Apply PAWS check first.
4903 */
4904 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
4905 tcp_paws_discard(sk, skb)) {
4906 if (!th->rst) {
4907 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
4908 tcp_send_dupack(sk, skb);
4909 goto discard;
4910 }
4911 /* Resets are accepted even if PAWS failed.
4912
4913 ts_recent update must be made after we are sure
4914 that the packet is in window.
4915 */
4916 }
4917
4918 /*
4919 * Standard slow path. 4979 * Standard slow path.
4920 */ 4980 */
4921 4981
4922 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) { 4982 res = tcp_validate_incoming(sk, skb, th, 1);
4923 /* RFC793, page 37: "In all states except SYN-SENT, all reset 4983 if (res <= 0)
4924 * (RST) segments are validated by checking their SEQ-fields." 4984 return -res;
4925 * And page 69: "If an incoming segment is not acceptable,
4926 * an acknowledgment should be sent in reply (unless the RST bit
4927 * is set, if so drop the segment and return)".
4928 */
4929 if (!th->rst)
4930 tcp_send_dupack(sk, skb);
4931 goto discard;
4932 }
4933
4934 if (th->rst) {
4935 tcp_reset(sk);
4936 goto discard;
4937 }
4938
4939 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
4940
4941 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4942 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
4943 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
4944 tcp_reset(sk);
4945 return 1;
4946 }
4947 4985
4948step5: 4986step5:
4949 if (th->ack) 4987 if (th->ack)
@@ -5225,6 +5263,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5225 struct tcp_sock *tp = tcp_sk(sk); 5263 struct tcp_sock *tp = tcp_sk(sk);
5226 struct inet_connection_sock *icsk = inet_csk(sk); 5264 struct inet_connection_sock *icsk = inet_csk(sk);
5227 int queued = 0; 5265 int queued = 0;
5266 int res;
5228 5267
5229 tp->rx_opt.saw_tstamp = 0; 5268 tp->rx_opt.saw_tstamp = 0;
5230 5269
@@ -5277,42 +5316,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5277 return 0; 5316 return 0;
5278 } 5317 }
5279 5318
5280 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 5319 res = tcp_validate_incoming(sk, skb, th, 0);
5281 tcp_paws_discard(sk, skb)) { 5320 if (res <= 0)
5282 if (!th->rst) { 5321 return -res;
5283 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5284 tcp_send_dupack(sk, skb);
5285 goto discard;
5286 }
5287 /* Reset is accepted even if it did not pass PAWS. */
5288 }
5289
5290 /* step 1: check sequence number */
5291 if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
5292 if (!th->rst)
5293 tcp_send_dupack(sk, skb);
5294 goto discard;
5295 }
5296
5297 /* step 2: check RST bit */
5298 if (th->rst) {
5299 tcp_reset(sk);
5300 goto discard;
5301 }
5302
5303 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
5304
5305 /* step 3: check security and precedence [ignored] */
5306
5307 /* step 4:
5308 *
5309 * Check for a SYN in window.
5310 */
5311 if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5312 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
5313 tcp_reset(sk);
5314 return 1;
5315 }
5316 5322
5317 /* step 5: check the ACK field */ 5323 /* step 5: check the ACK field */
5318 if (th->ack) { 5324 if (th->ack) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 011478e46c40..d13688e3558d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1364,6 +1364,10 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1364 tcp_mtup_init(newsk); 1364 tcp_mtup_init(newsk);
1365 tcp_sync_mss(newsk, dst_mtu(dst)); 1365 tcp_sync_mss(newsk, dst_mtu(dst));
1366 newtp->advmss = dst_metric(dst, RTAX_ADVMSS); 1366 newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
1367 if (tcp_sk(sk)->rx_opt.user_mss &&
1368 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1369 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1370
1367 tcp_initialize_rcv_mss(newsk); 1371 tcp_initialize_rcv_mss(newsk);
1368 1372
1369#ifdef CONFIG_TCP_MD5SIG 1373#ifdef CONFIG_TCP_MD5SIG
@@ -1946,6 +1950,12 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1946 return rc; 1950 return rc;
1947} 1951}
1948 1952
1953static inline int empty_bucket(struct tcp_iter_state *st)
1954{
1955 return hlist_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
1956 hlist_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
1957}
1958
1949static void *established_get_first(struct seq_file *seq) 1959static void *established_get_first(struct seq_file *seq)
1950{ 1960{
1951 struct tcp_iter_state* st = seq->private; 1961 struct tcp_iter_state* st = seq->private;
@@ -1958,6 +1968,10 @@ static void *established_get_first(struct seq_file *seq)
1958 struct inet_timewait_sock *tw; 1968 struct inet_timewait_sock *tw;
1959 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); 1969 rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1960 1970
1971 /* Lockless fast path for the common case of empty buckets */
1972 if (empty_bucket(st))
1973 continue;
1974
1961 read_lock_bh(lock); 1975 read_lock_bh(lock);
1962 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { 1976 sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1963 if (sk->sk_family != st->family || 1977 if (sk->sk_family != st->family ||
@@ -2008,13 +2022,15 @@ get_tw:
2008 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2022 read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2009 st->state = TCP_SEQ_STATE_ESTABLISHED; 2023 st->state = TCP_SEQ_STATE_ESTABLISHED;
2010 2024
2011 if (++st->bucket < tcp_hashinfo.ehash_size) { 2025 /* Look for next non empty bucket */
2012 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); 2026 while (++st->bucket < tcp_hashinfo.ehash_size &&
2013 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain); 2027 empty_bucket(st))
2014 } else { 2028 ;
2015 cur = NULL; 2029 if (st->bucket >= tcp_hashinfo.ehash_size)
2016 goto out; 2030 return NULL;
2017 } 2031
2032 read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2033 sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
2018 } else 2034 } else
2019 sk = sk_next(sk); 2035 sk = sk_next(sk);
2020 2036
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8165f5aa8c71..a8499ef3234a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1824,6 +1824,8 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
1824 1824
1825 /* changed transmit queue under us so clear hints */ 1825 /* changed transmit queue under us so clear hints */
1826 tcp_clear_retrans_hints_partial(tp); 1826 tcp_clear_retrans_hints_partial(tp);
1827 if (next_skb == tp->retransmit_skb_hint)
1828 tp->retransmit_skb_hint = skb;
1827 1829
1828 sk_wmem_free_skb(sk, next_skb); 1830 sk_wmem_free_skb(sk, next_skb);
1829} 1831}
@@ -1838,7 +1840,7 @@ void tcp_simple_retransmit(struct sock *sk)
1838 struct tcp_sock *tp = tcp_sk(sk); 1840 struct tcp_sock *tp = tcp_sk(sk);
1839 struct sk_buff *skb; 1841 struct sk_buff *skb;
1840 unsigned int mss = tcp_current_mss(sk, 0); 1842 unsigned int mss = tcp_current_mss(sk, 0);
1841 int lost = 0; 1843 u32 prior_lost = tp->lost_out;
1842 1844
1843 tcp_for_write_queue(skb, sk) { 1845 tcp_for_write_queue(skb, sk) {
1844 if (skb == tcp_send_head(sk)) 1846 if (skb == tcp_send_head(sk))
@@ -1849,17 +1851,13 @@ void tcp_simple_retransmit(struct sock *sk)
1849 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1851 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1850 tp->retrans_out -= tcp_skb_pcount(skb); 1852 tp->retrans_out -= tcp_skb_pcount(skb);
1851 } 1853 }
1852 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) { 1854 tcp_skb_mark_lost_uncond_verify(tp, skb);
1853 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1854 tp->lost_out += tcp_skb_pcount(skb);
1855 lost = 1;
1856 }
1857 } 1855 }
1858 } 1856 }
1859 1857
1860 tcp_clear_all_retrans_hints(tp); 1858 tcp_clear_retrans_hints_partial(tp);
1861 1859
1862 if (!lost) 1860 if (prior_lost == tp->lost_out)
1863 return; 1861 return;
1864 1862
1865 if (tcp_is_reno(tp)) 1863 if (tcp_is_reno(tp))
@@ -1934,8 +1932,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1934 /* Collapse two adjacent packets if worthwhile and we can. */ 1932 /* Collapse two adjacent packets if worthwhile and we can. */
1935 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && 1933 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
1936 (skb->len < (cur_mss >> 1)) && 1934 (skb->len < (cur_mss >> 1)) &&
1937 (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1938 (!tcp_skb_is_last(sk, skb)) && 1935 (!tcp_skb_is_last(sk, skb)) &&
1936 (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
1939 (skb_shinfo(skb)->nr_frags == 0 && 1937 (skb_shinfo(skb)->nr_frags == 0 &&
1940 skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) && 1938 skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
1941 (tcp_skb_pcount(skb) == 1 && 1939 (tcp_skb_pcount(skb) == 1 &&
@@ -1996,86 +1994,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1996 return err; 1994 return err;
1997} 1995}
1998 1996
1999/* This gets called after a retransmit timeout, and the initially 1997static int tcp_can_forward_retransmit(struct sock *sk)
2000 * retransmitted data is acknowledged. It tries to continue
2001 * resending the rest of the retransmit queue, until either
2002 * we've sent it all or the congestion window limit is reached.
2003 * If doing SACK, the first ACK which comes back for a timeout
2004 * based retransmit packet might feed us FACK information again.
2005 * If so, we use it to avoid unnecessarily retransmissions.
2006 */
2007void tcp_xmit_retransmit_queue(struct sock *sk)
2008{ 1998{
2009 const struct inet_connection_sock *icsk = inet_csk(sk); 1999 const struct inet_connection_sock *icsk = inet_csk(sk);
2010 struct tcp_sock *tp = tcp_sk(sk); 2000 struct tcp_sock *tp = tcp_sk(sk);
2011 struct sk_buff *skb;
2012 int packet_cnt;
2013
2014 if (tp->retransmit_skb_hint) {
2015 skb = tp->retransmit_skb_hint;
2016 packet_cnt = tp->retransmit_cnt_hint;
2017 } else {
2018 skb = tcp_write_queue_head(sk);
2019 packet_cnt = 0;
2020 }
2021
2022 /* First pass: retransmit lost packets. */
2023 if (tp->lost_out) {
2024 tcp_for_write_queue_from(skb, sk) {
2025 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2026
2027 if (skb == tcp_send_head(sk))
2028 break;
2029 /* we could do better than to assign each time */
2030 tp->retransmit_skb_hint = skb;
2031 tp->retransmit_cnt_hint = packet_cnt;
2032
2033 /* Assume this retransmit will generate
2034 * only one packet for congestion window
2035 * calculation purposes. This works because
2036 * tcp_retransmit_skb() will chop up the
2037 * packet to be MSS sized and all the
2038 * packet counting works out.
2039 */
2040 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2041 return;
2042
2043 if (sacked & TCPCB_LOST) {
2044 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
2045 int mib_idx;
2046
2047 if (tcp_retransmit_skb(sk, skb)) {
2048 tp->retransmit_skb_hint = NULL;
2049 return;
2050 }
2051 if (icsk->icsk_ca_state != TCP_CA_Loss)
2052 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2053 else
2054 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2055 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2056
2057 if (skb == tcp_write_queue_head(sk))
2058 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2059 inet_csk(sk)->icsk_rto,
2060 TCP_RTO_MAX);
2061 }
2062
2063 packet_cnt += tcp_skb_pcount(skb);
2064 if (packet_cnt >= tp->lost_out)
2065 break;
2066 }
2067 }
2068 }
2069
2070 /* OK, demanded retransmission is finished. */
2071 2001
2072 /* Forward retransmissions are possible only during Recovery. */ 2002 /* Forward retransmissions are possible only during Recovery. */
2073 if (icsk->icsk_ca_state != TCP_CA_Recovery) 2003 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2074 return; 2004 return 0;
2075 2005
2076 /* No forward retransmissions in Reno are possible. */ 2006 /* No forward retransmissions in Reno are possible. */
2077 if (tcp_is_reno(tp)) 2007 if (tcp_is_reno(tp))
2078 return; 2008 return 0;
2079 2009
2080 /* Yeah, we have to make difficult choice between forward transmission 2010 /* Yeah, we have to make difficult choice between forward transmission
2081 * and retransmission... Both ways have their merits... 2011 * and retransmission... Both ways have their merits...
@@ -2086,43 +2016,104 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2086 */ 2016 */
2087 2017
2088 if (tcp_may_send_now(sk)) 2018 if (tcp_may_send_now(sk))
2089 return; 2019 return 0;
2090 2020
2091 /* If nothing is SACKed, highest_sack in the loop won't be valid */ 2021 return 1;
2092 if (!tp->sacked_out) 2022}
2093 return;
2094 2023
2095 if (tp->forward_skb_hint) 2024/* This gets called after a retransmit timeout, and the initially
2096 skb = tp->forward_skb_hint; 2025 * retransmitted data is acknowledged. It tries to continue
2097 else 2026 * resending the rest of the retransmit queue, until either
2027 * we've sent it all or the congestion window limit is reached.
2028 * If doing SACK, the first ACK which comes back for a timeout
2029 * based retransmit packet might feed us FACK information again.
2030 * If so, we use it to avoid unnecessarily retransmissions.
2031 */
2032void tcp_xmit_retransmit_queue(struct sock *sk)
2033{
2034 const struct inet_connection_sock *icsk = inet_csk(sk);
2035 struct tcp_sock *tp = tcp_sk(sk);
2036 struct sk_buff *skb;
2037 struct sk_buff *hole = NULL;
2038 u32 last_lost;
2039 int mib_idx;
2040 int fwd_rexmitting = 0;
2041
2042 if (!tp->lost_out)
2043 tp->retransmit_high = tp->snd_una;
2044
2045 if (tp->retransmit_skb_hint) {
2046 skb = tp->retransmit_skb_hint;
2047 last_lost = TCP_SKB_CB(skb)->end_seq;
2048 if (after(last_lost, tp->retransmit_high))
2049 last_lost = tp->retransmit_high;
2050 } else {
2098 skb = tcp_write_queue_head(sk); 2051 skb = tcp_write_queue_head(sk);
2052 last_lost = tp->snd_una;
2053 }
2099 2054
2055 /* First pass: retransmit lost packets. */
2100 tcp_for_write_queue_from(skb, sk) { 2056 tcp_for_write_queue_from(skb, sk) {
2101 if (skb == tcp_send_head(sk)) 2057 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2102 break;
2103 tp->forward_skb_hint = skb;
2104 2058
2105 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2059 if (skb == tcp_send_head(sk))
2106 break; 2060 break;
2061 /* we could do better than to assign each time */
2062 if (hole == NULL)
2063 tp->retransmit_skb_hint = skb;
2107 2064
2065 /* Assume this retransmit will generate
2066 * only one packet for congestion window
2067 * calculation purposes. This works because
2068 * tcp_retransmit_skb() will chop up the
2069 * packet to be MSS sized and all the
2070 * packet counting works out.
2071 */
2108 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2072 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2109 break; 2073 return;
2074
2075 if (fwd_rexmitting) {
2076begin_fwd:
2077 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2078 break;
2079 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2080
2081 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2082 tp->retransmit_high = last_lost;
2083 if (!tcp_can_forward_retransmit(sk))
2084 break;
2085 /* Backtrack if necessary to non-L'ed skb */
2086 if (hole != NULL) {
2087 skb = hole;
2088 hole = NULL;
2089 }
2090 fwd_rexmitting = 1;
2091 goto begin_fwd;
2110 2092
2111 if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) 2093 } else if (!(sacked & TCPCB_LOST)) {
2094 if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS))
2095 hole = skb;
2112 continue; 2096 continue;
2113 2097
2114 /* Ok, retransmit it. */ 2098 } else {
2115 if (tcp_retransmit_skb(sk, skb)) { 2099 last_lost = TCP_SKB_CB(skb)->end_seq;
2116 tp->forward_skb_hint = NULL; 2100 if (icsk->icsk_ca_state != TCP_CA_Loss)
2117 break; 2101 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2102 else
2103 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2118 } 2104 }
2119 2105
2106 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2107 continue;
2108
2109 if (tcp_retransmit_skb(sk, skb))
2110 return;
2111 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2112
2120 if (skb == tcp_write_queue_head(sk)) 2113 if (skb == tcp_write_queue_head(sk))
2121 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 2114 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2122 inet_csk(sk)->icsk_rto, 2115 inet_csk(sk)->icsk_rto,
2123 TCP_RTO_MAX); 2116 TCP_RTO_MAX);
2124
2125 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
2126 } 2117 }
2127} 2118}
2128 2119
@@ -2241,6 +2232,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2241 struct sk_buff *skb; 2232 struct sk_buff *skb;
2242 struct tcp_md5sig_key *md5; 2233 struct tcp_md5sig_key *md5;
2243 __u8 *md5_hash_location; 2234 __u8 *md5_hash_location;
2235 int mss;
2244 2236
2245 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); 2237 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2246 if (skb == NULL) 2238 if (skb == NULL)
@@ -2251,13 +2243,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2251 2243
2252 skb->dst = dst_clone(dst); 2244 skb->dst = dst_clone(dst);
2253 2245
2246 mss = dst_metric(dst, RTAX_ADVMSS);
2247 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2248 mss = tp->rx_opt.user_mss;
2249
2254 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ 2250 if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2255 __u8 rcv_wscale; 2251 __u8 rcv_wscale;
2256 /* Set this up on the first call only */ 2252 /* Set this up on the first call only */
2257 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); 2253 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2258 /* tcp_full_space because it is guaranteed to be the first packet */ 2254 /* tcp_full_space because it is guaranteed to be the first packet */
2259 tcp_select_initial_window(tcp_full_space(sk), 2255 tcp_select_initial_window(tcp_full_space(sk),
2260 dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), 2256 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2261 &req->rcv_wnd, 2257 &req->rcv_wnd,
2262 &req->window_clamp, 2258 &req->window_clamp,
2263 ireq->wscale_ok, 2259 ireq->wscale_ok,
@@ -2267,8 +2263,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2267 2263
2268 memset(&opts, 0, sizeof(opts)); 2264 memset(&opts, 0, sizeof(opts));
2269 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2265 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2270 tcp_header_size = tcp_synack_options(sk, req, 2266 tcp_header_size = tcp_synack_options(sk, req, mss,
2271 dst_metric(dst, RTAX_ADVMSS),
2272 skb, &opts, &md5) + 2267 skb, &opts, &md5) +
2273 sizeof(struct tcphdr); 2268 sizeof(struct tcphdr);
2274 2269
@@ -2342,6 +2337,9 @@ static void tcp_connect_init(struct sock *sk)
2342 if (!tp->window_clamp) 2337 if (!tp->window_clamp)
2343 tp->window_clamp = dst_metric(dst, RTAX_WINDOW); 2338 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2344 tp->advmss = dst_metric(dst, RTAX_ADVMSS); 2339 tp->advmss = dst_metric(dst, RTAX_ADVMSS);
2340 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2341 tp->advmss = tp->rx_opt.user_mss;
2342
2345 tcp_initialize_rcv_mss(sk); 2343 tcp_initialize_rcv_mss(sk);
2346 2344
2347 tcp_select_initial_window(tcp_full_space(sk), 2345 tcp_select_initial_window(tcp_full_space(sk),
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 17c7b098cdb0..64ce3d33d9c6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1050,10 +1050,10 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1050 } 1050 }
1051 1051
1052 switch (skb->protocol) { 1052 switch (skb->protocol) {
1053 case __constant_htons(ETH_P_IP): 1053 case htons(ETH_P_IP):
1054 ret = ip4ip6_tnl_xmit(skb, dev); 1054 ret = ip4ip6_tnl_xmit(skb, dev);
1055 break; 1055 break;
1056 case __constant_htons(ETH_P_IPV6): 1056 case htons(ETH_P_IPV6):
1057 ret = ip6ip6_tnl_xmit(skb, dev); 1057 ret = ip6ip6_tnl_xmit(skb, dev);
1058 break; 1058 break;
1059 default: 1059 default:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 52d06dd4b817..9967ac7a01a8 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -27,7 +27,6 @@
27#include <linux/ipv6.h> 27#include <linux/ipv6.h>
28#include <linux/icmpv6.h> 28#include <linux/icmpv6.h>
29#include <linux/random.h> 29#include <linux/random.h>
30#include <linux/jhash.h>
31 30
32#include <net/sock.h> 31#include <net/sock.h>
33#include <net/snmp.h> 32#include <net/snmp.h>
@@ -103,39 +102,12 @@ struct ctl_table nf_ct_ipv6_sysctl_table[] = {
103}; 102};
104#endif 103#endif
105 104
106static unsigned int ip6qhashfn(__be32 id, const struct in6_addr *saddr,
107 const struct in6_addr *daddr)
108{
109 u32 a, b, c;
110
111 a = (__force u32)saddr->s6_addr32[0];
112 b = (__force u32)saddr->s6_addr32[1];
113 c = (__force u32)saddr->s6_addr32[2];
114
115 a += JHASH_GOLDEN_RATIO;
116 b += JHASH_GOLDEN_RATIO;
117 c += nf_frags.rnd;
118 __jhash_mix(a, b, c);
119
120 a += (__force u32)saddr->s6_addr32[3];
121 b += (__force u32)daddr->s6_addr32[0];
122 c += (__force u32)daddr->s6_addr32[1];
123 __jhash_mix(a, b, c);
124
125 a += (__force u32)daddr->s6_addr32[2];
126 b += (__force u32)daddr->s6_addr32[3];
127 c += (__force u32)id;
128 __jhash_mix(a, b, c);
129
130 return c & (INETFRAGS_HASHSZ - 1);
131}
132
133static unsigned int nf_hashfn(struct inet_frag_queue *q) 105static unsigned int nf_hashfn(struct inet_frag_queue *q)
134{ 106{
135 const struct nf_ct_frag6_queue *nq; 107 const struct nf_ct_frag6_queue *nq;
136 108
137 nq = container_of(q, struct nf_ct_frag6_queue, q); 109 nq = container_of(q, struct nf_ct_frag6_queue, q);
138 return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr); 110 return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
139} 111}
140 112
141static void nf_skb_free(struct sk_buff *skb) 113static void nf_skb_free(struct sk_buff *skb)
@@ -209,7 +181,7 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
209 arg.dst = dst; 181 arg.dst = dst;
210 182
211 read_lock_bh(&nf_frags.lock); 183 read_lock_bh(&nf_frags.lock);
212 hash = ip6qhashfn(id, src, dst); 184 hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
213 185
214 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); 186 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
215 local_bh_enable(); 187 local_bh_enable();
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 89184b576e23..2eeadfa039cb 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -99,8 +99,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
99 * callers should be careful not to use the hash value outside the ipfrag_lock 99 * callers should be careful not to use the hash value outside the ipfrag_lock
100 * as doing so could race with ipfrag_hash_rnd being recalculated. 100 * as doing so could race with ipfrag_hash_rnd being recalculated.
101 */ 101 */
102static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr, 102unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
103 struct in6_addr *daddr) 103 const struct in6_addr *daddr, u32 rnd)
104{ 104{
105 u32 a, b, c; 105 u32 a, b, c;
106 106
@@ -110,7 +110,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
110 110
111 a += JHASH_GOLDEN_RATIO; 111 a += JHASH_GOLDEN_RATIO;
112 b += JHASH_GOLDEN_RATIO; 112 b += JHASH_GOLDEN_RATIO;
113 c += ip6_frags.rnd; 113 c += rnd;
114 __jhash_mix(a, b, c); 114 __jhash_mix(a, b, c);
115 115
116 a += (__force u32)saddr->s6_addr32[3]; 116 a += (__force u32)saddr->s6_addr32[3];
@@ -125,13 +125,14 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
125 125
126 return c & (INETFRAGS_HASHSZ - 1); 126 return c & (INETFRAGS_HASHSZ - 1);
127} 127}
128EXPORT_SYMBOL_GPL(inet6_hash_frag);
128 129
129static unsigned int ip6_hashfn(struct inet_frag_queue *q) 130static unsigned int ip6_hashfn(struct inet_frag_queue *q)
130{ 131{
131 struct frag_queue *fq; 132 struct frag_queue *fq;
132 133
133 fq = container_of(q, struct frag_queue, q); 134 fq = container_of(q, struct frag_queue, q);
134 return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr); 135 return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
135} 136}
136 137
137int ip6_frag_match(struct inet_frag_queue *q, void *a) 138int ip6_frag_match(struct inet_frag_queue *q, void *a)
@@ -247,7 +248,7 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
247 arg.dst = dst; 248 arg.dst = dst;
248 249
249 read_lock(&ip6_frags.lock); 250 read_lock(&ip6_frags.lock);
250 hash = ip6qhashfn(id, src, dst); 251 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
251 252
252 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); 253 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
253 if (q == NULL) 254 if (q == NULL)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 63442a1e741c..f4385a6569c2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1003,6 +1003,25 @@ int icmp6_dst_gc(void)
1003 return more; 1003 return more;
1004} 1004}
1005 1005
1006static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
1007 void *arg)
1008{
1009 struct dst_entry *dst, **pprev;
1010
1011 spin_lock_bh(&icmp6_dst_lock);
1012 pprev = &icmp6_dst_gc_list;
1013 while ((dst = *pprev) != NULL) {
1014 struct rt6_info *rt = (struct rt6_info *) dst;
1015 if (func(rt, arg)) {
1016 *pprev = dst->next;
1017 dst_free(dst);
1018 } else {
1019 pprev = &dst->next;
1020 }
1021 }
1022 spin_unlock_bh(&icmp6_dst_lock);
1023}
1024
1006static int ip6_dst_gc(struct dst_ops *ops) 1025static int ip6_dst_gc(struct dst_ops *ops)
1007{ 1026{
1008 unsigned long now = jiffies; 1027 unsigned long now = jiffies;
@@ -1930,6 +1949,7 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
1930 }; 1949 };
1931 1950
1932 fib6_clean_all(net, fib6_ifdown, 0, &adn); 1951 fib6_clean_all(net, fib6_ifdown, 0, &adn);
1952 icmp6_clean_all(fib6_ifdown, &adn);
1933} 1953}
1934 1954
1935struct rt6_mtu_change_arg 1955struct rt6_mtu_change_arg
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 10e22fd48222..df16b68644e7 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1286,7 +1286,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1286 struct request_sock *req, 1286 struct request_sock *req,
1287 struct dst_entry *dst) 1287 struct dst_entry *dst)
1288{ 1288{
1289 struct inet6_request_sock *treq = inet6_rsk(req); 1289 struct inet6_request_sock *treq;
1290 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 1290 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1291 struct tcp6_sock *newtcp6sk; 1291 struct tcp6_sock *newtcp6sk;
1292 struct inet_sock *newinet; 1292 struct inet_sock *newinet;
@@ -1350,6 +1350,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1350 return newsk; 1350 return newsk;
1351 } 1351 }
1352 1352
1353 treq = inet6_rsk(req);
1353 opt = np->opt; 1354 opt = np->opt;
1354 1355
1355 if (sk_acceptq_is_full(sk)) 1356 if (sk_acceptq_is_full(sk))
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 80d693392b0f..8427518e4f20 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -179,19 +179,6 @@ config MAC80211_VERBOSE_MPL_DEBUG
179 179
180 Do not select this option. 180 Do not select this option.
181 181
182config MAC80211_LOWTX_FRAME_DUMP
183 bool "Debug frame dumping"
184 depends on MAC80211_DEBUG_MENU
185 ---help---
186 Selecting this option will cause the stack to
187 print a message for each frame that is handed
188 to the lowlevel driver for transmission. This
189 message includes all MAC addresses and the
190 frame control field.
191
192 If unsure, say N and insert the debugging code
193 you require into the driver you are debugging.
194
195config MAC80211_DEBUG_COUNTERS 182config MAC80211_DEBUG_COUNTERS
196 bool "Extra statistics for TX/RX debugging" 183 bool "Extra statistics for TX/RX debugging"
197 depends on MAC80211_DEBUG_MENU 184 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index a169b0201d61..2dc8f2bff27b 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -7,6 +7,8 @@ mac80211-y := \
7 sta_info.o \ 7 sta_info.o \
8 wep.o \ 8 wep.o \
9 wpa.o \ 9 wpa.o \
10 scan.o \
11 ht.o \
10 mlme.o \ 12 mlme.o \
11 iface.o \ 13 iface.o \
12 rate.o \ 14 rate.o \
@@ -15,6 +17,7 @@ mac80211-y := \
15 aes_ccm.o \ 17 aes_ccm.o \
16 cfg.o \ 18 cfg.o \
17 rx.o \ 19 rx.o \
20 spectmgmt.o \
18 tx.o \ 21 tx.o \
19 key.o \ 22 key.o \
20 util.o \ 23 util.o \
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 297c257864c7..855126a3039d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -17,26 +17,26 @@
17#include "rate.h" 17#include "rate.h"
18#include "mesh.h" 18#include "mesh.h"
19 19
20static enum ieee80211_if_types 20struct ieee80211_hw *wiphy_to_hw(struct wiphy *wiphy)
21nl80211_type_to_mac80211_type(enum nl80211_iftype type) 21{
22 struct ieee80211_local *local = wiphy_priv(wiphy);
23 return &local->hw;
24}
25EXPORT_SYMBOL(wiphy_to_hw);
26
27static bool nl80211_type_check(enum nl80211_iftype type)
22{ 28{
23 switch (type) { 29 switch (type) {
24 case NL80211_IFTYPE_UNSPECIFIED:
25 return IEEE80211_IF_TYPE_STA;
26 case NL80211_IFTYPE_ADHOC: 30 case NL80211_IFTYPE_ADHOC:
27 return IEEE80211_IF_TYPE_IBSS;
28 case NL80211_IFTYPE_STATION: 31 case NL80211_IFTYPE_STATION:
29 return IEEE80211_IF_TYPE_STA;
30 case NL80211_IFTYPE_MONITOR: 32 case NL80211_IFTYPE_MONITOR:
31 return IEEE80211_IF_TYPE_MNTR;
32#ifdef CONFIG_MAC80211_MESH 33#ifdef CONFIG_MAC80211_MESH
33 case NL80211_IFTYPE_MESH_POINT: 34 case NL80211_IFTYPE_MESH_POINT:
34 return IEEE80211_IF_TYPE_MESH_POINT;
35#endif 35#endif
36 case NL80211_IFTYPE_WDS: 36 case NL80211_IFTYPE_WDS:
37 return IEEE80211_IF_TYPE_WDS; 37 return true;
38 default: 38 default:
39 return IEEE80211_IF_TYPE_INVALID; 39 return false;
40 } 40 }
41} 41}
42 42
@@ -45,17 +45,15 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
45 struct vif_params *params) 45 struct vif_params *params)
46{ 46{
47 struct ieee80211_local *local = wiphy_priv(wiphy); 47 struct ieee80211_local *local = wiphy_priv(wiphy);
48 enum ieee80211_if_types itype;
49 struct net_device *dev; 48 struct net_device *dev;
50 struct ieee80211_sub_if_data *sdata; 49 struct ieee80211_sub_if_data *sdata;
51 int err; 50 int err;
52 51
53 itype = nl80211_type_to_mac80211_type(type); 52 if (!nl80211_type_check(type))
54 if (itype == IEEE80211_IF_TYPE_INVALID)
55 return -EINVAL; 53 return -EINVAL;
56 54
57 err = ieee80211_if_add(local, name, &dev, itype, params); 55 err = ieee80211_if_add(local, name, &dev, type, params);
58 if (err || itype != IEEE80211_IF_TYPE_MNTR || !flags) 56 if (err || type != NL80211_IFTYPE_MONITOR || !flags)
59 return err; 57 return err;
60 58
61 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 59 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -66,13 +64,16 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
66static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex) 64static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
67{ 65{
68 struct net_device *dev; 66 struct net_device *dev;
67 struct ieee80211_sub_if_data *sdata;
69 68
70 /* we're under RTNL */ 69 /* we're under RTNL */
71 dev = __dev_get_by_index(&init_net, ifindex); 70 dev = __dev_get_by_index(&init_net, ifindex);
72 if (!dev) 71 if (!dev)
73 return -ENODEV; 72 return -ENODEV;
74 73
75 ieee80211_if_remove(dev); 74 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
75
76 ieee80211_if_remove(sdata);
76 77
77 return 0; 78 return 0;
78} 79}
@@ -81,9 +82,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
81 enum nl80211_iftype type, u32 *flags, 82 enum nl80211_iftype type, u32 *flags,
82 struct vif_params *params) 83 struct vif_params *params)
83{ 84{
84 struct ieee80211_local *local = wiphy_priv(wiphy);
85 struct net_device *dev; 85 struct net_device *dev;
86 enum ieee80211_if_types itype;
87 struct ieee80211_sub_if_data *sdata; 86 struct ieee80211_sub_if_data *sdata;
88 int ret; 87 int ret;
89 88
@@ -92,25 +91,24 @@ static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
92 if (!dev) 91 if (!dev)
93 return -ENODEV; 92 return -ENODEV;
94 93
95 itype = nl80211_type_to_mac80211_type(type); 94 if (!nl80211_type_check(type))
96 if (itype == IEEE80211_IF_TYPE_INVALID)
97 return -EINVAL; 95 return -EINVAL;
98 96
99 if (dev == local->mdev)
100 return -EOPNOTSUPP;
101
102 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 97 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
103 98
104 ret = ieee80211_if_change_type(sdata, itype); 99 ret = ieee80211_if_change_type(sdata, type);
105 if (ret) 100 if (ret)
106 return ret; 101 return ret;
107 102
103 if (netif_running(sdata->dev))
104 return -EBUSY;
105
108 if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len) 106 if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
109 ieee80211_if_sta_set_mesh_id(&sdata->u.sta, 107 ieee80211_sdata_set_mesh_id(sdata,
110 params->mesh_id_len, 108 params->mesh_id_len,
111 params->mesh_id); 109 params->mesh_id);
112 110
113 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || !flags) 111 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags)
114 return 0; 112 return 0;
115 113
116 sdata->u.mntr_flags = *flags; 114 sdata->u.mntr_flags = *flags;
@@ -121,16 +119,12 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
121 u8 key_idx, u8 *mac_addr, 119 u8 key_idx, u8 *mac_addr,
122 struct key_params *params) 120 struct key_params *params)
123{ 121{
124 struct ieee80211_local *local = wiphy_priv(wiphy);
125 struct ieee80211_sub_if_data *sdata; 122 struct ieee80211_sub_if_data *sdata;
126 struct sta_info *sta = NULL; 123 struct sta_info *sta = NULL;
127 enum ieee80211_key_alg alg; 124 enum ieee80211_key_alg alg;
128 struct ieee80211_key *key; 125 struct ieee80211_key *key;
129 int err; 126 int err;
130 127
131 if (dev == local->mdev)
132 return -EOPNOTSUPP;
133
134 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 128 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
135 129
136 switch (params->cipher) { 130 switch (params->cipher) {
@@ -175,14 +169,10 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
175static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, 169static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
176 u8 key_idx, u8 *mac_addr) 170 u8 key_idx, u8 *mac_addr)
177{ 171{
178 struct ieee80211_local *local = wiphy_priv(wiphy);
179 struct ieee80211_sub_if_data *sdata; 172 struct ieee80211_sub_if_data *sdata;
180 struct sta_info *sta; 173 struct sta_info *sta;
181 int ret; 174 int ret;
182 175
183 if (dev == local->mdev)
184 return -EOPNOTSUPP;
185
186 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 176 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
187 177
188 rcu_read_lock(); 178 rcu_read_lock();
@@ -223,7 +213,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
223 void (*callback)(void *cookie, 213 void (*callback)(void *cookie,
224 struct key_params *params)) 214 struct key_params *params))
225{ 215{
226 struct ieee80211_local *local = wiphy_priv(wiphy);
227 struct ieee80211_sub_if_data *sdata; 216 struct ieee80211_sub_if_data *sdata;
228 struct sta_info *sta = NULL; 217 struct sta_info *sta = NULL;
229 u8 seq[6] = {0}; 218 u8 seq[6] = {0};
@@ -233,9 +222,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
233 u16 iv16; 222 u16 iv16;
234 int err = -ENOENT; 223 int err = -ENOENT;
235 224
236 if (dev == local->mdev)
237 return -EOPNOTSUPP;
238
239 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 225 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
240 226
241 rcu_read_lock(); 227 rcu_read_lock();
@@ -311,12 +297,8 @@ static int ieee80211_config_default_key(struct wiphy *wiphy,
311 struct net_device *dev, 297 struct net_device *dev,
312 u8 key_idx) 298 u8 key_idx)
313{ 299{
314 struct ieee80211_local *local = wiphy_priv(wiphy);
315 struct ieee80211_sub_if_data *sdata; 300 struct ieee80211_sub_if_data *sdata;
316 301
317 if (dev == local->mdev)
318 return -EOPNOTSUPP;
319
320 rcu_read_lock(); 302 rcu_read_lock();
321 303
322 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 304 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -365,7 +347,7 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
365 sta = sta_info_get_by_idx(local, idx, dev); 347 sta = sta_info_get_by_idx(local, idx, dev);
366 if (sta) { 348 if (sta) {
367 ret = 0; 349 ret = 0;
368 memcpy(mac, sta->addr, ETH_ALEN); 350 memcpy(mac, sta->sta.addr, ETH_ALEN);
369 sta_set_sinfo(sta, sinfo); 351 sta_set_sinfo(sta, sinfo);
370 } 352 }
371 353
@@ -497,16 +479,12 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
497static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, 479static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
498 struct beacon_parameters *params) 480 struct beacon_parameters *params)
499{ 481{
500 struct ieee80211_local *local = wiphy_priv(wiphy);
501 struct ieee80211_sub_if_data *sdata; 482 struct ieee80211_sub_if_data *sdata;
502 struct beacon_data *old; 483 struct beacon_data *old;
503 484
504 if (dev == local->mdev)
505 return -EOPNOTSUPP;
506
507 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 485 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
508 486
509 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 487 if (sdata->vif.type != NL80211_IFTYPE_AP)
510 return -EINVAL; 488 return -EINVAL;
511 489
512 old = sdata->u.ap.beacon; 490 old = sdata->u.ap.beacon;
@@ -520,16 +498,12 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
520static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, 498static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
521 struct beacon_parameters *params) 499 struct beacon_parameters *params)
522{ 500{
523 struct ieee80211_local *local = wiphy_priv(wiphy);
524 struct ieee80211_sub_if_data *sdata; 501 struct ieee80211_sub_if_data *sdata;
525 struct beacon_data *old; 502 struct beacon_data *old;
526 503
527 if (dev == local->mdev)
528 return -EOPNOTSUPP;
529
530 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 504 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
531 505
532 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 506 if (sdata->vif.type != NL80211_IFTYPE_AP)
533 return -EINVAL; 507 return -EINVAL;
534 508
535 old = sdata->u.ap.beacon; 509 old = sdata->u.ap.beacon;
@@ -542,16 +516,12 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
542 516
543static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) 517static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
544{ 518{
545 struct ieee80211_local *local = wiphy_priv(wiphy);
546 struct ieee80211_sub_if_data *sdata; 519 struct ieee80211_sub_if_data *sdata;
547 struct beacon_data *old; 520 struct beacon_data *old;
548 521
549 if (dev == local->mdev)
550 return -EOPNOTSUPP;
551
552 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 522 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
553 523
554 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 524 if (sdata->vif.type != NL80211_IFTYPE_AP)
555 return -EINVAL; 525 return -EINVAL;
556 526
557 old = sdata->u.ap.beacon; 527 old = sdata->u.ap.beacon;
@@ -594,7 +564,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
594 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ 564 * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */
595 565
596 memset(msg->da, 0xff, ETH_ALEN); 566 memset(msg->da, 0xff, ETH_ALEN);
597 memcpy(msg->sa, sta->addr, ETH_ALEN); 567 memcpy(msg->sa, sta->sta.addr, ETH_ALEN);
598 msg->len = htons(6); 568 msg->len = htons(6);
599 msg->dsap = 0; 569 msg->dsap = 0;
600 msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ 570 msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */
@@ -649,9 +619,9 @@ static void sta_apply_parameters(struct ieee80211_local *local,
649 */ 619 */
650 620
651 if (params->aid) { 621 if (params->aid) {
652 sta->aid = params->aid; 622 sta->sta.aid = params->aid;
653 if (sta->aid > IEEE80211_MAX_AID) 623 if (sta->sta.aid > IEEE80211_MAX_AID)
654 sta->aid = 0; /* XXX: should this be an error? */ 624 sta->sta.aid = 0; /* XXX: should this be an error? */
655 } 625 }
656 626
657 if (params->listen_interval >= 0) 627 if (params->listen_interval >= 0)
@@ -668,7 +638,12 @@ static void sta_apply_parameters(struct ieee80211_local *local,
668 rates |= BIT(j); 638 rates |= BIT(j);
669 } 639 }
670 } 640 }
671 sta->supp_rates[local->oper_channel->band] = rates; 641 sta->sta.supp_rates[local->oper_channel->band] = rates;
642 }
643
644 if (params->ht_capa) {
645 ieee80211_ht_cap_ie_to_ht_info(params->ht_capa,
646 &sta->sta.ht_info);
672 } 647 }
673 648
674 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { 649 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) {
@@ -691,9 +666,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
691 struct ieee80211_sub_if_data *sdata; 666 struct ieee80211_sub_if_data *sdata;
692 int err; 667 int err;
693 668
694 if (dev == local->mdev || params->vlan == local->mdev)
695 return -EOPNOTSUPP;
696
697 /* Prevent a race with changing the rate control algorithm */ 669 /* Prevent a race with changing the rate control algorithm */
698 if (!netif_running(dev)) 670 if (!netif_running(dev))
699 return -ENETDOWN; 671 return -ENETDOWN;
@@ -701,8 +673,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
701 if (params->vlan) { 673 if (params->vlan) {
702 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 674 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
703 675
704 if (sdata->vif.type != IEEE80211_IF_TYPE_VLAN && 676 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
705 sdata->vif.type != IEEE80211_IF_TYPE_AP) 677 sdata->vif.type != NL80211_IFTYPE_AP)
706 return -EINVAL; 678 return -EINVAL;
707 } else 679 } else
708 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 680 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -721,7 +693,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
721 693
722 sta_apply_parameters(local, sta, params); 694 sta_apply_parameters(local, sta, params);
723 695
724 rate_control_rate_init(sta, local); 696 rate_control_rate_init(sta);
725 697
726 rcu_read_lock(); 698 rcu_read_lock();
727 699
@@ -732,8 +704,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
732 return err; 704 return err;
733 } 705 }
734 706
735 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN || 707 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
736 sdata->vif.type == IEEE80211_IF_TYPE_AP) 708 sdata->vif.type == NL80211_IFTYPE_AP)
737 ieee80211_send_layer2_update(sta); 709 ieee80211_send_layer2_update(sta);
738 710
739 rcu_read_unlock(); 711 rcu_read_unlock();
@@ -748,9 +720,6 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
748 struct ieee80211_sub_if_data *sdata; 720 struct ieee80211_sub_if_data *sdata;
749 struct sta_info *sta; 721 struct sta_info *sta;
750 722
751 if (dev == local->mdev)
752 return -EOPNOTSUPP;
753
754 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 723 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
755 724
756 if (mac) { 725 if (mac) {
@@ -782,9 +751,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
782 struct sta_info *sta; 751 struct sta_info *sta;
783 struct ieee80211_sub_if_data *vlansdata; 752 struct ieee80211_sub_if_data *vlansdata;
784 753
785 if (dev == local->mdev || params->vlan == local->mdev)
786 return -EOPNOTSUPP;
787
788 rcu_read_lock(); 754 rcu_read_lock();
789 755
790 /* XXX: get sta belonging to dev */ 756 /* XXX: get sta belonging to dev */
@@ -797,8 +763,8 @@ static int ieee80211_change_station(struct wiphy *wiphy,
797 if (params->vlan && params->vlan != sta->sdata->dev) { 763 if (params->vlan && params->vlan != sta->sdata->dev) {
798 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 764 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
799 765
800 if (vlansdata->vif.type != IEEE80211_IF_TYPE_VLAN && 766 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
801 vlansdata->vif.type != IEEE80211_IF_TYPE_AP) { 767 vlansdata->vif.type != NL80211_IFTYPE_AP) {
802 rcu_read_unlock(); 768 rcu_read_unlock();
803 return -EINVAL; 769 return -EINVAL;
804 } 770 }
@@ -824,15 +790,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
824 struct sta_info *sta; 790 struct sta_info *sta;
825 int err; 791 int err;
826 792
827 if (dev == local->mdev)
828 return -EOPNOTSUPP;
829
830 if (!netif_running(dev)) 793 if (!netif_running(dev))
831 return -ENETDOWN; 794 return -ENETDOWN;
832 795
833 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 796 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
834 797
835 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 798 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
836 return -ENOTSUPP; 799 return -ENOTSUPP;
837 800
838 rcu_read_lock(); 801 rcu_read_lock();
@@ -842,13 +805,13 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
842 return -ENOENT; 805 return -ENOENT;
843 } 806 }
844 807
845 err = mesh_path_add(dst, dev); 808 err = mesh_path_add(dst, sdata);
846 if (err) { 809 if (err) {
847 rcu_read_unlock(); 810 rcu_read_unlock();
848 return err; 811 return err;
849 } 812 }
850 813
851 mpath = mesh_path_lookup(dst, dev); 814 mpath = mesh_path_lookup(dst, sdata);
852 if (!mpath) { 815 if (!mpath) {
853 rcu_read_unlock(); 816 rcu_read_unlock();
854 return -ENXIO; 817 return -ENXIO;
@@ -862,10 +825,12 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
862static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, 825static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
863 u8 *dst) 826 u8 *dst)
864{ 827{
828 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
829
865 if (dst) 830 if (dst)
866 return mesh_path_del(dst, dev); 831 return mesh_path_del(dst, sdata);
867 832
868 mesh_path_flush(dev); 833 mesh_path_flush(sdata);
869 return 0; 834 return 0;
870} 835}
871 836
@@ -878,15 +843,12 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
878 struct mesh_path *mpath; 843 struct mesh_path *mpath;
879 struct sta_info *sta; 844 struct sta_info *sta;
880 845
881 if (dev == local->mdev)
882 return -EOPNOTSUPP;
883
884 if (!netif_running(dev)) 846 if (!netif_running(dev))
885 return -ENETDOWN; 847 return -ENETDOWN;
886 848
887 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 849 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
888 850
889 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 851 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
890 return -ENOTSUPP; 852 return -ENOTSUPP;
891 853
892 rcu_read_lock(); 854 rcu_read_lock();
@@ -897,7 +859,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
897 return -ENOENT; 859 return -ENOENT;
898 } 860 }
899 861
900 mpath = mesh_path_lookup(dst, dev); 862 mpath = mesh_path_lookup(dst, sdata);
901 if (!mpath) { 863 if (!mpath) {
902 rcu_read_unlock(); 864 rcu_read_unlock();
903 return -ENOENT; 865 return -ENOENT;
@@ -913,7 +875,7 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
913 struct mpath_info *pinfo) 875 struct mpath_info *pinfo)
914{ 876{
915 if (mpath->next_hop) 877 if (mpath->next_hop)
916 memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); 878 memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
917 else 879 else
918 memset(next_hop, 0, ETH_ALEN); 880 memset(next_hop, 0, ETH_ALEN);
919 881
@@ -952,20 +914,16 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
952 u8 *dst, u8 *next_hop, struct mpath_info *pinfo) 914 u8 *dst, u8 *next_hop, struct mpath_info *pinfo)
953 915
954{ 916{
955 struct ieee80211_local *local = wiphy_priv(wiphy);
956 struct ieee80211_sub_if_data *sdata; 917 struct ieee80211_sub_if_data *sdata;
957 struct mesh_path *mpath; 918 struct mesh_path *mpath;
958 919
959 if (dev == local->mdev)
960 return -EOPNOTSUPP;
961
962 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 920 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
963 921
964 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 922 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
965 return -ENOTSUPP; 923 return -ENOTSUPP;
966 924
967 rcu_read_lock(); 925 rcu_read_lock();
968 mpath = mesh_path_lookup(dst, dev); 926 mpath = mesh_path_lookup(dst, sdata);
969 if (!mpath) { 927 if (!mpath) {
970 rcu_read_unlock(); 928 rcu_read_unlock();
971 return -ENOENT; 929 return -ENOENT;
@@ -980,20 +938,16 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
980 int idx, u8 *dst, u8 *next_hop, 938 int idx, u8 *dst, u8 *next_hop,
981 struct mpath_info *pinfo) 939 struct mpath_info *pinfo)
982{ 940{
983 struct ieee80211_local *local = wiphy_priv(wiphy);
984 struct ieee80211_sub_if_data *sdata; 941 struct ieee80211_sub_if_data *sdata;
985 struct mesh_path *mpath; 942 struct mesh_path *mpath;
986 943
987 if (dev == local->mdev)
988 return -EOPNOTSUPP;
989
990 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 944 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
991 945
992 if (sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT) 946 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
993 return -ENOTSUPP; 947 return -ENOTSUPP;
994 948
995 rcu_read_lock(); 949 rcu_read_lock();
996 mpath = mesh_path_lookup_by_idx(idx, dev); 950 mpath = mesh_path_lookup_by_idx(idx, sdata);
997 if (!mpath) { 951 if (!mpath) {
998 rcu_read_unlock(); 952 rcu_read_unlock();
999 return -ENOENT; 953 return -ENOENT;
@@ -1005,6 +959,38 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
1005} 959}
1006#endif 960#endif
1007 961
962static int ieee80211_change_bss(struct wiphy *wiphy,
963 struct net_device *dev,
964 struct bss_parameters *params)
965{
966 struct ieee80211_sub_if_data *sdata;
967 u32 changed = 0;
968
969 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
970
971 if (sdata->vif.type != NL80211_IFTYPE_AP)
972 return -EINVAL;
973
974 if (params->use_cts_prot >= 0) {
975 sdata->bss_conf.use_cts_prot = params->use_cts_prot;
976 changed |= BSS_CHANGED_ERP_CTS_PROT;
977 }
978 if (params->use_short_preamble >= 0) {
979 sdata->bss_conf.use_short_preamble =
980 params->use_short_preamble;
981 changed |= BSS_CHANGED_ERP_PREAMBLE;
982 }
983 if (params->use_short_slot_time >= 0) {
984 sdata->bss_conf.use_short_slot =
985 params->use_short_slot_time;
986 changed |= BSS_CHANGED_ERP_SLOT;
987 }
988
989 ieee80211_bss_info_change_notify(sdata, changed);
990
991 return 0;
992}
993
1008struct cfg80211_ops mac80211_config_ops = { 994struct cfg80211_ops mac80211_config_ops = {
1009 .add_virtual_intf = ieee80211_add_iface, 995 .add_virtual_intf = ieee80211_add_iface,
1010 .del_virtual_intf = ieee80211_del_iface, 996 .del_virtual_intf = ieee80211_del_iface,
@@ -1028,4 +1014,5 @@ struct cfg80211_ops mac80211_config_ops = {
1028 .get_mpath = ieee80211_get_mpath, 1014 .get_mpath = ieee80211_get_mpath,
1029 .dump_mpath = ieee80211_dump_mpath, 1015 .dump_mpath = ieee80211_dump_mpath,
1030#endif 1016#endif
1017 .change_bss = ieee80211_change_bss,
1031}; 1018};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index ee509f1109e2..24ce54463310 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -51,8 +51,6 @@ DEBUGFS_READONLY_FILE(antenna_sel_tx, 20, "%d",
51 local->hw.conf.antenna_sel_tx); 51 local->hw.conf.antenna_sel_tx);
52DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d", 52DEBUGFS_READONLY_FILE(antenna_sel_rx, 20, "%d",
53 local->hw.conf.antenna_sel_rx); 53 local->hw.conf.antenna_sel_rx);
54DEBUGFS_READONLY_FILE(bridge_packets, 20, "%d",
55 local->bridge_packets);
56DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d", 54DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
57 local->rts_threshold); 55 local->rts_threshold);
58DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d", 56DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
@@ -206,7 +204,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
206 DEBUGFS_ADD(frequency); 204 DEBUGFS_ADD(frequency);
207 DEBUGFS_ADD(antenna_sel_tx); 205 DEBUGFS_ADD(antenna_sel_tx);
208 DEBUGFS_ADD(antenna_sel_rx); 206 DEBUGFS_ADD(antenna_sel_rx);
209 DEBUGFS_ADD(bridge_packets);
210 DEBUGFS_ADD(rts_threshold); 207 DEBUGFS_ADD(rts_threshold);
211 DEBUGFS_ADD(fragmentation_threshold); 208 DEBUGFS_ADD(fragmentation_threshold);
212 DEBUGFS_ADD(short_retry_limit); 209 DEBUGFS_ADD(short_retry_limit);
@@ -263,7 +260,6 @@ void debugfs_hw_del(struct ieee80211_local *local)
263 DEBUGFS_DEL(frequency); 260 DEBUGFS_DEL(frequency);
264 DEBUGFS_DEL(antenna_sel_tx); 261 DEBUGFS_DEL(antenna_sel_tx);
265 DEBUGFS_DEL(antenna_sel_rx); 262 DEBUGFS_DEL(antenna_sel_rx);
266 DEBUGFS_DEL(bridge_packets);
267 DEBUGFS_DEL(rts_threshold); 263 DEBUGFS_DEL(rts_threshold);
268 DEBUGFS_DEL(fragmentation_threshold); 264 DEBUGFS_DEL(fragmentation_threshold);
269 DEBUGFS_DEL(short_retry_limit); 265 DEBUGFS_DEL(short_retry_limit);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index cf82acec913a..a3294d109322 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -206,7 +206,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
206 rcu_read_lock(); 206 rcu_read_lock();
207 sta = rcu_dereference(key->sta); 207 sta = rcu_dereference(key->sta);
208 if (sta) 208 if (sta)
209 sprintf(buf, "../../stations/%s", print_mac(mac, sta->addr)); 209 sprintf(buf, "../../stations/%s",
210 print_mac(mac, sta->sta.addr));
210 rcu_read_unlock(); 211 rcu_read_unlock();
211 212
212 /* using sta as a boolean is fine outside RCU lock */ 213 /* using sta as a boolean is fine outside RCU lock */
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 8165df578c92..2a4515623776 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -173,7 +173,6 @@ IEEE80211_IF_FILE(assoc_tries, u.sta.assoc_tries, DEC);
173IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX); 173IEEE80211_IF_FILE(auth_algs, u.sta.auth_algs, HEX);
174IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC); 174IEEE80211_IF_FILE(auth_alg, u.sta.auth_alg, DEC);
175IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC); 175IEEE80211_IF_FILE(auth_transaction, u.sta.auth_transaction, DEC);
176IEEE80211_IF_FILE(num_beacons_sta, u.sta.num_beacons, DEC);
177 176
178static ssize_t ieee80211_if_fmt_flags( 177static ssize_t ieee80211_if_fmt_flags(
179 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) 178 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -192,7 +191,6 @@ __IEEE80211_IF_FILE(flags);
192/* AP attributes */ 191/* AP attributes */
193IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 192IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
194IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); 193IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
195IEEE80211_IF_FILE(num_beacons, u.ap.num_beacons, DEC);
196 194
197static ssize_t ieee80211_if_fmt_num_buffered_multicast( 195static ssize_t ieee80211_if_fmt_num_buffered_multicast(
198 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) 196 const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -207,37 +205,37 @@ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
207 205
208#ifdef CONFIG_MAC80211_MESH 206#ifdef CONFIG_MAC80211_MESH
209/* Mesh stats attributes */ 207/* Mesh stats attributes */
210IEEE80211_IF_FILE(fwded_frames, u.sta.mshstats.fwded_frames, DEC); 208IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
211IEEE80211_IF_FILE(dropped_frames_ttl, u.sta.mshstats.dropped_frames_ttl, DEC); 209IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
212IEEE80211_IF_FILE(dropped_frames_no_route, 210IEEE80211_IF_FILE(dropped_frames_no_route,
213 u.sta.mshstats.dropped_frames_no_route, DEC); 211 u.mesh.mshstats.dropped_frames_no_route, DEC);
214IEEE80211_IF_FILE(estab_plinks, u.sta.mshstats.estab_plinks, ATOMIC); 212IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
215 213
216/* Mesh parameters */ 214/* Mesh parameters */
217IEEE80211_IF_WFILE(dot11MeshMaxRetries, 215IEEE80211_IF_WFILE(dot11MeshMaxRetries,
218 u.sta.mshcfg.dot11MeshMaxRetries, DEC, u8); 216 u.mesh.mshcfg.dot11MeshMaxRetries, DEC, u8);
219IEEE80211_IF_WFILE(dot11MeshRetryTimeout, 217IEEE80211_IF_WFILE(dot11MeshRetryTimeout,
220 u.sta.mshcfg.dot11MeshRetryTimeout, DEC, u16); 218 u.mesh.mshcfg.dot11MeshRetryTimeout, DEC, u16);
221IEEE80211_IF_WFILE(dot11MeshConfirmTimeout, 219IEEE80211_IF_WFILE(dot11MeshConfirmTimeout,
222 u.sta.mshcfg.dot11MeshConfirmTimeout, DEC, u16); 220 u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC, u16);
223IEEE80211_IF_WFILE(dot11MeshHoldingTimeout, 221IEEE80211_IF_WFILE(dot11MeshHoldingTimeout,
224 u.sta.mshcfg.dot11MeshHoldingTimeout, DEC, u16); 222 u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC, u16);
225IEEE80211_IF_WFILE(dot11MeshTTL, u.sta.mshcfg.dot11MeshTTL, DEC, u8); 223IEEE80211_IF_WFILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC, u8);
226IEEE80211_IF_WFILE(auto_open_plinks, u.sta.mshcfg.auto_open_plinks, DEC, u8); 224IEEE80211_IF_WFILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC, u8);
227IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks, 225IEEE80211_IF_WFILE(dot11MeshMaxPeerLinks,
228 u.sta.mshcfg.dot11MeshMaxPeerLinks, DEC, u16); 226 u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC, u16);
229IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout, 227IEEE80211_IF_WFILE(dot11MeshHWMPactivePathTimeout,
230 u.sta.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32); 228 u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC, u32);
231IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval, 229IEEE80211_IF_WFILE(dot11MeshHWMPpreqMinInterval,
232 u.sta.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16); 230 u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC, u16);
233IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime, 231IEEE80211_IF_WFILE(dot11MeshHWMPnetDiameterTraversalTime,
234 u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16); 232 u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC, u16);
235IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries, 233IEEE80211_IF_WFILE(dot11MeshHWMPmaxPREQretries,
236 u.sta.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8); 234 u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC, u8);
237IEEE80211_IF_WFILE(path_refresh_time, 235IEEE80211_IF_WFILE(path_refresh_time,
238 u.sta.mshcfg.path_refresh_time, DEC, u32); 236 u.mesh.mshcfg.path_refresh_time, DEC, u32);
239IEEE80211_IF_WFILE(min_discovery_timeout, 237IEEE80211_IF_WFILE(min_discovery_timeout,
240 u.sta.mshcfg.min_discovery_timeout, DEC, u16); 238 u.mesh.mshcfg.min_discovery_timeout, DEC, u16);
241#endif 239#endif
242 240
243 241
@@ -265,7 +263,6 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
265 DEBUGFS_ADD(auth_alg, sta); 263 DEBUGFS_ADD(auth_alg, sta);
266 DEBUGFS_ADD(auth_transaction, sta); 264 DEBUGFS_ADD(auth_transaction, sta);
267 DEBUGFS_ADD(flags, sta); 265 DEBUGFS_ADD(flags, sta);
268 DEBUGFS_ADD(num_beacons_sta, sta);
269} 266}
270 267
271static void add_ap_files(struct ieee80211_sub_if_data *sdata) 268static void add_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -276,7 +273,6 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
276 273
277 DEBUGFS_ADD(num_sta_ps, ap); 274 DEBUGFS_ADD(num_sta_ps, ap);
278 DEBUGFS_ADD(dtim_count, ap); 275 DEBUGFS_ADD(dtim_count, ap);
279 DEBUGFS_ADD(num_beacons, ap);
280 DEBUGFS_ADD(num_buffered_multicast, ap); 276 DEBUGFS_ADD(num_buffered_multicast, ap);
281} 277}
282 278
@@ -345,26 +341,26 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
345 return; 341 return;
346 342
347 switch (sdata->vif.type) { 343 switch (sdata->vif.type) {
348 case IEEE80211_IF_TYPE_MESH_POINT: 344 case NL80211_IFTYPE_MESH_POINT:
349#ifdef CONFIG_MAC80211_MESH 345#ifdef CONFIG_MAC80211_MESH
350 add_mesh_stats(sdata); 346 add_mesh_stats(sdata);
351 add_mesh_config(sdata); 347 add_mesh_config(sdata);
352#endif 348#endif
353 /* fall through */ 349 break;
354 case IEEE80211_IF_TYPE_STA: 350 case NL80211_IFTYPE_STATION:
355 case IEEE80211_IF_TYPE_IBSS: 351 case NL80211_IFTYPE_ADHOC:
356 add_sta_files(sdata); 352 add_sta_files(sdata);
357 break; 353 break;
358 case IEEE80211_IF_TYPE_AP: 354 case NL80211_IFTYPE_AP:
359 add_ap_files(sdata); 355 add_ap_files(sdata);
360 break; 356 break;
361 case IEEE80211_IF_TYPE_WDS: 357 case NL80211_IFTYPE_WDS:
362 add_wds_files(sdata); 358 add_wds_files(sdata);
363 break; 359 break;
364 case IEEE80211_IF_TYPE_MNTR: 360 case NL80211_IFTYPE_MONITOR:
365 add_monitor_files(sdata); 361 add_monitor_files(sdata);
366 break; 362 break;
367 case IEEE80211_IF_TYPE_VLAN: 363 case NL80211_IFTYPE_AP_VLAN:
368 add_vlan_files(sdata); 364 add_vlan_files(sdata);
369 break; 365 break;
370 default: 366 default:
@@ -398,7 +394,6 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
398 DEBUGFS_DEL(auth_alg, sta); 394 DEBUGFS_DEL(auth_alg, sta);
399 DEBUGFS_DEL(auth_transaction, sta); 395 DEBUGFS_DEL(auth_transaction, sta);
400 DEBUGFS_DEL(flags, sta); 396 DEBUGFS_DEL(flags, sta);
401 DEBUGFS_DEL(num_beacons_sta, sta);
402} 397}
403 398
404static void del_ap_files(struct ieee80211_sub_if_data *sdata) 399static void del_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -409,7 +404,6 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
409 404
410 DEBUGFS_DEL(num_sta_ps, ap); 405 DEBUGFS_DEL(num_sta_ps, ap);
411 DEBUGFS_DEL(dtim_count, ap); 406 DEBUGFS_DEL(dtim_count, ap);
412 DEBUGFS_DEL(num_beacons, ap);
413 DEBUGFS_DEL(num_buffered_multicast, ap); 407 DEBUGFS_DEL(num_buffered_multicast, ap);
414} 408}
415 409
@@ -482,26 +476,26 @@ static void del_files(struct ieee80211_sub_if_data *sdata)
482 return; 476 return;
483 477
484 switch (sdata->vif.type) { 478 switch (sdata->vif.type) {
485 case IEEE80211_IF_TYPE_MESH_POINT: 479 case NL80211_IFTYPE_MESH_POINT:
486#ifdef CONFIG_MAC80211_MESH 480#ifdef CONFIG_MAC80211_MESH
487 del_mesh_stats(sdata); 481 del_mesh_stats(sdata);
488 del_mesh_config(sdata); 482 del_mesh_config(sdata);
489#endif 483#endif
490 /* fall through */ 484 break;
491 case IEEE80211_IF_TYPE_STA: 485 case NL80211_IFTYPE_STATION:
492 case IEEE80211_IF_TYPE_IBSS: 486 case NL80211_IFTYPE_ADHOC:
493 del_sta_files(sdata); 487 del_sta_files(sdata);
494 break; 488 break;
495 case IEEE80211_IF_TYPE_AP: 489 case NL80211_IFTYPE_AP:
496 del_ap_files(sdata); 490 del_ap_files(sdata);
497 break; 491 break;
498 case IEEE80211_IF_TYPE_WDS: 492 case NL80211_IFTYPE_WDS:
499 del_wds_files(sdata); 493 del_wds_files(sdata);
500 break; 494 break;
501 case IEEE80211_IF_TYPE_MNTR: 495 case NL80211_IFTYPE_MONITOR:
502 del_monitor_files(sdata); 496 del_monitor_files(sdata);
503 break; 497 break;
504 case IEEE80211_IF_TYPE_VLAN: 498 case NL80211_IFTYPE_AP_VLAN:
505 del_vlan_files(sdata); 499 del_vlan_files(sdata);
506 break; 500 break;
507 default: 501 default:
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 79a062782d52..b9902e425f09 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -50,7 +50,7 @@ static const struct file_operations sta_ ##name## _ops = { \
50 STA_READ_##format(name, field) \ 50 STA_READ_##format(name, field) \
51 STA_OPS(name) 51 STA_OPS(name)
52 52
53STA_FILE(aid, aid, D); 53STA_FILE(aid, sta.aid, D);
54STA_FILE(dev, sdata->dev->name, S); 54STA_FILE(dev, sdata->dev->name, S);
55STA_FILE(rx_packets, rx_packets, LU); 55STA_FILE(rx_packets, rx_packets, LU);
56STA_FILE(tx_packets, tx_packets, LU); 56STA_FILE(tx_packets, tx_packets, LU);
@@ -173,10 +173,9 @@ static ssize_t sta_agg_status_write(struct file *file,
173 const char __user *user_buf, size_t count, loff_t *ppos) 173 const char __user *user_buf, size_t count, loff_t *ppos)
174{ 174{
175 struct sta_info *sta = file->private_data; 175 struct sta_info *sta = file->private_data;
176 struct net_device *dev = sta->sdata->dev; 176 struct ieee80211_local *local = sta->sdata->local;
177 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
178 struct ieee80211_hw *hw = &local->hw; 177 struct ieee80211_hw *hw = &local->hw;
179 u8 *da = sta->addr; 178 u8 *da = sta->sta.addr;
180 static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0, 179 static int tid_static_tx[16] = {0, 0, 0, 0, 0, 0, 0, 0,
181 0, 0, 0, 0, 0, 0, 0, 0}; 180 0, 0, 0, 0, 0, 0, 0, 0};
182 static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1, 181 static int tid_static_rx[16] = {1, 1, 1, 1, 1, 1, 1, 1,
@@ -201,7 +200,7 @@ static ssize_t sta_agg_status_write(struct file *file,
201 tid_num = tid_num - 100; 200 tid_num = tid_num - 100;
202 if (tid_static_rx[tid_num] == 1) { 201 if (tid_static_rx[tid_num] == 1) {
203 strcpy(state, "off "); 202 strcpy(state, "off ");
204 ieee80211_sta_stop_rx_ba_session(dev, da, tid_num, 0, 203 ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0,
205 WLAN_REASON_QSTA_REQUIRE_SETUP); 204 WLAN_REASON_QSTA_REQUIRE_SETUP);
206 sta->ampdu_mlme.tid_state_rx[tid_num] |= 205 sta->ampdu_mlme.tid_state_rx[tid_num] |=
207 HT_AGG_STATE_DEBUGFS_CTL; 206 HT_AGG_STATE_DEBUGFS_CTL;
@@ -253,7 +252,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
253 if (!stations_dir) 252 if (!stations_dir)
254 return; 253 return;
255 254
256 mac = print_mac(mbuf, sta->addr); 255 mac = print_mac(mbuf, sta->sta.addr);
257 256
258 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir); 257 sta->debugfs.dir = debugfs_create_dir(mac, stations_dir);
259 if (!sta->debugfs.dir) 258 if (!sta->debugfs.dir)
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
index 2280f40b4560..8de60de70bc9 100644
--- a/net/mac80211/event.c
+++ b/net/mac80211/event.c
@@ -8,7 +8,6 @@
8 * mac80211 - events 8 * mac80211 - events
9 */ 9 */
10 10
11#include <linux/netdevice.h>
12#include <net/iw_handler.h> 11#include <net/iw_handler.h>
13#include "ieee80211_i.h" 12#include "ieee80211_i.h"
14 13
@@ -17,7 +16,7 @@
17 * (in the variable hdr) must be long enough to extract the TKIP 16 * (in the variable hdr) must be long enough to extract the TKIP
18 * fields like TSC 17 * fields like TSC
19 */ 18 */
20void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, 19void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
21 struct ieee80211_hdr *hdr) 20 struct ieee80211_hdr *hdr)
22{ 21{
23 union iwreq_data wrqu; 22 union iwreq_data wrqu;
@@ -32,7 +31,7 @@ void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx,
32 print_mac(mac, hdr->addr2)); 31 print_mac(mac, hdr->addr2));
33 memset(&wrqu, 0, sizeof(wrqu)); 32 memset(&wrqu, 0, sizeof(wrqu));
34 wrqu.data.length = strlen(buf); 33 wrqu.data.length = strlen(buf);
35 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); 34 wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
36 kfree(buf); 35 kfree(buf);
37 } 36 }
38 37
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
new file mode 100644
index 000000000000..dc7d9a3d70d5
--- /dev/null
+++ b/net/mac80211/ht.c
@@ -0,0 +1,992 @@
1/*
2 * HT handling
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2008, Intel Corporation
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#include <linux/ieee80211.h>
17#include <net/wireless.h>
18#include <net/mac80211.h>
19#include "ieee80211_i.h"
20#include "sta_info.h"
21#include "wme.h"
22
23int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
24 struct ieee80211_ht_info *ht_info)
25{
26
27 if (ht_info == NULL)
28 return -EINVAL;
29
30 memset(ht_info, 0, sizeof(*ht_info));
31
32 if (ht_cap_ie) {
33 u8 ampdu_info = ht_cap_ie->ampdu_params_info;
34
35 ht_info->ht_supported = 1;
36 ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info);
37 ht_info->ampdu_factor =
38 ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR;
39 ht_info->ampdu_density =
40 (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2;
41 memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16);
42 } else
43 ht_info->ht_supported = 0;
44
45 return 0;
46}
47
48int ieee80211_ht_addt_info_ie_to_ht_bss_info(
49 struct ieee80211_ht_addt_info *ht_add_info_ie,
50 struct ieee80211_ht_bss_info *bss_info)
51{
52 if (bss_info == NULL)
53 return -EINVAL;
54
55 memset(bss_info, 0, sizeof(*bss_info));
56
57 if (ht_add_info_ie) {
58 u16 op_mode;
59 op_mode = le16_to_cpu(ht_add_info_ie->operation_mode);
60
61 bss_info->primary_channel = ht_add_info_ie->control_chan;
62 bss_info->bss_cap = ht_add_info_ie->ht_param;
63 bss_info->bss_op_mode = (u8)(op_mode & 0xff);
64 }
65
66 return 0;
67}
68
69static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
70 const u8 *da, u16 tid,
71 u8 dialog_token, u16 start_seq_num,
72 u16 agg_size, u16 timeout)
73{
74 struct ieee80211_local *local = sdata->local;
75 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
76 struct sk_buff *skb;
77 struct ieee80211_mgmt *mgmt;
78 u16 capab;
79
80 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
81
82 if (!skb) {
83 printk(KERN_ERR "%s: failed to allocate buffer "
84 "for addba request frame\n", sdata->dev->name);
85 return;
86 }
87 skb_reserve(skb, local->hw.extra_tx_headroom);
88 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
89 memset(mgmt, 0, 24);
90 memcpy(mgmt->da, da, ETH_ALEN);
91 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
92 if (sdata->vif.type == NL80211_IFTYPE_AP)
93 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
94 else
95 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
96
97 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
98 IEEE80211_STYPE_ACTION);
99
100 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req));
101
102 mgmt->u.action.category = WLAN_CATEGORY_BACK;
103 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
104
105 mgmt->u.action.u.addba_req.dialog_token = dialog_token;
106 capab = (u16)(1 << 1); /* bit 1 aggregation policy */
107 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
108 capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */
109
110 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
111
112 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
113 mgmt->u.action.u.addba_req.start_seq_num =
114 cpu_to_le16(start_seq_num << 4);
115
116 ieee80211_tx_skb(sdata, skb, 0);
117}
118
119static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
120 u8 dialog_token, u16 status, u16 policy,
121 u16 buf_size, u16 timeout)
122{
123 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
124 struct ieee80211_local *local = sdata->local;
125 struct sk_buff *skb;
126 struct ieee80211_mgmt *mgmt;
127 u16 capab;
128
129 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
130
131 if (!skb) {
132 printk(KERN_DEBUG "%s: failed to allocate buffer "
133 "for addba resp frame\n", sdata->dev->name);
134 return;
135 }
136
137 skb_reserve(skb, local->hw.extra_tx_headroom);
138 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
139 memset(mgmt, 0, 24);
140 memcpy(mgmt->da, da, ETH_ALEN);
141 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
142 if (sdata->vif.type == NL80211_IFTYPE_AP)
143 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
144 else
145 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
146 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
147 IEEE80211_STYPE_ACTION);
148
149 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp));
150 mgmt->u.action.category = WLAN_CATEGORY_BACK;
151 mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP;
152 mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
153
154 capab = (u16)(policy << 1); /* bit 1 aggregation policy */
155 capab |= (u16)(tid << 2); /* bit 5:2 TID number */
156 capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */
157
158 mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab);
159 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
160 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
161
162 ieee80211_tx_skb(sdata, skb, 0);
163}
164
165static void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
166 const u8 *da, u16 tid,
167 u16 initiator, u16 reason_code)
168{
169 struct ieee80211_local *local = sdata->local;
170 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
171 struct sk_buff *skb;
172 struct ieee80211_mgmt *mgmt;
173 u16 params;
174
175 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
176
177 if (!skb) {
178 printk(KERN_ERR "%s: failed to allocate buffer "
179 "for delba frame\n", sdata->dev->name);
180 return;
181 }
182
183 skb_reserve(skb, local->hw.extra_tx_headroom);
184 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
185 memset(mgmt, 0, 24);
186 memcpy(mgmt->da, da, ETH_ALEN);
187 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
188 if (sdata->vif.type == NL80211_IFTYPE_AP)
189 memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
190 else
191 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
192 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
193 IEEE80211_STYPE_ACTION);
194
195 skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba));
196
197 mgmt->u.action.category = WLAN_CATEGORY_BACK;
198 mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA;
199 params = (u16)(initiator << 11); /* bit 11 initiator */
200 params |= (u16)(tid << 12); /* bit 15:12 TID number */
201
202 mgmt->u.action.u.delba.params = cpu_to_le16(params);
203 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
204
205 ieee80211_tx_skb(sdata, skb, 0);
206}
207
208void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
209{
210 struct ieee80211_local *local = sdata->local;
211 struct sk_buff *skb;
212 struct ieee80211_bar *bar;
213 u16 bar_control = 0;
214
215 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
216 if (!skb) {
217 printk(KERN_ERR "%s: failed to allocate buffer for "
218 "bar frame\n", sdata->dev->name);
219 return;
220 }
221 skb_reserve(skb, local->hw.extra_tx_headroom);
222 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
223 memset(bar, 0, sizeof(*bar));
224 bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
225 IEEE80211_STYPE_BACK_REQ);
226 memcpy(bar->ra, ra, ETH_ALEN);
227 memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
228 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
229 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
230 bar_control |= (u16)(tid << 12);
231 bar->control = cpu_to_le16(bar_control);
232 bar->start_seq_num = cpu_to_le16(ssn);
233
234 ieee80211_tx_skb(sdata, skb, 0);
235}
236
237void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
238 u16 initiator, u16 reason)
239{
240 struct ieee80211_local *local = sdata->local;
241 struct ieee80211_hw *hw = &local->hw;
242 struct sta_info *sta;
243 int ret, i;
244 DECLARE_MAC_BUF(mac);
245
246 rcu_read_lock();
247
248 sta = sta_info_get(local, ra);
249 if (!sta) {
250 rcu_read_unlock();
251 return;
252 }
253
254 /* check if TID is in operational state */
255 spin_lock_bh(&sta->lock);
256 if (sta->ampdu_mlme.tid_state_rx[tid]
257 != HT_AGG_STATE_OPERATIONAL) {
258 spin_unlock_bh(&sta->lock);
259 rcu_read_unlock();
260 return;
261 }
262 sta->ampdu_mlme.tid_state_rx[tid] =
263 HT_AGG_STATE_REQ_STOP_BA_MSK |
264 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
265 spin_unlock_bh(&sta->lock);
266
267 /* stop HW Rx aggregation. ampdu_action existence
268 * already verified in session init so we add the BUG_ON */
269 BUG_ON(!local->ops->ampdu_action);
270
271#ifdef CONFIG_MAC80211_HT_DEBUG
272 printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n",
273 print_mac(mac, ra), tid);
274#endif /* CONFIG_MAC80211_HT_DEBUG */
275
276 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
277 &sta->sta, tid, NULL);
278 if (ret)
279 printk(KERN_DEBUG "HW problem - can not stop rx "
280 "aggregation for tid %d\n", tid);
281
282 /* shutdown timer has not expired */
283 if (initiator != WLAN_BACK_TIMER)
284 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
285
286 /* check if this is a self generated aggregation halt */
287 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
288 ieee80211_send_delba(sdata, ra, tid, 0, reason);
289
290 /* free the reordering buffer */
291 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
292 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
293 /* release the reordered frames */
294 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
295 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
296 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
297 }
298 }
299 /* free resources */
300 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
301 kfree(sta->ampdu_mlme.tid_rx[tid]);
302 sta->ampdu_mlme.tid_rx[tid] = NULL;
303 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
304
305 rcu_read_unlock();
306}
307
308
309/*
310 * After sending add Block Ack request we activated a timer until
311 * add Block Ack response will arrive from the recipient.
312 * If this timer expires sta_addba_resp_timer_expired will be executed.
313 */
314static void sta_addba_resp_timer_expired(unsigned long data)
315{
316 /* not an elegant detour, but there is no choice as the timer passes
317 * only one argument, and both sta_info and TID are needed, so init
318 * flow in sta_info_create gives the TID as data, while the timer_to_id
319 * array gives the sta through container_of */
320 u16 tid = *(u8 *)data;
321 struct sta_info *temp_sta = container_of((void *)data,
322 struct sta_info, timer_to_tid[tid]);
323
324 struct ieee80211_local *local = temp_sta->local;
325 struct ieee80211_hw *hw = &local->hw;
326 struct sta_info *sta;
327 u8 *state;
328
329 rcu_read_lock();
330
331 sta = sta_info_get(local, temp_sta->sta.addr);
332 if (!sta) {
333 rcu_read_unlock();
334 return;
335 }
336
337 state = &sta->ampdu_mlme.tid_state_tx[tid];
338 /* check if the TID waits for addBA response */
339 spin_lock_bh(&sta->lock);
340 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
341 spin_unlock_bh(&sta->lock);
342 *state = HT_AGG_STATE_IDLE;
343#ifdef CONFIG_MAC80211_HT_DEBUG
344 printk(KERN_DEBUG "timer expired on tid %d but we are not "
345 "expecting addBA response there", tid);
346#endif
347 goto timer_expired_exit;
348 }
349
350#ifdef CONFIG_MAC80211_HT_DEBUG
351 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
352#endif
353
354 /* go through the state check in stop_BA_session */
355 *state = HT_AGG_STATE_OPERATIONAL;
356 spin_unlock_bh(&sta->lock);
357 ieee80211_stop_tx_ba_session(hw, temp_sta->sta.addr, tid,
358 WLAN_BACK_INITIATOR);
359
360timer_expired_exit:
361 rcu_read_unlock();
362}
363
364void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr)
365{
366 struct ieee80211_local *local = sdata->local;
367 int i;
368
369 for (i = 0; i < STA_TID_NUM; i++) {
370 ieee80211_stop_tx_ba_session(&local->hw, addr, i,
371 WLAN_BACK_INITIATOR);
372 ieee80211_sta_stop_rx_ba_session(sdata, addr, i,
373 WLAN_BACK_RECIPIENT,
374 WLAN_REASON_QSTA_LEAVE_QBSS);
375 }
376}
377
378int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
379{
380 struct ieee80211_local *local = hw_to_local(hw);
381 struct sta_info *sta;
382 struct ieee80211_sub_if_data *sdata;
383 u16 start_seq_num;
384 u8 *state;
385 int ret;
386 DECLARE_MAC_BUF(mac);
387
388 if (tid >= STA_TID_NUM)
389 return -EINVAL;
390
391#ifdef CONFIG_MAC80211_HT_DEBUG
392 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
393 print_mac(mac, ra), tid);
394#endif /* CONFIG_MAC80211_HT_DEBUG */
395
396 rcu_read_lock();
397
398 sta = sta_info_get(local, ra);
399 if (!sta) {
400#ifdef CONFIG_MAC80211_HT_DEBUG
401 printk(KERN_DEBUG "Could not find the station\n");
402#endif
403 ret = -ENOENT;
404 goto exit;
405 }
406
407 spin_lock_bh(&sta->lock);
408
409 /* we have tried too many times, receiver does not want A-MPDU */
410 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
411 ret = -EBUSY;
412 goto err_unlock_sta;
413 }
414
415 state = &sta->ampdu_mlme.tid_state_tx[tid];
416 /* check if the TID is not in aggregation flow already */
417 if (*state != HT_AGG_STATE_IDLE) {
418#ifdef CONFIG_MAC80211_HT_DEBUG
419 printk(KERN_DEBUG "BA request denied - session is not "
420 "idle on tid %u\n", tid);
421#endif /* CONFIG_MAC80211_HT_DEBUG */
422 ret = -EAGAIN;
423 goto err_unlock_sta;
424 }
425
426 /* prepare A-MPDU MLME for Tx aggregation */
427 sta->ampdu_mlme.tid_tx[tid] =
428 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
429 if (!sta->ampdu_mlme.tid_tx[tid]) {
430#ifdef CONFIG_MAC80211_HT_DEBUG
431 if (net_ratelimit())
432 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
433 tid);
434#endif
435 ret = -ENOMEM;
436 goto err_unlock_sta;
437 }
438 /* Tx timer */
439 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
440 sta_addba_resp_timer_expired;
441 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
442 (unsigned long)&sta->timer_to_tid[tid];
443 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
444
445 /* create a new queue for this aggregation */
446 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
447
448 /* case no queue is available to aggregation
449 * don't switch to aggregation */
450 if (ret) {
451#ifdef CONFIG_MAC80211_HT_DEBUG
452 printk(KERN_DEBUG "BA request denied - queue unavailable for"
453 " tid %d\n", tid);
454#endif /* CONFIG_MAC80211_HT_DEBUG */
455 goto err_unlock_queue;
456 }
457 sdata = sta->sdata;
458
459 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
460 * call back right away, it must see that the flow has begun */
461 *state |= HT_ADDBA_REQUESTED_MSK;
462
463 /* This is slightly racy because the queue isn't stopped */
464 start_seq_num = sta->tid_seq[tid];
465
466 if (local->ops->ampdu_action)
467 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
468 &sta->sta, tid, &start_seq_num);
469
470 if (ret) {
471 /* No need to requeue the packets in the agg queue, since we
472 * held the tx lock: no packet could be enqueued to the newly
473 * allocated queue */
474 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
475#ifdef CONFIG_MAC80211_HT_DEBUG
476 printk(KERN_DEBUG "BA request denied - HW unavailable for"
477 " tid %d\n", tid);
478#endif /* CONFIG_MAC80211_HT_DEBUG */
479 *state = HT_AGG_STATE_IDLE;
480 goto err_unlock_queue;
481 }
482
483 /* Will put all the packets in the new SW queue */
484 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
485 spin_unlock_bh(&sta->lock);
486
487 /* send an addBA request */
488 sta->ampdu_mlme.dialog_token_allocator++;
489 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
490 sta->ampdu_mlme.dialog_token_allocator;
491 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
492
493
494 ieee80211_send_addba_request(sta->sdata, ra, tid,
495 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
496 sta->ampdu_mlme.tid_tx[tid]->ssn,
497 0x40, 5000);
498 /* activate the timer for the recipient's addBA response */
499 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
500 jiffies + ADDBA_RESP_INTERVAL;
501 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
502#ifdef CONFIG_MAC80211_HT_DEBUG
503 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
504#endif
505 goto exit;
506
507err_unlock_queue:
508 kfree(sta->ampdu_mlme.tid_tx[tid]);
509 sta->ampdu_mlme.tid_tx[tid] = NULL;
510 ret = -EBUSY;
511err_unlock_sta:
512 spin_unlock_bh(&sta->lock);
513exit:
514 rcu_read_unlock();
515 return ret;
516}
517EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
518
519int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
520 u8 *ra, u16 tid,
521 enum ieee80211_back_parties initiator)
522{
523 struct ieee80211_local *local = hw_to_local(hw);
524 struct sta_info *sta;
525 u8 *state;
526 int ret = 0;
527 DECLARE_MAC_BUF(mac);
528
529 if (tid >= STA_TID_NUM)
530 return -EINVAL;
531
532 rcu_read_lock();
533 sta = sta_info_get(local, ra);
534 if (!sta) {
535 rcu_read_unlock();
536 return -ENOENT;
537 }
538
539 /* check if the TID is in aggregation */
540 state = &sta->ampdu_mlme.tid_state_tx[tid];
541 spin_lock_bh(&sta->lock);
542
543 if (*state != HT_AGG_STATE_OPERATIONAL) {
544 ret = -ENOENT;
545 goto stop_BA_exit;
546 }
547
548#ifdef CONFIG_MAC80211_HT_DEBUG
549 printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
550 print_mac(mac, ra), tid);
551#endif /* CONFIG_MAC80211_HT_DEBUG */
552
553 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
554
555 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
556 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
557
558 if (local->ops->ampdu_action)
559 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
560 &sta->sta, tid, NULL);
561
562 /* case HW denied going back to legacy */
563 if (ret) {
564 WARN_ON(ret != -EBUSY);
565 *state = HT_AGG_STATE_OPERATIONAL;
566 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
567 goto stop_BA_exit;
568 }
569
570stop_BA_exit:
571 spin_unlock_bh(&sta->lock);
572 rcu_read_unlock();
573 return ret;
574}
575EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
576
577void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
578{
579 struct ieee80211_local *local = hw_to_local(hw);
580 struct sta_info *sta;
581 u8 *state;
582 DECLARE_MAC_BUF(mac);
583
584 if (tid >= STA_TID_NUM) {
585#ifdef CONFIG_MAC80211_HT_DEBUG
586 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
587 tid, STA_TID_NUM);
588#endif
589 return;
590 }
591
592 rcu_read_lock();
593 sta = sta_info_get(local, ra);
594 if (!sta) {
595 rcu_read_unlock();
596#ifdef CONFIG_MAC80211_HT_DEBUG
597 printk(KERN_DEBUG "Could not find station: %s\n",
598 print_mac(mac, ra));
599#endif
600 return;
601 }
602
603 state = &sta->ampdu_mlme.tid_state_tx[tid];
604 spin_lock_bh(&sta->lock);
605
606 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
607#ifdef CONFIG_MAC80211_HT_DEBUG
608 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
609 *state);
610#endif
611 spin_unlock_bh(&sta->lock);
612 rcu_read_unlock();
613 return;
614 }
615
616 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
617
618 *state |= HT_ADDBA_DRV_READY_MSK;
619
620 if (*state == HT_AGG_STATE_OPERATIONAL) {
621#ifdef CONFIG_MAC80211_HT_DEBUG
622 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
623#endif
624 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
625 }
626 spin_unlock_bh(&sta->lock);
627 rcu_read_unlock();
628}
629EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
630
/*
 * Driver callback: the low-level driver has finished tearing down the Tx
 * BA session for this STA/TID.  Sends a DelBA to the peer if we were the
 * initiator, removes the aggregation queue (requeueing its frames), and
 * resets the TID state machine to IDLE.
 *
 * NOTE(review): tid is u8 here but u16 in ieee80211_start_tx_ba_cb() —
 * functionally harmless (STA_TID_NUM bounds it) but worth unifying.
 */
void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct sta_info *sta;
	u8 *state;
	int agg_queue;
	DECLARE_MAC_BUF(mac);

	/* reject out-of-range TIDs before touching any per-TID array */
	if (tid >= STA_TID_NUM) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
		       tid, STA_TID_NUM);
#endif
		return;
	}

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
	       print_mac(mac, ra), tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */

	rcu_read_lock();
	sta = sta_info_get(local, ra);
	if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "Could not find station: %s\n",
		       print_mac(mac, ra));
#endif
		rcu_read_unlock();
		return;
	}
	state = &sta->ampdu_mlme.tid_state_tx[tid];

	/* NOTE: no need to use sta->lock in this state check, as
	 * ieee80211_stop_tx_ba_session will let only one stop call to
	 * pass through per sta/tid
	 */
	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
#endif
		rcu_read_unlock();
		return;
	}

	/* only the side that started the session announces its end */
	if (*state & HT_AGG_STATE_INITIATOR_MSK)
		ieee80211_send_delba(sta->sdata, ra, tid,
			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);

	agg_queue = sta->tid_to_tx_q[tid];

	/* 1 == requeue pending frames back onto the normal queue */
	ieee80211_ht_agg_queue_remove(local, sta, tid, 1);

	/* We just requeued the all the frames that were in the
	 * removed queue, and since we might miss a softirq we do
	 * netif_schedule_queue.  ieee80211_wake_queue is not used
	 * here as this queue is not necessarily stopped
	 */
	netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
	spin_lock_bh(&sta->lock);
	/* reset per-TID MLME state so a fresh session can be negotiated */
	*state = HT_AGG_STATE_IDLE;
	sta->ampdu_mlme.addba_req_num[tid] = 0;
	kfree(sta->ampdu_mlme.tid_tx[tid]);
	sta->ampdu_mlme.tid_tx[tid] = NULL;
	spin_unlock_bh(&sta->lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
700
701void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
702 const u8 *ra, u16 tid)
703{
704 struct ieee80211_local *local = hw_to_local(hw);
705 struct ieee80211_ra_tid *ra_tid;
706 struct sk_buff *skb = dev_alloc_skb(0);
707
708 if (unlikely(!skb)) {
709#ifdef CONFIG_MAC80211_HT_DEBUG
710 if (net_ratelimit())
711 printk(KERN_WARNING "%s: Not enough memory, "
712 "dropping start BA session", skb->dev->name);
713#endif
714 return;
715 }
716 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
717 memcpy(&ra_tid->ra, ra, ETH_ALEN);
718 ra_tid->tid = tid;
719
720 skb->pkt_type = IEEE80211_ADDBA_MSG;
721 skb_queue_tail(&local->skb_queue, skb);
722 tasklet_schedule(&local->tasklet);
723}
724EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
725
726void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
727 const u8 *ra, u16 tid)
728{
729 struct ieee80211_local *local = hw_to_local(hw);
730 struct ieee80211_ra_tid *ra_tid;
731 struct sk_buff *skb = dev_alloc_skb(0);
732
733 if (unlikely(!skb)) {
734#ifdef CONFIG_MAC80211_HT_DEBUG
735 if (net_ratelimit())
736 printk(KERN_WARNING "%s: Not enough memory, "
737 "dropping stop BA session", skb->dev->name);
738#endif
739 return;
740 }
741 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
742 memcpy(&ra_tid->ra, ra, ETH_ALEN);
743 ra_tid->tid = tid;
744
745 skb->pkt_type = IEEE80211_DELBA_MSG;
746 skb_queue_tail(&local->skb_queue, skb);
747 tasklet_schedule(&local->tasklet);
748}
749EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
750
/*
 * After accepting the AddBA Request we activated a timer,
 * resetting it after each frame that arrives from the originator.
 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
 */
static void sta_rx_agg_session_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and various sta_info are needed here, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u8 *ptid = (u8 *)data;
	/* timer_to_tid[] is initialized so that *ptid == its own index;
	 * stepping back *ptid entries recovers &timer_to_tid[0], from which
	 * container_of() yields the owning sta_info */
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					 timer_to_tid[0]);

#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
#endif
	/* the peer went quiet: tear down the Rx session with TIMER/TIMEOUT */
	ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
					 (u16)*ptid, WLAN_BACK_TIMER,
					 WLAN_REASON_QSTA_TIMEOUT);
}
774
775void ieee80211_process_addba_request(struct ieee80211_local *local,
776 struct sta_info *sta,
777 struct ieee80211_mgmt *mgmt,
778 size_t len)
779{
780 struct ieee80211_hw *hw = &local->hw;
781 struct ieee80211_conf *conf = &hw->conf;
782 struct tid_ampdu_rx *tid_agg_rx;
783 u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
784 u8 dialog_token;
785 int ret = -EOPNOTSUPP;
786 DECLARE_MAC_BUF(mac);
787
788 /* extract session parameters from addba request frame */
789 dialog_token = mgmt->u.action.u.addba_req.dialog_token;
790 timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout);
791 start_seq_num =
792 le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
793
794 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
795 ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
796 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
797 buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
798
799 status = WLAN_STATUS_REQUEST_DECLINED;
800
801 /* sanity check for incoming parameters:
802 * check if configuration can support the BA policy
803 * and if buffer size does not exceeds max value */
804 if (((ba_policy != 1)
805 && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA)))
806 || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
807 status = WLAN_STATUS_INVALID_QOS_PARAM;
808#ifdef CONFIG_MAC80211_HT_DEBUG
809 if (net_ratelimit())
810 printk(KERN_DEBUG "AddBA Req with bad params from "
811 "%s on tid %u. policy %d, buffer size %d\n",
812 print_mac(mac, mgmt->sa), tid, ba_policy,
813 buf_size);
814#endif /* CONFIG_MAC80211_HT_DEBUG */
815 goto end_no_lock;
816 }
817 /* determine default buffer size */
818 if (buf_size == 0) {
819 struct ieee80211_supported_band *sband;
820
821 sband = local->hw.wiphy->bands[conf->channel->band];
822 buf_size = IEEE80211_MIN_AMPDU_BUF;
823 buf_size = buf_size << sband->ht_info.ampdu_factor;
824 }
825
826
827 /* examine state machine */
828 spin_lock_bh(&sta->lock);
829
830 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
831#ifdef CONFIG_MAC80211_HT_DEBUG
832 if (net_ratelimit())
833 printk(KERN_DEBUG "unexpected AddBA Req from "
834 "%s on tid %u\n",
835 print_mac(mac, mgmt->sa), tid);
836#endif /* CONFIG_MAC80211_HT_DEBUG */
837 goto end;
838 }
839
840 /* prepare A-MPDU MLME for Rx aggregation */
841 sta->ampdu_mlme.tid_rx[tid] =
842 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
843 if (!sta->ampdu_mlme.tid_rx[tid]) {
844#ifdef CONFIG_MAC80211_HT_DEBUG
845 if (net_ratelimit())
846 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
847 tid);
848#endif
849 goto end;
850 }
851 /* rx timer */
852 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
853 sta_rx_agg_session_timer_expired;
854 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
855 (unsigned long)&sta->timer_to_tid[tid];
856 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
857
858 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
859
860 /* prepare reordering buffer */
861 tid_agg_rx->reorder_buf =
862 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
863 if (!tid_agg_rx->reorder_buf) {
864#ifdef CONFIG_MAC80211_HT_DEBUG
865 if (net_ratelimit())
866 printk(KERN_ERR "can not allocate reordering buffer "
867 "to tid %d\n", tid);
868#endif
869 kfree(sta->ampdu_mlme.tid_rx[tid]);
870 goto end;
871 }
872 memset(tid_agg_rx->reorder_buf, 0,
873 buf_size * sizeof(struct sk_buff *));
874
875 if (local->ops->ampdu_action)
876 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
877 &sta->sta, tid, &start_seq_num);
878#ifdef CONFIG_MAC80211_HT_DEBUG
879 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
880#endif /* CONFIG_MAC80211_HT_DEBUG */
881
882 if (ret) {
883 kfree(tid_agg_rx->reorder_buf);
884 kfree(tid_agg_rx);
885 sta->ampdu_mlme.tid_rx[tid] = NULL;
886 goto end;
887 }
888
889 /* change state and send addba resp */
890 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
891 tid_agg_rx->dialog_token = dialog_token;
892 tid_agg_rx->ssn = start_seq_num;
893 tid_agg_rx->head_seq_num = start_seq_num;
894 tid_agg_rx->buf_size = buf_size;
895 tid_agg_rx->timeout = timeout;
896 tid_agg_rx->stored_mpdu_num = 0;
897 status = WLAN_STATUS_SUCCESS;
898end:
899 spin_unlock_bh(&sta->lock);
900
901end_no_lock:
902 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
903 dialog_token, status, 1, buf_size, timeout);
904}
905
/*
 * Handle the peer's AddBA Response to a Tx BA session we initiated:
 * verify the dialog token, stop the response-wait timer, and either mark
 * the session RECEIVED (possibly going OPERATIONAL and waking the agg
 * queue) or tear the session down on a non-success status.
 */
void ieee80211_process_addba_resp(struct ieee80211_local *local,
				  struct sta_info *sta,
				  struct ieee80211_mgmt *mgmt,
				  size_t len)
{
	struct ieee80211_hw *hw = &local->hw;
	u16 capab;
	u16 tid;
	u8 *state;

	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;

	state = &sta->ampdu_mlme.tid_state_tx[tid];

	spin_lock_bh(&sta->lock);

	/* a response without an outstanding request is stale; drop it */
	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
		spin_unlock_bh(&sta->lock);
		return;
	}

	/* the token must match the one we sent in the AddBA Request */
	if (mgmt->u.action.u.addba_resp.dialog_token !=
		sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
		spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
		return;
	}

	/* NOTE(review): del_timer_sync() is called here with sta->lock
	 * held; if the addba_resp timer handler also takes sta->lock this
	 * can deadlock -- confirm the handler's locking before relying on
	 * this path */
	del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
			== WLAN_STATUS_SUCCESS) {
		*state |= HT_ADDBA_RECEIVED_MSK;
		sta->ampdu_mlme.addba_req_num[tid] = 0;

		/* driver readiness may already be flagged; if so the
		 * session is fully up -- unblock the aggregation queue */
		if (*state == HT_AGG_STATE_OPERATIONAL)
			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);

		spin_unlock_bh(&sta->lock);
	} else {
		sta->ampdu_mlme.addba_req_num[tid]++;
		/* this will allow the state check in stop_BA_session */
		*state = HT_AGG_STATE_OPERATIONAL;
		spin_unlock_bh(&sta->lock);
		ieee80211_stop_tx_ba_session(hw, sta->sta.addr, tid,
					     WLAN_BACK_INITIATOR);
	}
}
959
960void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
961 struct sta_info *sta,
962 struct ieee80211_mgmt *mgmt, size_t len)
963{
964 struct ieee80211_local *local = sdata->local;
965 u16 tid, params;
966 u16 initiator;
967 DECLARE_MAC_BUF(mac);
968
969 params = le16_to_cpu(mgmt->u.action.u.delba.params);
970 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
971 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
972
973#ifdef CONFIG_MAC80211_HT_DEBUG
974 if (net_ratelimit())
975 printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
976 print_mac(mac, mgmt->sa),
977 initiator ? "initiator" : "recipient", tid,
978 mgmt->u.action.u.delba.reason_code);
979#endif /* CONFIG_MAC80211_HT_DEBUG */
980
981 if (initiator == WLAN_BACK_INITIATOR)
982 ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid,
983 WLAN_BACK_INITIATOR, 0);
984 else { /* WLAN_BACK_RECIPIENT */
985 spin_lock_bh(&sta->lock);
986 sta->ampdu_mlme.tid_state_tx[tid] =
987 HT_AGG_STATE_OPERATIONAL;
988 spin_unlock_bh(&sta->lock);
989 ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
990 WLAN_BACK_RECIPIENT);
991 }
992}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 4498d8713652..8025b294588b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -29,17 +29,6 @@
29#include "key.h" 29#include "key.h"
30#include "sta_info.h" 30#include "sta_info.h"
31 31
32/* ieee80211.o internal definitions, etc. These are not included into
33 * low-level drivers. */
34
35#ifndef ETH_P_PAE
36#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
37#endif /* ETH_P_PAE */
38
39#define WLAN_FC_DATA_PRESENT(fc) (((fc) & 0x4c) == 0x08)
40
41#define IEEE80211_FC(type, subtype) cpu_to_le16(type | subtype)
42
43struct ieee80211_local; 32struct ieee80211_local;
44 33
45/* Maximum number of broadcast/multicast frames to buffer when some of the 34/* Maximum number of broadcast/multicast frames to buffer when some of the
@@ -61,6 +50,12 @@ struct ieee80211_local;
61 * increased memory use (about 2 kB of RAM per entry). */ 50 * increased memory use (about 2 kB of RAM per entry). */
62#define IEEE80211_FRAGMENT_MAX 4 51#define IEEE80211_FRAGMENT_MAX 4
63 52
53/*
54 * Time after which we ignore scan results and no longer report/use
55 * them in any way.
56 */
57#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
58
64struct ieee80211_fragment_entry { 59struct ieee80211_fragment_entry {
65 unsigned long first_frag_time; 60 unsigned long first_frag_time;
66 unsigned int seq; 61 unsigned int seq;
@@ -73,9 +68,9 @@ struct ieee80211_fragment_entry {
73}; 68};
74 69
75 70
76struct ieee80211_sta_bss { 71struct ieee80211_bss {
77 struct list_head list; 72 struct list_head list;
78 struct ieee80211_sta_bss *hnext; 73 struct ieee80211_bss *hnext;
79 size_t ssid_len; 74 size_t ssid_len;
80 75
81 atomic_t users; 76 atomic_t users;
@@ -87,16 +82,11 @@ struct ieee80211_sta_bss {
87 enum ieee80211_band band; 82 enum ieee80211_band band;
88 int freq; 83 int freq;
89 int signal, noise, qual; 84 int signal, noise, qual;
90 u8 *wpa_ie; 85 u8 *ies; /* all information elements from the last Beacon or Probe
91 size_t wpa_ie_len; 86 * Response frames; note Beacon frame is not allowed to
92 u8 *rsn_ie; 87 * override values from Probe Response */
93 size_t rsn_ie_len; 88 size_t ies_len;
94 u8 *wmm_ie; 89 bool wmm_used;
95 size_t wmm_ie_len;
96 u8 *ht_ie;
97 size_t ht_ie_len;
98 u8 *ht_add_ie;
99 size_t ht_add_ie_len;
100#ifdef CONFIG_MAC80211_MESH 90#ifdef CONFIG_MAC80211_MESH
101 u8 *mesh_id; 91 u8 *mesh_id;
102 size_t mesh_id_len; 92 size_t mesh_id_len;
@@ -108,7 +98,7 @@ struct ieee80211_sta_bss {
108 u64 timestamp; 98 u64 timestamp;
109 int beacon_int; 99 int beacon_int;
110 100
111 bool probe_resp; 101 unsigned long last_probe_resp;
112 unsigned long last_update; 102 unsigned long last_update;
113 103
114 /* during assocation, we save an ERP value from a probe response so 104 /* during assocation, we save an ERP value from a probe response so
@@ -119,7 +109,7 @@ struct ieee80211_sta_bss {
119 u8 erp_value; 109 u8 erp_value;
120}; 110};
121 111
122static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss) 112static inline u8 *bss_mesh_cfg(struct ieee80211_bss *bss)
123{ 113{
124#ifdef CONFIG_MAC80211_MESH 114#ifdef CONFIG_MAC80211_MESH
125 return bss->mesh_cfg; 115 return bss->mesh_cfg;
@@ -127,7 +117,7 @@ static inline u8 *bss_mesh_cfg(struct ieee80211_sta_bss *bss)
127 return NULL; 117 return NULL;
128} 118}
129 119
130static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss) 120static inline u8 *bss_mesh_id(struct ieee80211_bss *bss)
131{ 121{
132#ifdef CONFIG_MAC80211_MESH 122#ifdef CONFIG_MAC80211_MESH
133 return bss->mesh_id; 123 return bss->mesh_id;
@@ -135,7 +125,7 @@ static inline u8 *bss_mesh_id(struct ieee80211_sta_bss *bss)
135 return NULL; 125 return NULL;
136} 126}
137 127
138static inline u8 bss_mesh_id_len(struct ieee80211_sta_bss *bss) 128static inline u8 bss_mesh_id_len(struct ieee80211_bss *bss)
139{ 129{
140#ifdef CONFIG_MAC80211_MESH 130#ifdef CONFIG_MAC80211_MESH
141 return bss->mesh_id_len; 131 return bss->mesh_id_len;
@@ -174,7 +164,7 @@ struct ieee80211_tx_data {
174 struct sk_buff **extra_frag; 164 struct sk_buff **extra_frag;
175 int num_extra_frag; 165 int num_extra_frag;
176 166
177 u16 fc, ethertype; 167 u16 ethertype;
178 unsigned int flags; 168 unsigned int flags;
179}; 169};
180 170
@@ -202,7 +192,7 @@ struct ieee80211_rx_data {
202 struct ieee80211_rx_status *status; 192 struct ieee80211_rx_status *status;
203 struct ieee80211_rate *rate; 193 struct ieee80211_rate *rate;
204 194
205 u16 fc, ethertype; 195 u16 ethertype;
206 unsigned int flags; 196 unsigned int flags;
207 int sent_ps_buffered; 197 int sent_ps_buffered;
208 int queue; 198 int queue;
@@ -239,7 +229,6 @@ struct ieee80211_if_ap {
239 struct sk_buff_head ps_bc_buf; 229 struct sk_buff_head ps_bc_buf;
240 atomic_t num_sta_ps; /* number of stations in PS mode */ 230 atomic_t num_sta_ps; /* number of stations in PS mode */
241 int dtim_count; 231 int dtim_count;
242 int num_beacons; /* number of TXed beacon frames for this BSS */
243}; 232};
244 233
245struct ieee80211_if_wds { 234struct ieee80211_if_wds {
@@ -300,48 +289,37 @@ struct mesh_config {
300#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) 289#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
301#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 290#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
302#define IEEE80211_STA_PRIVACY_INVOKED BIT(13) 291#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
292/* flags for MLME request */
293#define IEEE80211_STA_REQ_SCAN 0
294#define IEEE80211_STA_REQ_DIRECT_PROBE 1
295#define IEEE80211_STA_REQ_AUTH 2
296#define IEEE80211_STA_REQ_RUN 3
297
298/* STA/IBSS MLME states */
299enum ieee80211_sta_mlme_state {
300 IEEE80211_STA_MLME_DISABLED,
301 IEEE80211_STA_MLME_DIRECT_PROBE,
302 IEEE80211_STA_MLME_AUTHENTICATE,
303 IEEE80211_STA_MLME_ASSOCIATE,
304 IEEE80211_STA_MLME_ASSOCIATED,
305 IEEE80211_STA_MLME_IBSS_SEARCH,
306 IEEE80211_STA_MLME_IBSS_JOINED,
307};
308
309/* bitfield of allowed auth algs */
310#define IEEE80211_AUTH_ALG_OPEN BIT(0)
311#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
312#define IEEE80211_AUTH_ALG_LEAP BIT(2)
313
303struct ieee80211_if_sta { 314struct ieee80211_if_sta {
304 struct timer_list timer; 315 struct timer_list timer;
305 struct work_struct work; 316 struct work_struct work;
306 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 317 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
307 u8 ssid[IEEE80211_MAX_SSID_LEN]; 318 u8 ssid[IEEE80211_MAX_SSID_LEN];
308 enum { 319 enum ieee80211_sta_mlme_state state;
309 IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
310 IEEE80211_ASSOCIATE, IEEE80211_ASSOCIATED,
311 IEEE80211_IBSS_SEARCH, IEEE80211_IBSS_JOINED,
312 IEEE80211_MESH_UP
313 } state;
314 size_t ssid_len; 320 size_t ssid_len;
315 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 321 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
316 size_t scan_ssid_len; 322 size_t scan_ssid_len;
317#ifdef CONFIG_MAC80211_MESH
318 struct timer_list mesh_path_timer;
319 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
320 size_t mesh_id_len;
321 /* Active Path Selection Protocol Identifier */
322 u8 mesh_pp_id[4];
323 /* Active Path Selection Metric Identifier */
324 u8 mesh_pm_id[4];
325 /* Congestion Control Mode Identifier */
326 u8 mesh_cc_id[4];
327 /* Local mesh Destination Sequence Number */
328 u32 dsn;
329 /* Last used PREQ ID */
330 u32 preq_id;
331 atomic_t mpaths;
332 /* Timestamp of last DSN update */
333 unsigned long last_dsn_update;
334 /* Timestamp of last DSN sent */
335 unsigned long last_preq;
336 struct mesh_rmc *rmc;
337 spinlock_t mesh_preq_queue_lock;
338 struct mesh_preq_queue preq_queue;
339 int preq_queue_len;
340 struct mesh_stats mshstats;
341 struct mesh_config mshcfg;
342 u32 mesh_seqnum;
343 bool accepting_plinks;
344#endif
345 u16 aid; 323 u16 aid;
346 u16 ap_capab, capab; 324 u16 ap_capab, capab;
347 u8 *extra_ie; /* to be added to the end of AssocReq */ 325 u8 *extra_ie; /* to be added to the end of AssocReq */
@@ -353,20 +331,17 @@ struct ieee80211_if_sta {
353 331
354 struct sk_buff_head skb_queue; 332 struct sk_buff_head skb_queue;
355 333
356 int auth_tries, assoc_tries; 334 int assoc_scan_tries; /* number of scans done pre-association */
335 int direct_probe_tries; /* retries for direct probes */
336 int auth_tries; /* retries for auth req */
337 int assoc_tries; /* retries for assoc req */
357 338
358 unsigned long request; 339 unsigned long request;
359 340
360 unsigned long last_probe; 341 unsigned long last_probe;
361 342
362 unsigned int flags; 343 unsigned int flags;
363#define IEEE80211_STA_REQ_SCAN 0
364#define IEEE80211_STA_REQ_AUTH 1
365#define IEEE80211_STA_REQ_RUN 2
366 344
367#define IEEE80211_AUTH_ALG_OPEN BIT(0)
368#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
369#define IEEE80211_AUTH_ALG_LEAP BIT(2)
370 unsigned int auth_algs; /* bitfield of allowed auth algs */ 345 unsigned int auth_algs; /* bitfield of allowed auth algs */
371 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */ 346 int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
372 int auth_transaction; 347 int auth_transaction;
@@ -376,31 +351,70 @@ struct ieee80211_if_sta {
376 u32 supp_rates_bits[IEEE80211_NUM_BANDS]; 351 u32 supp_rates_bits[IEEE80211_NUM_BANDS];
377 352
378 int wmm_last_param_set; 353 int wmm_last_param_set;
379 int num_beacons; /* number of TXed beacon frames by this STA */
380}; 354};
381 355
382static inline void ieee80211_if_sta_set_mesh_id(struct ieee80211_if_sta *ifsta, 356struct ieee80211_if_mesh {
383 u8 mesh_id_len, u8 *mesh_id) 357 struct work_struct work;
384{ 358 struct timer_list housekeeping_timer;
385#ifdef CONFIG_MAC80211_MESH 359 struct timer_list mesh_path_timer;
386 ifsta->mesh_id_len = mesh_id_len; 360 struct sk_buff_head skb_queue;
387 memcpy(ifsta->mesh_id, mesh_id, mesh_id_len); 361
388#endif 362 bool housekeeping;
389} 363
364 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
365 size_t mesh_id_len;
366 /* Active Path Selection Protocol Identifier */
367 u8 mesh_pp_id[4];
368 /* Active Path Selection Metric Identifier */
369 u8 mesh_pm_id[4];
370 /* Congestion Control Mode Identifier */
371 u8 mesh_cc_id[4];
372 /* Local mesh Destination Sequence Number */
373 u32 dsn;
374 /* Last used PREQ ID */
375 u32 preq_id;
376 atomic_t mpaths;
377 /* Timestamp of last DSN update */
378 unsigned long last_dsn_update;
379 /* Timestamp of last DSN sent */
380 unsigned long last_preq;
381 struct mesh_rmc *rmc;
382 spinlock_t mesh_preq_queue_lock;
383 struct mesh_preq_queue preq_queue;
384 int preq_queue_len;
385 struct mesh_stats mshstats;
386 struct mesh_config mshcfg;
387 u32 mesh_seqnum;
388 bool accepting_plinks;
389};
390 390
391#ifdef CONFIG_MAC80211_MESH 391#ifdef CONFIG_MAC80211_MESH
392#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ 392#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \
393 do { (sta)->mshstats.name++; } while (0) 393 do { (msh)->mshstats.name++; } while (0)
394#else 394#else
395#define IEEE80211_IFSTA_MESH_CTR_INC(sta, name) \ 395#define IEEE80211_IFSTA_MESH_CTR_INC(msh, name) \
396 do { } while (0) 396 do { } while (0)
397#endif 397#endif
398 398
399/* flags used in struct ieee80211_sub_if_data.flags */ 399/**
400#define IEEE80211_SDATA_ALLMULTI BIT(0) 400 * enum ieee80211_sub_if_data_flags - virtual interface flags
401#define IEEE80211_SDATA_PROMISC BIT(1) 401 *
402#define IEEE80211_SDATA_USERSPACE_MLME BIT(2) 402 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
403#define IEEE80211_SDATA_OPERATING_GMODE BIT(3) 403 * @IEEE80211_SDATA_PROMISC: interface is promisc
404 * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active
405 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
406 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
407 * associated stations and deliver multicast frames both
408 * back to wireless media and to the local net stack.
409 */
410enum ieee80211_sub_if_data_flags {
411 IEEE80211_SDATA_ALLMULTI = BIT(0),
412 IEEE80211_SDATA_PROMISC = BIT(1),
413 IEEE80211_SDATA_USERSPACE_MLME = BIT(2),
414 IEEE80211_SDATA_OPERATING_GMODE = BIT(3),
415 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(4),
416};
417
404struct ieee80211_sub_if_data { 418struct ieee80211_sub_if_data {
405 struct list_head list; 419 struct list_head list;
406 420
@@ -416,11 +430,6 @@ struct ieee80211_sub_if_data {
416 430
417 int drop_unencrypted; 431 int drop_unencrypted;
418 432
419 /*
420 * basic rates of this AP or the AP we're associated to
421 */
422 u64 basic_rates;
423
424 /* Fragment table for host-based reassembly */ 433 /* Fragment table for host-based reassembly */
425 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; 434 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
426 unsigned int fragment_next; 435 unsigned int fragment_next;
@@ -447,6 +456,9 @@ struct ieee80211_sub_if_data {
447 struct ieee80211_if_wds wds; 456 struct ieee80211_if_wds wds;
448 struct ieee80211_if_vlan vlan; 457 struct ieee80211_if_vlan vlan;
449 struct ieee80211_if_sta sta; 458 struct ieee80211_if_sta sta;
459#ifdef CONFIG_MAC80211_MESH
460 struct ieee80211_if_mesh mesh;
461#endif
450 u32 mntr_flags; 462 u32 mntr_flags;
451 } u; 463 } u;
452 464
@@ -469,7 +481,6 @@ struct ieee80211_sub_if_data {
469 struct dentry *auth_alg; 481 struct dentry *auth_alg;
470 struct dentry *auth_transaction; 482 struct dentry *auth_transaction;
471 struct dentry *flags; 483 struct dentry *flags;
472 struct dentry *num_beacons_sta;
473 struct dentry *force_unicast_rateidx; 484 struct dentry *force_unicast_rateidx;
474 struct dentry *max_ratectrl_rateidx; 485 struct dentry *max_ratectrl_rateidx;
475 } sta; 486 } sta;
@@ -477,7 +488,6 @@ struct ieee80211_sub_if_data {
477 struct dentry *drop_unencrypted; 488 struct dentry *drop_unencrypted;
478 struct dentry *num_sta_ps; 489 struct dentry *num_sta_ps;
479 struct dentry *dtim_count; 490 struct dentry *dtim_count;
480 struct dentry *num_beacons;
481 struct dentry *force_unicast_rateidx; 491 struct dentry *force_unicast_rateidx;
482 struct dentry *max_ratectrl_rateidx; 492 struct dentry *max_ratectrl_rateidx;
483 struct dentry *num_buffered_multicast; 493 struct dentry *num_buffered_multicast;
@@ -540,6 +550,19 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
540 return container_of(p, struct ieee80211_sub_if_data, vif); 550 return container_of(p, struct ieee80211_sub_if_data, vif);
541} 551}
542 552
553static inline void
554ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata,
555 u8 mesh_id_len, u8 *mesh_id)
556{
557#ifdef CONFIG_MAC80211_MESH
558 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
559 ifmsh->mesh_id_len = mesh_id_len;
560 memcpy(ifmsh->mesh_id, mesh_id, mesh_id_len);
561#else
562 WARN_ON(1);
563#endif
564}
565
543enum { 566enum {
544 IEEE80211_RX_MSG = 1, 567 IEEE80211_RX_MSG = 1,
545 IEEE80211_TX_STATUS_MSG = 2, 568 IEEE80211_TX_STATUS_MSG = 2,
@@ -550,6 +573,10 @@ enum {
550/* maximum number of hardware queues we support. */ 573/* maximum number of hardware queues we support. */
551#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES) 574#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
552 575
576struct ieee80211_master_priv {
577 struct ieee80211_local *local;
578};
579
553struct ieee80211_local { 580struct ieee80211_local {
554 /* embed the driver visible part. 581 /* embed the driver visible part.
555 * don't cast (use the static inlines below), but we keep 582 * don't cast (use the static inlines below), but we keep
@@ -613,10 +640,6 @@ struct ieee80211_local {
613 struct crypto_blkcipher *wep_rx_tfm; 640 struct crypto_blkcipher *wep_rx_tfm;
614 u32 wep_iv; 641 u32 wep_iv;
615 642
616 int bridge_packets; /* bridge packets between associated stations and
617 * deliver multicast frames both back to wireless
618 * media and to the local net stack */
619
620 struct list_head interfaces; 643 struct list_head interfaces;
621 644
622 /* 645 /*
@@ -626,21 +649,21 @@ struct ieee80211_local {
626 spinlock_t key_lock; 649 spinlock_t key_lock;
627 650
628 651
629 bool sta_sw_scanning; 652 /* Scanning and BSS list */
630 bool sta_hw_scanning; 653 bool sw_scanning, hw_scanning;
631 int scan_channel_idx; 654 int scan_channel_idx;
632 enum ieee80211_band scan_band; 655 enum ieee80211_band scan_band;
633 656
634 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state; 657 enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
635 unsigned long last_scan_completed; 658 unsigned long last_scan_completed;
636 struct delayed_work scan_work; 659 struct delayed_work scan_work;
637 struct net_device *scan_dev; 660 struct ieee80211_sub_if_data *scan_sdata;
638 struct ieee80211_channel *oper_channel, *scan_channel; 661 struct ieee80211_channel *oper_channel, *scan_channel;
639 u8 scan_ssid[IEEE80211_MAX_SSID_LEN]; 662 u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
640 size_t scan_ssid_len; 663 size_t scan_ssid_len;
641 struct list_head sta_bss_list; 664 struct list_head bss_list;
642 struct ieee80211_sta_bss *sta_bss_hash[STA_HASH_SIZE]; 665 struct ieee80211_bss *bss_hash[STA_HASH_SIZE];
643 spinlock_t sta_bss_lock; 666 spinlock_t bss_lock;
644 667
645 /* SNMP counters */ 668 /* SNMP counters */
646 /* dot11CountersTable */ 669 /* dot11CountersTable */
@@ -701,10 +724,11 @@ struct ieee80211_local {
701 724
702#ifdef CONFIG_MAC80211_DEBUGFS 725#ifdef CONFIG_MAC80211_DEBUGFS
703 struct local_debugfsdentries { 726 struct local_debugfsdentries {
727 struct dentry *rcdir;
728 struct dentry *rcname;
704 struct dentry *frequency; 729 struct dentry *frequency;
705 struct dentry *antenna_sel_tx; 730 struct dentry *antenna_sel_tx;
706 struct dentry *antenna_sel_rx; 731 struct dentry *antenna_sel_rx;
707 struct dentry *bridge_packets;
708 struct dentry *rts_threshold; 732 struct dentry *rts_threshold;
709 struct dentry *fragmentation_threshold; 733 struct dentry *fragmentation_threshold;
710 struct dentry *short_retry_limit; 734 struct dentry *short_retry_limit;
@@ -774,6 +798,9 @@ struct ieee80211_ra_tid {
774 798
775/* Parsed Information Elements */ 799/* Parsed Information Elements */
776struct ieee802_11_elems { 800struct ieee802_11_elems {
801 u8 *ie_start;
802 size_t total_len;
803
777 /* pointers to IEs */ 804 /* pointers to IEs */
778 u8 *ssid; 805 u8 *ssid;
779 u8 *supp_rates; 806 u8 *supp_rates;
@@ -857,86 +884,82 @@ static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
857} 884}
858 885
859 886
860/* ieee80211.c */
861int ieee80211_hw_config(struct ieee80211_local *local); 887int ieee80211_hw_config(struct ieee80211_local *local);
862int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed); 888int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed);
863void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx); 889void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx);
864u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht, 890u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
865 struct ieee80211_ht_info *req_ht_cap, 891 struct ieee80211_ht_info *req_ht_cap,
866 struct ieee80211_ht_bss_info *req_bss_cap); 892 struct ieee80211_ht_bss_info *req_bss_cap);
893void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
894 u32 changed);
895void ieee80211_configure_filter(struct ieee80211_local *local);
867 896
868/* ieee80211_ioctl.c */ 897/* wireless extensions */
869extern const struct iw_handler_def ieee80211_iw_handler_def; 898extern const struct iw_handler_def ieee80211_iw_handler_def;
870int ieee80211_set_freq(struct net_device *dev, int freq);
871 899
872/* ieee80211_sta.c */ 900/* STA/IBSS code */
873void ieee80211_sta_timer(unsigned long data); 901void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
874void ieee80211_sta_work(struct work_struct *work); 902void ieee80211_scan_work(struct work_struct *work);
875void ieee80211_sta_scan_work(struct work_struct *work); 903void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
876void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
877 struct ieee80211_rx_status *rx_status); 904 struct ieee80211_rx_status *rx_status);
878int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len); 905int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len);
879int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len); 906int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len);
880int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid); 907int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
881int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len); 908void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
882void ieee80211_sta_req_auth(struct net_device *dev,
883 struct ieee80211_if_sta *ifsta); 909 struct ieee80211_if_sta *ifsta);
884int ieee80211_sta_scan_results(struct net_device *dev, 910struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
885 struct iw_request_info *info,
886 char *buf, size_t len);
887ieee80211_rx_result ieee80211_sta_rx_scan(
888 struct net_device *dev, struct sk_buff *skb,
889 struct ieee80211_rx_status *rx_status);
890void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
891void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
892int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len);
893struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
894 struct sk_buff *skb, u8 *bssid, 911 struct sk_buff *skb, u8 *bssid,
895 u8 *addr, u64 supp_rates); 912 u8 *addr, u64 supp_rates);
896int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason); 913int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason);
897int ieee80211_sta_disassociate(struct net_device *dev, u16 reason); 914int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
898void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, 915u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
899 u32 changed);
900u32 ieee80211_reset_erp_info(struct net_device *dev);
901int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
902 struct ieee80211_ht_info *ht_info);
903int ieee80211_ht_addt_info_ie_to_ht_bss_info(
904 struct ieee80211_ht_addt_info *ht_add_info_ie,
905 struct ieee80211_ht_bss_info *bss_info);
906void ieee80211_send_addba_request(struct net_device *dev, const u8 *da,
907 u16 tid, u8 dialog_token, u16 start_seq_num,
908 u16 agg_size, u16 timeout);
909void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid,
910 u16 initiator, u16 reason_code);
911void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn);
912
913void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *da,
914 u16 tid, u16 initiator, u16 reason);
915void sta_addba_resp_timer_expired(unsigned long data);
916void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr);
917u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 916u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
918 struct ieee802_11_elems *elems, 917 struct ieee802_11_elems *elems,
919 enum ieee80211_band band); 918 enum ieee80211_band band);
920void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb, 919void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
921 int encrypt); 920 u8 *ssid, size_t ssid_len);
922void ieee802_11_parse_elems(u8 *start, size_t len, 921
923 struct ieee802_11_elems *elems); 922/* scan/BSS handling */
924 923int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
925#ifdef CONFIG_MAC80211_MESH 924 u8 *ssid, size_t ssid_len);
926void ieee80211_start_mesh(struct net_device *dev); 925int ieee80211_scan_results(struct ieee80211_local *local,
927#else 926 struct iw_request_info *info,
928static inline void ieee80211_start_mesh(struct net_device *dev) 927 char *buf, size_t len);
929{} 928ieee80211_rx_result
930#endif 929ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
930 struct sk_buff *skb,
931 struct ieee80211_rx_status *rx_status);
932void ieee80211_rx_bss_list_init(struct ieee80211_local *local);
933void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local);
934int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
935 char *ie, size_t len);
936
937void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
938int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
939 u8 *ssid, size_t ssid_len);
940struct ieee80211_bss *
941ieee80211_bss_info_update(struct ieee80211_local *local,
942 struct ieee80211_rx_status *rx_status,
943 struct ieee80211_mgmt *mgmt,
944 size_t len,
945 struct ieee802_11_elems *elems,
946 int freq, bool beacon);
947struct ieee80211_bss *
948ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
949 u8 *ssid, u8 ssid_len);
950struct ieee80211_bss *
951ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
952 u8 *ssid, u8 ssid_len);
953void ieee80211_rx_bss_put(struct ieee80211_local *local,
954 struct ieee80211_bss *bss);
931 955
932/* interface handling */ 956/* interface handling */
933void ieee80211_if_setup(struct net_device *dev);
934int ieee80211_if_add(struct ieee80211_local *local, const char *name, 957int ieee80211_if_add(struct ieee80211_local *local, const char *name,
935 struct net_device **new_dev, enum ieee80211_if_types type, 958 struct net_device **new_dev, enum nl80211_iftype type,
936 struct vif_params *params); 959 struct vif_params *params);
937int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, 960int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
938 enum ieee80211_if_types type); 961 enum nl80211_iftype type);
939void ieee80211_if_remove(struct net_device *dev); 962void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
940void ieee80211_remove_interfaces(struct ieee80211_local *local); 963void ieee80211_remove_interfaces(struct ieee80211_local *local);
941 964
942/* tx handling */ 965/* tx handling */
@@ -946,16 +969,52 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev);
946int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev); 969int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
947int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); 970int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
948 971
972/* HT */
973int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
974 struct ieee80211_ht_info *ht_info);
975int ieee80211_ht_addt_info_ie_to_ht_bss_info(
976 struct ieee80211_ht_addt_info *ht_add_info_ie,
977 struct ieee80211_ht_bss_info *bss_info);
978void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
979
980void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
981 u16 tid, u16 initiator, u16 reason);
982void ieee80211_sta_tear_down_BA_sessions(struct ieee80211_sub_if_data *sdata, u8 *addr);
983void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
984 struct sta_info *sta,
985 struct ieee80211_mgmt *mgmt, size_t len);
986void ieee80211_process_addba_resp(struct ieee80211_local *local,
987 struct sta_info *sta,
988 struct ieee80211_mgmt *mgmt,
989 size_t len);
990void ieee80211_process_addba_request(struct ieee80211_local *local,
991 struct sta_info *sta,
992 struct ieee80211_mgmt *mgmt,
993 size_t len);
994
995/* Spectrum management */
996void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
997 struct ieee80211_mgmt *mgmt,
998 size_t len);
999
949/* utility functions/constants */ 1000/* utility functions/constants */
950extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1001extern void *mac80211_wiphy_privid; /* for wiphy privid */
951extern const unsigned char rfc1042_header[6]; 1002extern const unsigned char rfc1042_header[6];
952extern const unsigned char bridge_tunnel_header[6]; 1003extern const unsigned char bridge_tunnel_header[6];
953u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 1004u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
954 enum ieee80211_if_types type); 1005 enum nl80211_iftype type);
955int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 1006int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
956 int rate, int erp, int short_preamble); 1007 int rate, int erp, int short_preamble);
957void mac80211_ev_michael_mic_failure(struct net_device *dev, int keyidx, 1008void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
958 struct ieee80211_hdr *hdr); 1009 struct ieee80211_hdr *hdr);
1010void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
1011void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
1012 int encrypt);
1013void ieee802_11_parse_elems(u8 *start, size_t len,
1014 struct ieee802_11_elems *elems);
1015int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freq);
1016u64 ieee80211_mandatory_rates(struct ieee80211_local *local,
1017 enum ieee80211_band band);
959 1018
960#ifdef CONFIG_MAC80211_NOINLINE 1019#ifdef CONFIG_MAC80211_NOINLINE
961#define debug_noinline noinline 1020#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 610ed1d9893a..8336fee68d3e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * Interface handling (except master interface)
3 *
2 * Copyright 2002-2005, Instant802 Networks, Inc. 4 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 5 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz> 6 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
@@ -17,7 +19,539 @@
17#include "sta_info.h" 19#include "sta_info.h"
18#include "debugfs_netdev.h" 20#include "debugfs_netdev.h"
19#include "mesh.h" 21#include "mesh.h"
22#include "led.h"
23
24static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
25{
26 int meshhdrlen;
27 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
28
29 meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0;
30
31 /* FIX: what would be proper limits for MTU?
32 * This interface uses 802.3 frames. */
33 if (new_mtu < 256 ||
34 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
35 return -EINVAL;
36 }
37
38#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
39 printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
40#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
41 dev->mtu = new_mtu;
42 return 0;
43}
44
45static inline int identical_mac_addr_allowed(int type1, int type2)
46{
47 return type1 == NL80211_IFTYPE_MONITOR ||
48 type2 == NL80211_IFTYPE_MONITOR ||
49 (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) ||
50 (type1 == NL80211_IFTYPE_WDS &&
51 (type2 == NL80211_IFTYPE_WDS ||
52 type2 == NL80211_IFTYPE_AP)) ||
53 (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) ||
54 (type1 == NL80211_IFTYPE_AP_VLAN &&
55 (type2 == NL80211_IFTYPE_AP ||
56 type2 == NL80211_IFTYPE_AP_VLAN));
57}
58
59static int ieee80211_open(struct net_device *dev)
60{
61 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
62 struct ieee80211_sub_if_data *nsdata;
63 struct ieee80211_local *local = sdata->local;
64 struct sta_info *sta;
65 struct ieee80211_if_init_conf conf;
66 u32 changed = 0;
67 int res;
68 bool need_hw_reconfig = 0;
69 u8 null_addr[ETH_ALEN] = {0};
70
71 /* fail early if user set an invalid address */
72 if (compare_ether_addr(dev->dev_addr, null_addr) &&
73 !is_valid_ether_addr(dev->dev_addr))
74 return -EADDRNOTAVAIL;
75
76 /* we hold the RTNL here so can safely walk the list */
77 list_for_each_entry(nsdata, &local->interfaces, list) {
78 struct net_device *ndev = nsdata->dev;
79
80 if (ndev != dev && netif_running(ndev)) {
81 /*
82 * Allow only a single IBSS interface to be up at any
83 * time. This is restricted because beacon distribution
84 * cannot work properly if both are in the same IBSS.
85 *
86 * To remove this restriction we'd have to disallow them
87 * from setting the same SSID on different IBSS interfaces
88 * belonging to the same hardware. Then, however, we're
89 * faced with having to adopt two different TSF timers...
90 */
91 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
92 nsdata->vif.type == NL80211_IFTYPE_ADHOC)
93 return -EBUSY;
94
95 /*
96 * The remaining checks are only performed for interfaces
97 * with the same MAC address.
98 */
99 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
100 continue;
101
102 /*
103 * check whether it may have the same address
104 */
105 if (!identical_mac_addr_allowed(sdata->vif.type,
106 nsdata->vif.type))
107 return -ENOTUNIQ;
108
109 /*
110 * can only add VLANs to enabled APs
111 */
112 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
113 nsdata->vif.type == NL80211_IFTYPE_AP)
114 sdata->bss = &nsdata->u.ap;
115 }
116 }
117
118 switch (sdata->vif.type) {
119 case NL80211_IFTYPE_WDS:
120 if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
121 return -ENOLINK;
122 break;
123 case NL80211_IFTYPE_AP_VLAN:
124 if (!sdata->bss)
125 return -ENOLINK;
126 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
127 break;
128 case NL80211_IFTYPE_AP:
129 sdata->bss = &sdata->u.ap;
130 break;
131 case NL80211_IFTYPE_MESH_POINT:
132 if (!ieee80211_vif_is_mesh(&sdata->vif))
133 break;
134 /* mesh ifaces must set allmulti to forward mcast traffic */
135 atomic_inc(&local->iff_allmultis);
136 break;
137 case NL80211_IFTYPE_STATION:
138 case NL80211_IFTYPE_MONITOR:
139 case NL80211_IFTYPE_ADHOC:
140 /* no special treatment */
141 break;
142 case NL80211_IFTYPE_UNSPECIFIED:
143 case __NL80211_IFTYPE_AFTER_LAST:
144 /* cannot happen */
145 WARN_ON(1);
146 break;
147 }
148
149 if (local->open_count == 0) {
150 res = 0;
151 if (local->ops->start)
152 res = local->ops->start(local_to_hw(local));
153 if (res)
154 goto err_del_bss;
155 need_hw_reconfig = 1;
156 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
157 }
158
159 /*
160 * Check all interfaces and copy the hopefully now-present
161 * MAC address to those that have the special null one.
162 */
163 list_for_each_entry(nsdata, &local->interfaces, list) {
164 struct net_device *ndev = nsdata->dev;
165
166 /*
167 * No need to check netif_running since we do not allow
168 * it to start up with this invalid address.
169 */
170 if (compare_ether_addr(null_addr, ndev->dev_addr) == 0)
171 memcpy(ndev->dev_addr,
172 local->hw.wiphy->perm_addr,
173 ETH_ALEN);
174 }
175
176 if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0)
177 memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr,
178 ETH_ALEN);
179
180 /*
181 * Validate the MAC address for this device.
182 */
183 if (!is_valid_ether_addr(dev->dev_addr)) {
184 if (!local->open_count && local->ops->stop)
185 local->ops->stop(local_to_hw(local));
186 return -EADDRNOTAVAIL;
187 }
188
189 switch (sdata->vif.type) {
190 case NL80211_IFTYPE_AP_VLAN:
191 /* no need to tell driver */
192 break;
193 case NL80211_IFTYPE_MONITOR:
194 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
195 local->cooked_mntrs++;
196 break;
197 }
198
199 /* must be before the call to ieee80211_configure_filter */
200 local->monitors++;
201 if (local->monitors == 1)
202 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
203
204 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
205 local->fif_fcsfail++;
206 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
207 local->fif_plcpfail++;
208 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
209 local->fif_control++;
210 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
211 local->fif_other_bss++;
212
213 netif_addr_lock_bh(local->mdev);
214 ieee80211_configure_filter(local);
215 netif_addr_unlock_bh(local->mdev);
216 break;
217 case NL80211_IFTYPE_STATION:
218 case NL80211_IFTYPE_ADHOC:
219 sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
220 /* fall through */
221 default:
222 conf.vif = &sdata->vif;
223 conf.type = sdata->vif.type;
224 conf.mac_addr = dev->dev_addr;
225 res = local->ops->add_interface(local_to_hw(local), &conf);
226 if (res)
227 goto err_stop;
228
229 if (ieee80211_vif_is_mesh(&sdata->vif))
230 ieee80211_start_mesh(sdata);
231 changed |= ieee80211_reset_erp_info(sdata);
232 ieee80211_bss_info_change_notify(sdata, changed);
233 ieee80211_enable_keys(sdata);
234
235 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
236 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
237 netif_carrier_off(dev);
238 else
239 netif_carrier_on(dev);
240 }
241
242 if (sdata->vif.type == NL80211_IFTYPE_WDS) {
243 /* Create STA entry for the WDS peer */
244 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
245 GFP_KERNEL);
246 if (!sta) {
247 res = -ENOMEM;
248 goto err_del_interface;
249 }
250
251 /* no locking required since STA is not live yet */
252 sta->flags |= WLAN_STA_AUTHORIZED;
253
254 res = sta_info_insert(sta);
255 if (res) {
256 /* STA has been freed */
257 goto err_del_interface;
258 }
259 }
260
261 if (local->open_count == 0) {
262 res = dev_open(local->mdev);
263 WARN_ON(res);
264 if (res)
265 goto err_del_interface;
266 tasklet_enable(&local->tx_pending_tasklet);
267 tasklet_enable(&local->tasklet);
268 }
269
270 /*
271 * set_multicast_list will be invoked by the networking core
272 * which will check whether any increments here were done in
273 * error and sync them down to the hardware as filter flags.
274 */
275 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
276 atomic_inc(&local->iff_allmultis);
277
278 if (sdata->flags & IEEE80211_SDATA_PROMISC)
279 atomic_inc(&local->iff_promiscs);
280
281 local->open_count++;
282 if (need_hw_reconfig) {
283 ieee80211_hw_config(local);
284 /*
285 * set default queue parameters so drivers don't
286 * need to initialise the hardware if the hardware
287 * doesn't start up with sane defaults
288 */
289 ieee80211_set_wmm_default(sdata);
290 }
291
292 /*
293 * ieee80211_sta_work is disabled while network interface
294 * is down. Therefore, some configuration changes may not
295 * yet be effective. Trigger execution of ieee80211_sta_work
296 * to fix this.
297 */
298 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
299 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
300 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
301 queue_work(local->hw.workqueue, &ifsta->work);
302 }
303
304 netif_tx_start_all_queues(dev);
305
306 return 0;
307 err_del_interface:
308 local->ops->remove_interface(local_to_hw(local), &conf);
309 err_stop:
310 if (!local->open_count && local->ops->stop)
311 local->ops->stop(local_to_hw(local));
312 err_del_bss:
313 sdata->bss = NULL;
314 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
315 list_del(&sdata->u.vlan.list);
316 return res;
317}
318
319static int ieee80211_stop(struct net_device *dev)
320{
321 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
322 struct ieee80211_local *local = sdata->local;
323 struct ieee80211_if_init_conf conf;
324 struct sta_info *sta;
325
326 /*
327 * Stop TX on this interface first.
328 */
329 netif_tx_stop_all_queues(dev);
330
331 /*
332 * Now delete all active aggregation sessions.
333 */
334 rcu_read_lock();
335
336 list_for_each_entry_rcu(sta, &local->sta_list, list) {
337 if (sta->sdata == sdata)
338 ieee80211_sta_tear_down_BA_sessions(sdata,
339 sta->sta.addr);
340 }
341
342 rcu_read_unlock();
343
344 /*
345 * Remove all stations associated with this interface.
346 *
347 * This must be done before calling ops->remove_interface()
348 * because otherwise we can later invoke ops->sta_notify()
349 * whenever the STAs are removed, and that invalidates driver
350 * assumptions about always getting a vif pointer that is valid
351 * (because if we remove a STA after ops->remove_interface()
352 * the driver will have removed the vif info already!)
353 *
354 * We could relax this and only unlink the stations from the
355 * hash table and list but keep them on a per-sdata list that
356 * will be inserted back again when the interface is brought
357 * up again, but I don't currently see a use case for that,
358 * except with WDS which gets a STA entry created when it is
359 * brought up.
360 */
361 sta_info_flush(local, sdata);
362
363 /*
364 * Don't count this interface for promisc/allmulti while it
365 * is down. dev_mc_unsync() will invoke set_multicast_list
366 * on the master interface which will sync these down to the
367 * hardware as filter flags.
368 */
369 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
370 atomic_dec(&local->iff_allmultis);
371
372 if (sdata->flags & IEEE80211_SDATA_PROMISC)
373 atomic_dec(&local->iff_promiscs);
374
375 dev_mc_unsync(local->mdev, dev);
376
377 /* APs need special treatment */
378 if (sdata->vif.type == NL80211_IFTYPE_AP) {
379 struct ieee80211_sub_if_data *vlan, *tmp;
380 struct beacon_data *old_beacon = sdata->u.ap.beacon;
381
382 /* remove beacon */
383 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
384 synchronize_rcu();
385 kfree(old_beacon);
386
387 /* down all dependent devices, that is VLANs */
388 list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
389 u.vlan.list)
390 dev_close(vlan->dev);
391 WARN_ON(!list_empty(&sdata->u.ap.vlans));
392 }
393
394 local->open_count--;
395
396 switch (sdata->vif.type) {
397 case NL80211_IFTYPE_AP_VLAN:
398 list_del(&sdata->u.vlan.list);
399 /* no need to tell driver */
400 break;
401 case NL80211_IFTYPE_MONITOR:
402 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
403 local->cooked_mntrs--;
404 break;
405 }
406
407 local->monitors--;
408 if (local->monitors == 0)
409 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
410
411 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
412 local->fif_fcsfail--;
413 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
414 local->fif_plcpfail--;
415 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
416 local->fif_control--;
417 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
418 local->fif_other_bss--;
419
420 netif_addr_lock_bh(local->mdev);
421 ieee80211_configure_filter(local);
422 netif_addr_unlock_bh(local->mdev);
423 break;
424 case NL80211_IFTYPE_STATION:
425 case NL80211_IFTYPE_ADHOC:
426 sdata->u.sta.state = IEEE80211_STA_MLME_DISABLED;
427 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
428 del_timer_sync(&sdata->u.sta.timer);
429 /*
430 * If the timer fired while we waited for it, it will have
431 * requeued the work. Now the work will be running again
432 * but will not rearm the timer again because it checks
433 * whether the interface is running, which, at this point,
434 * it no longer is.
435 */
436 cancel_work_sync(&sdata->u.sta.work);
437 /*
438 * When we get here, the interface is marked down.
439 * Call synchronize_rcu() to wait for the RX path
440 * should it be using the interface and enqueuing
441 * frames at this very time on another CPU.
442 */
443 synchronize_rcu();
444 skb_queue_purge(&sdata->u.sta.skb_queue);
445
446 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
447 kfree(sdata->u.sta.extra_ie);
448 sdata->u.sta.extra_ie = NULL;
449 sdata->u.sta.extra_ie_len = 0;
450 /* fall through */
451 case NL80211_IFTYPE_MESH_POINT:
452 if (ieee80211_vif_is_mesh(&sdata->vif)) {
453 /* allmulti is always set on mesh ifaces */
454 atomic_dec(&local->iff_allmultis);
455 ieee80211_stop_mesh(sdata);
456 }
457 /* fall through */
458 default:
459 if (local->scan_sdata == sdata) {
460 if (!local->ops->hw_scan)
461 cancel_delayed_work_sync(&local->scan_work);
462 /*
463 * The software scan can no longer run now, so we can
464 * clear out the scan_sdata reference. However, the
465 * hardware scan may still be running. The complete
466 * function must be prepared to handle a NULL value.
467 */
468 local->scan_sdata = NULL;
469 /*
470 * The memory barrier guarantees that another CPU
471 * that is hardware-scanning will now see the fact
472 * that this interface is gone.
473 */
474 smp_mb();
475 /*
476 * If software scanning, complete the scan but since
477 * the scan_sdata is NULL already don't send out a
478 * scan event to userspace -- the scan is incomplete.
479 */
480 if (local->sw_scanning)
481 ieee80211_scan_completed(&local->hw);
482 }
483
484 conf.vif = &sdata->vif;
485 conf.type = sdata->vif.type;
486 conf.mac_addr = dev->dev_addr;
487 /* disable all keys for as long as this netdev is down */
488 ieee80211_disable_keys(sdata);
489 local->ops->remove_interface(local_to_hw(local), &conf);
490 }
491
492 sdata->bss = NULL;
493
494 if (local->open_count == 0) {
495 if (netif_running(local->mdev))
496 dev_close(local->mdev);
497
498 if (local->ops->stop)
499 local->ops->stop(local_to_hw(local));
500
501 ieee80211_led_radio(local, 0);
502
503 flush_workqueue(local->hw.workqueue);
504
505 tasklet_disable(&local->tx_pending_tasklet);
506 tasklet_disable(&local->tasklet);
507 }
508
509 return 0;
510}
511
512static void ieee80211_set_multicast_list(struct net_device *dev)
513{
514 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
515 struct ieee80211_local *local = sdata->local;
516 int allmulti, promisc, sdata_allmulti, sdata_promisc;
517
518 allmulti = !!(dev->flags & IFF_ALLMULTI);
519 promisc = !!(dev->flags & IFF_PROMISC);
520 sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
521 sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
522
523 if (allmulti != sdata_allmulti) {
524 if (dev->flags & IFF_ALLMULTI)
525 atomic_inc(&local->iff_allmultis);
526 else
527 atomic_dec(&local->iff_allmultis);
528 sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
529 }
530
531 if (promisc != sdata_promisc) {
532 if (dev->flags & IFF_PROMISC)
533 atomic_inc(&local->iff_promiscs);
534 else
535 atomic_dec(&local->iff_promiscs);
536 sdata->flags ^= IEEE80211_SDATA_PROMISC;
537 }
538
539 dev_mc_sync(local->mdev, dev);
540}
20 541
542static void ieee80211_if_setup(struct net_device *dev)
543{
544 ether_setup(dev);
545 dev->hard_start_xmit = ieee80211_subif_start_xmit;
546 dev->wireless_handlers = &ieee80211_iw_handler_def;
547 dev->set_multicast_list = ieee80211_set_multicast_list;
548 dev->change_mtu = ieee80211_change_mtu;
549 dev->open = ieee80211_open;
550 dev->stop = ieee80211_stop;
551 dev->destructor = free_netdev;
552 /* we will validate the address ourselves in ->open */
553 dev->validate_addr = NULL;
554}
21/* 555/*
22 * Called when the netdev is removed or, by the code below, before 556 * Called when the netdev is removed or, by the code below, before
23 * the interface type changes. 557 * the interface type changes.
@@ -31,17 +565,17 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
31 int flushed; 565 int flushed;
32 int i; 566 int i;
33 567
34 ieee80211_debugfs_remove_netdev(sdata);
35
36 /* free extra data */ 568 /* free extra data */
37 ieee80211_free_keys(sdata); 569 ieee80211_free_keys(sdata);
38 570
571 ieee80211_debugfs_remove_netdev(sdata);
572
39 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) 573 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
40 __skb_queue_purge(&sdata->fragments[i].skb_list); 574 __skb_queue_purge(&sdata->fragments[i].skb_list);
41 sdata->fragment_next = 0; 575 sdata->fragment_next = 0;
42 576
43 switch (sdata->vif.type) { 577 switch (sdata->vif.type) {
44 case IEEE80211_IF_TYPE_AP: 578 case NL80211_IFTYPE_AP:
45 beacon = sdata->u.ap.beacon; 579 beacon = sdata->u.ap.beacon;
46 rcu_assign_pointer(sdata->u.ap.beacon, NULL); 580 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
47 synchronize_rcu(); 581 synchronize_rcu();
@@ -53,23 +587,23 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
53 } 587 }
54 588
55 break; 589 break;
56 case IEEE80211_IF_TYPE_MESH_POINT: 590 case NL80211_IFTYPE_MESH_POINT:
57 /* Allow compiler to elide mesh_rmc_free call. */
58 if (ieee80211_vif_is_mesh(&sdata->vif)) 591 if (ieee80211_vif_is_mesh(&sdata->vif))
59 mesh_rmc_free(dev); 592 mesh_rmc_free(sdata);
60 /* fall through */ 593 break;
61 case IEEE80211_IF_TYPE_STA: 594 case NL80211_IFTYPE_STATION:
62 case IEEE80211_IF_TYPE_IBSS: 595 case NL80211_IFTYPE_ADHOC:
63 kfree(sdata->u.sta.extra_ie); 596 kfree(sdata->u.sta.extra_ie);
64 kfree(sdata->u.sta.assocreq_ies); 597 kfree(sdata->u.sta.assocreq_ies);
65 kfree(sdata->u.sta.assocresp_ies); 598 kfree(sdata->u.sta.assocresp_ies);
66 kfree_skb(sdata->u.sta.probe_resp); 599 kfree_skb(sdata->u.sta.probe_resp);
67 break; 600 break;
68 case IEEE80211_IF_TYPE_WDS: 601 case NL80211_IFTYPE_WDS:
69 case IEEE80211_IF_TYPE_VLAN: 602 case NL80211_IFTYPE_AP_VLAN:
70 case IEEE80211_IF_TYPE_MNTR: 603 case NL80211_IFTYPE_MONITOR:
71 break; 604 break;
72 case IEEE80211_IF_TYPE_INVALID: 605 case NL80211_IFTYPE_UNSPECIFIED:
606 case __NL80211_IFTYPE_AFTER_LAST:
73 BUG(); 607 BUG();
74 break; 608 break;
75 } 609 }
@@ -82,55 +616,43 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
82 * Helper function to initialise an interface to a specific type. 616 * Helper function to initialise an interface to a specific type.
83 */ 617 */
84static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, 618static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
85 enum ieee80211_if_types type) 619 enum nl80211_iftype type)
86{ 620{
87 struct ieee80211_if_sta *ifsta;
88
89 /* clear type-dependent union */ 621 /* clear type-dependent union */
90 memset(&sdata->u, 0, sizeof(sdata->u)); 622 memset(&sdata->u, 0, sizeof(sdata->u));
91 623
92 /* and set some type-dependent values */ 624 /* and set some type-dependent values */
93 sdata->vif.type = type; 625 sdata->vif.type = type;
626 sdata->dev->hard_start_xmit = ieee80211_subif_start_xmit;
627 sdata->wdev.iftype = type;
94 628
95 /* only monitor differs */ 629 /* only monitor differs */
96 sdata->dev->type = ARPHRD_ETHER; 630 sdata->dev->type = ARPHRD_ETHER;
97 631
98 switch (type) { 632 switch (type) {
99 case IEEE80211_IF_TYPE_AP: 633 case NL80211_IFTYPE_AP:
100 skb_queue_head_init(&sdata->u.ap.ps_bc_buf); 634 skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
101 INIT_LIST_HEAD(&sdata->u.ap.vlans); 635 INIT_LIST_HEAD(&sdata->u.ap.vlans);
102 break; 636 break;
103 case IEEE80211_IF_TYPE_MESH_POINT: 637 case NL80211_IFTYPE_STATION:
104 case IEEE80211_IF_TYPE_STA: 638 case NL80211_IFTYPE_ADHOC:
105 case IEEE80211_IF_TYPE_IBSS: 639 ieee80211_sta_setup_sdata(sdata);
106 ifsta = &sdata->u.sta; 640 break;
107 INIT_WORK(&ifsta->work, ieee80211_sta_work); 641 case NL80211_IFTYPE_MESH_POINT:
108 setup_timer(&ifsta->timer, ieee80211_sta_timer,
109 (unsigned long) sdata);
110 skb_queue_head_init(&ifsta->skb_queue);
111
112 ifsta->capab = WLAN_CAPABILITY_ESS;
113 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
114 IEEE80211_AUTH_ALG_SHARED_KEY;
115 ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
116 IEEE80211_STA_AUTO_BSSID_SEL |
117 IEEE80211_STA_AUTO_CHANNEL_SEL;
118 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
119 ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
120
121 if (ieee80211_vif_is_mesh(&sdata->vif)) 642 if (ieee80211_vif_is_mesh(&sdata->vif))
122 ieee80211_mesh_init_sdata(sdata); 643 ieee80211_mesh_init_sdata(sdata);
123 break; 644 break;
124 case IEEE80211_IF_TYPE_MNTR: 645 case NL80211_IFTYPE_MONITOR:
125 sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; 646 sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP;
126 sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit; 647 sdata->dev->hard_start_xmit = ieee80211_monitor_start_xmit;
127 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | 648 sdata->u.mntr_flags = MONITOR_FLAG_CONTROL |
128 MONITOR_FLAG_OTHER_BSS; 649 MONITOR_FLAG_OTHER_BSS;
129 break; 650 break;
130 case IEEE80211_IF_TYPE_WDS: 651 case NL80211_IFTYPE_WDS:
131 case IEEE80211_IF_TYPE_VLAN: 652 case NL80211_IFTYPE_AP_VLAN:
132 break; 653 break;
133 case IEEE80211_IF_TYPE_INVALID: 654 case NL80211_IFTYPE_UNSPECIFIED:
655 case __NL80211_IFTYPE_AFTER_LAST:
134 BUG(); 656 BUG();
135 break; 657 break;
136 } 658 }
@@ -139,7 +661,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
139} 661}
140 662
141int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, 663int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
142 enum ieee80211_if_types type) 664 enum nl80211_iftype type)
143{ 665{
144 ASSERT_RTNL(); 666 ASSERT_RTNL();
145 667
@@ -160,14 +682,16 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
160 ieee80211_setup_sdata(sdata, type); 682 ieee80211_setup_sdata(sdata, type);
161 683
162 /* reset some values that shouldn't be kept across type changes */ 684 /* reset some values that shouldn't be kept across type changes */
163 sdata->basic_rates = 0; 685 sdata->bss_conf.basic_rates =
686 ieee80211_mandatory_rates(sdata->local,
687 sdata->local->hw.conf.channel->band);
164 sdata->drop_unencrypted = 0; 688 sdata->drop_unencrypted = 0;
165 689
166 return 0; 690 return 0;
167} 691}
168 692
169int ieee80211_if_add(struct ieee80211_local *local, const char *name, 693int ieee80211_if_add(struct ieee80211_local *local, const char *name,
170 struct net_device **new_dev, enum ieee80211_if_types type, 694 struct net_device **new_dev, enum nl80211_iftype type,
171 struct vif_params *params) 695 struct vif_params *params)
172{ 696{
173 struct net_device *ndev; 697 struct net_device *ndev;
@@ -225,9 +749,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
225 749
226 if (ieee80211_vif_is_mesh(&sdata->vif) && 750 if (ieee80211_vif_is_mesh(&sdata->vif) &&
227 params && params->mesh_id_len) 751 params && params->mesh_id_len)
228 ieee80211_if_sta_set_mesh_id(&sdata->u.sta, 752 ieee80211_sdata_set_mesh_id(sdata,
229 params->mesh_id_len, 753 params->mesh_id_len,
230 params->mesh_id); 754 params->mesh_id);
231 755
232 list_add_tail_rcu(&sdata->list, &local->interfaces); 756 list_add_tail_rcu(&sdata->list, &local->interfaces);
233 757
@@ -241,15 +765,13 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
241 return ret; 765 return ret;
242} 766}
243 767
244void ieee80211_if_remove(struct net_device *dev) 768void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
245{ 769{
246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
247
248 ASSERT_RTNL(); 770 ASSERT_RTNL();
249 771
250 list_del_rcu(&sdata->list); 772 list_del_rcu(&sdata->list);
251 synchronize_rcu(); 773 synchronize_rcu();
252 unregister_netdevice(dev); 774 unregister_netdevice(sdata->dev);
253} 775}
254 776
255/* 777/*
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 6597c779e35a..57afcd38cd9e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -118,12 +118,12 @@ static const u8 *get_mac_for_key(struct ieee80211_key *key)
118 * address to indicate a transmit-only key. 118 * address to indicate a transmit-only key.
119 */ 119 */
120 if (key->conf.alg != ALG_WEP && 120 if (key->conf.alg != ALG_WEP &&
121 (key->sdata->vif.type == IEEE80211_IF_TYPE_AP || 121 (key->sdata->vif.type == NL80211_IFTYPE_AP ||
122 key->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) 122 key->sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
123 addr = zero_addr; 123 addr = zero_addr;
124 124
125 if (key->sta) 125 if (key->sta)
126 addr = key->sta->addr; 126 addr = key->sta->sta.addr;
127 127
128 return addr; 128 return addr;
129} 129}
@@ -331,7 +331,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
331 */ 331 */
332 key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; 332 key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
333 } else { 333 } else {
334 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 334 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
335 struct sta_info *ap; 335 struct sta_info *ap;
336 336
337 /* 337 /*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index aa5a191598c9..d608c44047c0 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -45,16 +45,9 @@ struct ieee80211_tx_status_rtap_hdr {
45 u8 data_retries; 45 u8 data_retries;
46} __attribute__ ((packed)); 46} __attribute__ ((packed));
47 47
48/* common interface routines */
49
50static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
51{
52 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
53 return ETH_ALEN;
54}
55 48
56/* must be called under mdev tx lock */ 49/* must be called under mdev tx lock */
57static void ieee80211_configure_filter(struct ieee80211_local *local) 50void ieee80211_configure_filter(struct ieee80211_local *local)
58{ 51{
59 unsigned int changed_flags; 52 unsigned int changed_flags;
60 unsigned int new_flags = 0; 53 unsigned int new_flags = 0;
@@ -97,9 +90,24 @@ static void ieee80211_configure_filter(struct ieee80211_local *local)
97 90
98/* master interface */ 91/* master interface */
99 92
93static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
94{
95 memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
96 return ETH_ALEN;
97}
98
99static const struct header_ops ieee80211_header_ops = {
100 .create = eth_header,
101 .parse = header_parse_80211,
102 .rebuild = eth_rebuild_header,
103 .cache = eth_header_cache,
104 .cache_update = eth_header_cache_update,
105};
106
100static int ieee80211_master_open(struct net_device *dev) 107static int ieee80211_master_open(struct net_device *dev)
101{ 108{
102 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 109 struct ieee80211_master_priv *mpriv = netdev_priv(dev);
110 struct ieee80211_local *local = mpriv->local;
103 struct ieee80211_sub_if_data *sdata; 111 struct ieee80211_sub_if_data *sdata;
104 int res = -EOPNOTSUPP; 112 int res = -EOPNOTSUPP;
105 113
@@ -121,7 +129,8 @@ static int ieee80211_master_open(struct net_device *dev)
121 129
122static int ieee80211_master_stop(struct net_device *dev) 130static int ieee80211_master_stop(struct net_device *dev)
123{ 131{
124 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 132 struct ieee80211_master_priv *mpriv = netdev_priv(dev);
133 struct ieee80211_local *local = mpriv->local;
125 struct ieee80211_sub_if_data *sdata; 134 struct ieee80211_sub_if_data *sdata;
126 135
127 /* we hold the RTNL here so can safely walk the list */ 136 /* we hold the RTNL here so can safely walk the list */
@@ -134,849 +143,12 @@ static int ieee80211_master_stop(struct net_device *dev)
134 143
135static void ieee80211_master_set_multicast_list(struct net_device *dev) 144static void ieee80211_master_set_multicast_list(struct net_device *dev)
136{ 145{
137 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 146 struct ieee80211_master_priv *mpriv = netdev_priv(dev);
147 struct ieee80211_local *local = mpriv->local;
138 148
139 ieee80211_configure_filter(local); 149 ieee80211_configure_filter(local);
140} 150}
141 151
142/* regular interfaces */
143
144static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
145{
146 int meshhdrlen;
147 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
148
149 meshhdrlen = (sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) ? 5 : 0;
150
151 /* FIX: what would be proper limits for MTU?
152 * This interface uses 802.3 frames. */
153 if (new_mtu < 256 ||
154 new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) {
155 return -EINVAL;
156 }
157
158#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
159 printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
160#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
161 dev->mtu = new_mtu;
162 return 0;
163}
164
165static inline int identical_mac_addr_allowed(int type1, int type2)
166{
167 return (type1 == IEEE80211_IF_TYPE_MNTR ||
168 type2 == IEEE80211_IF_TYPE_MNTR ||
169 (type1 == IEEE80211_IF_TYPE_AP &&
170 type2 == IEEE80211_IF_TYPE_WDS) ||
171 (type1 == IEEE80211_IF_TYPE_WDS &&
172 (type2 == IEEE80211_IF_TYPE_WDS ||
173 type2 == IEEE80211_IF_TYPE_AP)) ||
174 (type1 == IEEE80211_IF_TYPE_AP &&
175 type2 == IEEE80211_IF_TYPE_VLAN) ||
176 (type1 == IEEE80211_IF_TYPE_VLAN &&
177 (type2 == IEEE80211_IF_TYPE_AP ||
178 type2 == IEEE80211_IF_TYPE_VLAN)));
179}
180
181static int ieee80211_open(struct net_device *dev)
182{
183 struct ieee80211_sub_if_data *sdata, *nsdata;
184 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
185 struct sta_info *sta;
186 struct ieee80211_if_init_conf conf;
187 u32 changed = 0;
188 int res;
189 bool need_hw_reconfig = 0;
190
191 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
192
193 /* we hold the RTNL here so can safely walk the list */
194 list_for_each_entry(nsdata, &local->interfaces, list) {
195 struct net_device *ndev = nsdata->dev;
196
197 if (ndev != dev && netif_running(ndev)) {
198 /*
199 * Allow only a single IBSS interface to be up at any
200 * time. This is restricted because beacon distribution
201 * cannot work properly if both are in the same IBSS.
202 *
203 * To remove this restriction we'd have to disallow them
204 * from setting the same SSID on different IBSS interfaces
205 * belonging to the same hardware. Then, however, we're
206 * faced with having to adopt two different TSF timers...
207 */
208 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
209 nsdata->vif.type == IEEE80211_IF_TYPE_IBSS)
210 return -EBUSY;
211
212 /*
213 * The remaining checks are only performed for interfaces
214 * with the same MAC address.
215 */
216 if (compare_ether_addr(dev->dev_addr, ndev->dev_addr))
217 continue;
218
219 /*
220 * check whether it may have the same address
221 */
222 if (!identical_mac_addr_allowed(sdata->vif.type,
223 nsdata->vif.type))
224 return -ENOTUNIQ;
225
226 /*
227 * can only add VLANs to enabled APs
228 */
229 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN &&
230 nsdata->vif.type == IEEE80211_IF_TYPE_AP)
231 sdata->bss = &nsdata->u.ap;
232 }
233 }
234
235 switch (sdata->vif.type) {
236 case IEEE80211_IF_TYPE_WDS:
237 if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
238 return -ENOLINK;
239 break;
240 case IEEE80211_IF_TYPE_VLAN:
241 if (!sdata->bss)
242 return -ENOLINK;
243 list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
244 break;
245 case IEEE80211_IF_TYPE_AP:
246 sdata->bss = &sdata->u.ap;
247 break;
248 case IEEE80211_IF_TYPE_MESH_POINT:
249 /* mesh ifaces must set allmulti to forward mcast traffic */
250 atomic_inc(&local->iff_allmultis);
251 break;
252 case IEEE80211_IF_TYPE_STA:
253 case IEEE80211_IF_TYPE_MNTR:
254 case IEEE80211_IF_TYPE_IBSS:
255 /* no special treatment */
256 break;
257 case IEEE80211_IF_TYPE_INVALID:
258 /* cannot happen */
259 WARN_ON(1);
260 break;
261 }
262
263 if (local->open_count == 0) {
264 res = 0;
265 if (local->ops->start)
266 res = local->ops->start(local_to_hw(local));
267 if (res)
268 goto err_del_bss;
269 need_hw_reconfig = 1;
270 ieee80211_led_radio(local, local->hw.conf.radio_enabled);
271 }
272
273 switch (sdata->vif.type) {
274 case IEEE80211_IF_TYPE_VLAN:
275 /* no need to tell driver */
276 break;
277 case IEEE80211_IF_TYPE_MNTR:
278 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
279 local->cooked_mntrs++;
280 break;
281 }
282
283 /* must be before the call to ieee80211_configure_filter */
284 local->monitors++;
285 if (local->monitors == 1)
286 local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP;
287
288 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
289 local->fif_fcsfail++;
290 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
291 local->fif_plcpfail++;
292 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
293 local->fif_control++;
294 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
295 local->fif_other_bss++;
296
297 netif_addr_lock_bh(local->mdev);
298 ieee80211_configure_filter(local);
299 netif_addr_unlock_bh(local->mdev);
300 break;
301 case IEEE80211_IF_TYPE_STA:
302 case IEEE80211_IF_TYPE_IBSS:
303 sdata->u.sta.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
304 /* fall through */
305 default:
306 conf.vif = &sdata->vif;
307 conf.type = sdata->vif.type;
308 conf.mac_addr = dev->dev_addr;
309 res = local->ops->add_interface(local_to_hw(local), &conf);
310 if (res)
311 goto err_stop;
312
313 if (ieee80211_vif_is_mesh(&sdata->vif))
314 ieee80211_start_mesh(sdata->dev);
315 changed |= ieee80211_reset_erp_info(dev);
316 ieee80211_bss_info_change_notify(sdata, changed);
317 ieee80211_enable_keys(sdata);
318
319 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
320 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
321 netif_carrier_off(dev);
322 else
323 netif_carrier_on(dev);
324 }
325
326 if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) {
327 /* Create STA entry for the WDS peer */
328 sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
329 GFP_KERNEL);
330 if (!sta) {
331 res = -ENOMEM;
332 goto err_del_interface;
333 }
334
335 /* no locking required since STA is not live yet */
336 sta->flags |= WLAN_STA_AUTHORIZED;
337
338 res = sta_info_insert(sta);
339 if (res) {
340 /* STA has been freed */
341 goto err_del_interface;
342 }
343 }
344
345 if (local->open_count == 0) {
346 res = dev_open(local->mdev);
347 WARN_ON(res);
348 if (res)
349 goto err_del_interface;
350 tasklet_enable(&local->tx_pending_tasklet);
351 tasklet_enable(&local->tasklet);
352 }
353
354 /*
355 * set_multicast_list will be invoked by the networking core
356 * which will check whether any increments here were done in
357 * error and sync them down to the hardware as filter flags.
358 */
359 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
360 atomic_inc(&local->iff_allmultis);
361
362 if (sdata->flags & IEEE80211_SDATA_PROMISC)
363 atomic_inc(&local->iff_promiscs);
364
365 local->open_count++;
366 if (need_hw_reconfig)
367 ieee80211_hw_config(local);
368
369 /*
370 * ieee80211_sta_work is disabled while network interface
371 * is down. Therefore, some configuration changes may not
372 * yet be effective. Trigger execution of ieee80211_sta_work
373 * to fix this.
374 */
375 if (sdata->vif.type == IEEE80211_IF_TYPE_STA ||
376 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
377 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
378 queue_work(local->hw.workqueue, &ifsta->work);
379 }
380
381 netif_tx_start_all_queues(dev);
382
383 return 0;
384 err_del_interface:
385 local->ops->remove_interface(local_to_hw(local), &conf);
386 err_stop:
387 if (!local->open_count && local->ops->stop)
388 local->ops->stop(local_to_hw(local));
389 err_del_bss:
390 sdata->bss = NULL;
391 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN)
392 list_del(&sdata->u.vlan.list);
393 return res;
394}
395
396static int ieee80211_stop(struct net_device *dev)
397{
398 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
399 struct ieee80211_local *local = sdata->local;
400 struct ieee80211_if_init_conf conf;
401 struct sta_info *sta;
402
403 /*
404 * Stop TX on this interface first.
405 */
406 netif_tx_stop_all_queues(dev);
407
408 /*
409 * Now delete all active aggregation sessions.
410 */
411 rcu_read_lock();
412
413 list_for_each_entry_rcu(sta, &local->sta_list, list) {
414 if (sta->sdata == sdata)
415 ieee80211_sta_tear_down_BA_sessions(dev, sta->addr);
416 }
417
418 rcu_read_unlock();
419
420 /*
421 * Remove all stations associated with this interface.
422 *
423 * This must be done before calling ops->remove_interface()
424 * because otherwise we can later invoke ops->sta_notify()
425 * whenever the STAs are removed, and that invalidates driver
426 * assumptions about always getting a vif pointer that is valid
427 * (because if we remove a STA after ops->remove_interface()
428 * the driver will have removed the vif info already!)
429 *
430 * We could relax this and only unlink the stations from the
431 * hash table and list but keep them on a per-sdata list that
432 * will be inserted back again when the interface is brought
433 * up again, but I don't currently see a use case for that,
434 * except with WDS which gets a STA entry created when it is
435 * brought up.
436 */
437 sta_info_flush(local, sdata);
438
439 /*
440 * Don't count this interface for promisc/allmulti while it
441 * is down. dev_mc_unsync() will invoke set_multicast_list
442 * on the master interface which will sync these down to the
443 * hardware as filter flags.
444 */
445 if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
446 atomic_dec(&local->iff_allmultis);
447
448 if (sdata->flags & IEEE80211_SDATA_PROMISC)
449 atomic_dec(&local->iff_promiscs);
450
451 dev_mc_unsync(local->mdev, dev);
452
453 /* APs need special treatment */
454 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) {
455 struct ieee80211_sub_if_data *vlan, *tmp;
456 struct beacon_data *old_beacon = sdata->u.ap.beacon;
457
458 /* remove beacon */
459 rcu_assign_pointer(sdata->u.ap.beacon, NULL);
460 synchronize_rcu();
461 kfree(old_beacon);
462
463 /* down all dependent devices, that is VLANs */
464 list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
465 u.vlan.list)
466 dev_close(vlan->dev);
467 WARN_ON(!list_empty(&sdata->u.ap.vlans));
468 }
469
470 local->open_count--;
471
472 switch (sdata->vif.type) {
473 case IEEE80211_IF_TYPE_VLAN:
474 list_del(&sdata->u.vlan.list);
475 /* no need to tell driver */
476 break;
477 case IEEE80211_IF_TYPE_MNTR:
478 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
479 local->cooked_mntrs--;
480 break;
481 }
482
483 local->monitors--;
484 if (local->monitors == 0)
485 local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP;
486
487 if (sdata->u.mntr_flags & MONITOR_FLAG_FCSFAIL)
488 local->fif_fcsfail--;
489 if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
490 local->fif_plcpfail--;
491 if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
492 local->fif_control--;
493 if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
494 local->fif_other_bss--;
495
496 netif_addr_lock_bh(local->mdev);
497 ieee80211_configure_filter(local);
498 netif_addr_unlock_bh(local->mdev);
499 break;
500 case IEEE80211_IF_TYPE_MESH_POINT:
501 /* allmulti is always set on mesh ifaces */
502 atomic_dec(&local->iff_allmultis);
503 /* fall through */
504 case IEEE80211_IF_TYPE_STA:
505 case IEEE80211_IF_TYPE_IBSS:
506 sdata->u.sta.state = IEEE80211_DISABLED;
507 memset(sdata->u.sta.bssid, 0, ETH_ALEN);
508 del_timer_sync(&sdata->u.sta.timer);
509 /*
510 * When we get here, the interface is marked down.
511 * Call synchronize_rcu() to wait for the RX path
512 * should it be using the interface and enqueuing
513 * frames at this very time on another CPU.
514 */
515 synchronize_rcu();
516 skb_queue_purge(&sdata->u.sta.skb_queue);
517
518 if (local->scan_dev == sdata->dev) {
519 if (!local->ops->hw_scan) {
520 local->sta_sw_scanning = 0;
521 cancel_delayed_work(&local->scan_work);
522 } else
523 local->sta_hw_scanning = 0;
524 }
525
526 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
527 kfree(sdata->u.sta.extra_ie);
528 sdata->u.sta.extra_ie = NULL;
529 sdata->u.sta.extra_ie_len = 0;
530 /* fall through */
531 default:
532 conf.vif = &sdata->vif;
533 conf.type = sdata->vif.type;
534 conf.mac_addr = dev->dev_addr;
535 /* disable all keys for as long as this netdev is down */
536 ieee80211_disable_keys(sdata);
537 local->ops->remove_interface(local_to_hw(local), &conf);
538 }
539
540 sdata->bss = NULL;
541
542 if (local->open_count == 0) {
543 if (netif_running(local->mdev))
544 dev_close(local->mdev);
545
546 if (local->ops->stop)
547 local->ops->stop(local_to_hw(local));
548
549 ieee80211_led_radio(local, 0);
550
551 flush_workqueue(local->hw.workqueue);
552
553 tasklet_disable(&local->tx_pending_tasklet);
554 tasklet_disable(&local->tasklet);
555 }
556
557 return 0;
558}
559
560int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
561{
562 struct ieee80211_local *local = hw_to_local(hw);
563 struct sta_info *sta;
564 struct ieee80211_sub_if_data *sdata;
565 u16 start_seq_num = 0;
566 u8 *state;
567 int ret;
568 DECLARE_MAC_BUF(mac);
569
570 if (tid >= STA_TID_NUM)
571 return -EINVAL;
572
573#ifdef CONFIG_MAC80211_HT_DEBUG
574 printk(KERN_DEBUG "Open BA session requested for %s tid %u\n",
575 print_mac(mac, ra), tid);
576#endif /* CONFIG_MAC80211_HT_DEBUG */
577
578 rcu_read_lock();
579
580 sta = sta_info_get(local, ra);
581 if (!sta) {
582#ifdef CONFIG_MAC80211_HT_DEBUG
583 printk(KERN_DEBUG "Could not find the station\n");
584#endif
585 ret = -ENOENT;
586 goto exit;
587 }
588
589 spin_lock_bh(&sta->lock);
590
591 /* we have tried too many times, receiver does not want A-MPDU */
592 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
593 ret = -EBUSY;
594 goto err_unlock_sta;
595 }
596
597 state = &sta->ampdu_mlme.tid_state_tx[tid];
598 /* check if the TID is not in aggregation flow already */
599 if (*state != HT_AGG_STATE_IDLE) {
600#ifdef CONFIG_MAC80211_HT_DEBUG
601 printk(KERN_DEBUG "BA request denied - session is not "
602 "idle on tid %u\n", tid);
603#endif /* CONFIG_MAC80211_HT_DEBUG */
604 ret = -EAGAIN;
605 goto err_unlock_sta;
606 }
607
608 /* prepare A-MPDU MLME for Tx aggregation */
609 sta->ampdu_mlme.tid_tx[tid] =
610 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
611 if (!sta->ampdu_mlme.tid_tx[tid]) {
612#ifdef CONFIG_MAC80211_HT_DEBUG
613 if (net_ratelimit())
614 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
615 tid);
616#endif
617 ret = -ENOMEM;
618 goto err_unlock_sta;
619 }
620 /* Tx timer */
621 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
622 sta_addba_resp_timer_expired;
623 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
624 (unsigned long)&sta->timer_to_tid[tid];
625 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
626
627 /* create a new queue for this aggregation */
628 ret = ieee80211_ht_agg_queue_add(local, sta, tid);
629
630 /* case no queue is available to aggregation
631 * don't switch to aggregation */
632 if (ret) {
633#ifdef CONFIG_MAC80211_HT_DEBUG
634 printk(KERN_DEBUG "BA request denied - queue unavailable for"
635 " tid %d\n", tid);
636#endif /* CONFIG_MAC80211_HT_DEBUG */
637 goto err_unlock_queue;
638 }
639 sdata = sta->sdata;
640
641 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
642 * call back right away, it must see that the flow has begun */
643 *state |= HT_ADDBA_REQUESTED_MSK;
644
645 if (local->ops->ampdu_action)
646 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
647 ra, tid, &start_seq_num);
648
649 if (ret) {
650 /* No need to requeue the packets in the agg queue, since we
651 * held the tx lock: no packet could be enqueued to the newly
652 * allocated queue */
653 ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
654#ifdef CONFIG_MAC80211_HT_DEBUG
655 printk(KERN_DEBUG "BA request denied - HW unavailable for"
656 " tid %d\n", tid);
657#endif /* CONFIG_MAC80211_HT_DEBUG */
658 *state = HT_AGG_STATE_IDLE;
659 goto err_unlock_queue;
660 }
661
662 /* Will put all the packets in the new SW queue */
663 ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
664 spin_unlock_bh(&sta->lock);
665
666 /* send an addBA request */
667 sta->ampdu_mlme.dialog_token_allocator++;
668 sta->ampdu_mlme.tid_tx[tid]->dialog_token =
669 sta->ampdu_mlme.dialog_token_allocator;
670 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
671
672
673 ieee80211_send_addba_request(sta->sdata->dev, ra, tid,
674 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
675 sta->ampdu_mlme.tid_tx[tid]->ssn,
676 0x40, 5000);
677 /* activate the timer for the recipient's addBA response */
678 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
679 jiffies + ADDBA_RESP_INTERVAL;
680 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
681#ifdef CONFIG_MAC80211_HT_DEBUG
682 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
683#endif
684 goto exit;
685
686err_unlock_queue:
687 kfree(sta->ampdu_mlme.tid_tx[tid]);
688 sta->ampdu_mlme.tid_tx[tid] = NULL;
689 ret = -EBUSY;
690err_unlock_sta:
691 spin_unlock_bh(&sta->lock);
692exit:
693 rcu_read_unlock();
694 return ret;
695}
696EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
697
698int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
699 u8 *ra, u16 tid,
700 enum ieee80211_back_parties initiator)
701{
702 struct ieee80211_local *local = hw_to_local(hw);
703 struct sta_info *sta;
704 u8 *state;
705 int ret = 0;
706 DECLARE_MAC_BUF(mac);
707
708 if (tid >= STA_TID_NUM)
709 return -EINVAL;
710
711 rcu_read_lock();
712 sta = sta_info_get(local, ra);
713 if (!sta) {
714 rcu_read_unlock();
715 return -ENOENT;
716 }
717
718 /* check if the TID is in aggregation */
719 state = &sta->ampdu_mlme.tid_state_tx[tid];
720 spin_lock_bh(&sta->lock);
721
722 if (*state != HT_AGG_STATE_OPERATIONAL) {
723 ret = -ENOENT;
724 goto stop_BA_exit;
725 }
726
727#ifdef CONFIG_MAC80211_HT_DEBUG
728 printk(KERN_DEBUG "Tx BA session stop requested for %s tid %u\n",
729 print_mac(mac, ra), tid);
730#endif /* CONFIG_MAC80211_HT_DEBUG */
731
732 ieee80211_stop_queue(hw, sta->tid_to_tx_q[tid]);
733
734 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
735 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
736
737 if (local->ops->ampdu_action)
738 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_STOP,
739 ra, tid, NULL);
740
741 /* case HW denied going back to legacy */
742 if (ret) {
743 WARN_ON(ret != -EBUSY);
744 *state = HT_AGG_STATE_OPERATIONAL;
745 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
746 goto stop_BA_exit;
747 }
748
749stop_BA_exit:
750 spin_unlock_bh(&sta->lock);
751 rcu_read_unlock();
752 return ret;
753}
754EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
755
756void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
757{
758 struct ieee80211_local *local = hw_to_local(hw);
759 struct sta_info *sta;
760 u8 *state;
761 DECLARE_MAC_BUF(mac);
762
763 if (tid >= STA_TID_NUM) {
764#ifdef CONFIG_MAC80211_HT_DEBUG
765 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
766 tid, STA_TID_NUM);
767#endif
768 return;
769 }
770
771 rcu_read_lock();
772 sta = sta_info_get(local, ra);
773 if (!sta) {
774 rcu_read_unlock();
775#ifdef CONFIG_MAC80211_HT_DEBUG
776 printk(KERN_DEBUG "Could not find station: %s\n",
777 print_mac(mac, ra));
778#endif
779 return;
780 }
781
782 state = &sta->ampdu_mlme.tid_state_tx[tid];
783 spin_lock_bh(&sta->lock);
784
785 if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
786#ifdef CONFIG_MAC80211_HT_DEBUG
787 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
788 *state);
789#endif
790 spin_unlock_bh(&sta->lock);
791 rcu_read_unlock();
792 return;
793 }
794
795 WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
796
797 *state |= HT_ADDBA_DRV_READY_MSK;
798
799 if (*state == HT_AGG_STATE_OPERATIONAL) {
800#ifdef CONFIG_MAC80211_HT_DEBUG
801 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
802#endif
803 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
804 }
805 spin_unlock_bh(&sta->lock);
806 rcu_read_unlock();
807}
808EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
809
810void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
811{
812 struct ieee80211_local *local = hw_to_local(hw);
813 struct sta_info *sta;
814 u8 *state;
815 int agg_queue;
816 DECLARE_MAC_BUF(mac);
817
818 if (tid >= STA_TID_NUM) {
819#ifdef CONFIG_MAC80211_HT_DEBUG
820 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
821 tid, STA_TID_NUM);
822#endif
823 return;
824 }
825
826#ifdef CONFIG_MAC80211_HT_DEBUG
827 printk(KERN_DEBUG "Stopping Tx BA session for %s tid %d\n",
828 print_mac(mac, ra), tid);
829#endif /* CONFIG_MAC80211_HT_DEBUG */
830
831 rcu_read_lock();
832 sta = sta_info_get(local, ra);
833 if (!sta) {
834#ifdef CONFIG_MAC80211_HT_DEBUG
835 printk(KERN_DEBUG "Could not find station: %s\n",
836 print_mac(mac, ra));
837#endif
838 rcu_read_unlock();
839 return;
840 }
841 state = &sta->ampdu_mlme.tid_state_tx[tid];
842
843 /* NOTE: no need to use sta->lock in this state check, as
844 * ieee80211_stop_tx_ba_session will let only one stop call to
845 * pass through per sta/tid
846 */
847 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
848#ifdef CONFIG_MAC80211_HT_DEBUG
849 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
850#endif
851 rcu_read_unlock();
852 return;
853 }
854
855 if (*state & HT_AGG_STATE_INITIATOR_MSK)
856 ieee80211_send_delba(sta->sdata->dev, ra, tid,
857 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
858
859 agg_queue = sta->tid_to_tx_q[tid];
860
861 ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
862
863 /* We just requeued the all the frames that were in the
864 * removed queue, and since we might miss a softirq we do
865 * netif_schedule_queue. ieee80211_wake_queue is not used
866 * here as this queue is not necessarily stopped
867 */
868 netif_schedule_queue(netdev_get_tx_queue(local->mdev, agg_queue));
869 spin_lock_bh(&sta->lock);
870 *state = HT_AGG_STATE_IDLE;
871 sta->ampdu_mlme.addba_req_num[tid] = 0;
872 kfree(sta->ampdu_mlme.tid_tx[tid]);
873 sta->ampdu_mlme.tid_tx[tid] = NULL;
874 spin_unlock_bh(&sta->lock);
875
876 rcu_read_unlock();
877}
878EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
879
880void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
881 const u8 *ra, u16 tid)
882{
883 struct ieee80211_local *local = hw_to_local(hw);
884 struct ieee80211_ra_tid *ra_tid;
885 struct sk_buff *skb = dev_alloc_skb(0);
886
887 if (unlikely(!skb)) {
888#ifdef CONFIG_MAC80211_HT_DEBUG
889 if (net_ratelimit())
890 printk(KERN_WARNING "%s: Not enough memory, "
891 "dropping start BA session", skb->dev->name);
892#endif
893 return;
894 }
895 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
896 memcpy(&ra_tid->ra, ra, ETH_ALEN);
897 ra_tid->tid = tid;
898
899 skb->pkt_type = IEEE80211_ADDBA_MSG;
900 skb_queue_tail(&local->skb_queue, skb);
901 tasklet_schedule(&local->tasklet);
902}
903EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
904
905void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_hw *hw,
906 const u8 *ra, u16 tid)
907{
908 struct ieee80211_local *local = hw_to_local(hw);
909 struct ieee80211_ra_tid *ra_tid;
910 struct sk_buff *skb = dev_alloc_skb(0);
911
912 if (unlikely(!skb)) {
913#ifdef CONFIG_MAC80211_HT_DEBUG
914 if (net_ratelimit())
915 printk(KERN_WARNING "%s: Not enough memory, "
916 "dropping stop BA session", skb->dev->name);
917#endif
918 return;
919 }
920 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
921 memcpy(&ra_tid->ra, ra, ETH_ALEN);
922 ra_tid->tid = tid;
923
924 skb->pkt_type = IEEE80211_DELBA_MSG;
925 skb_queue_tail(&local->skb_queue, skb);
926 tasklet_schedule(&local->tasklet);
927}
928EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
929
930static void ieee80211_set_multicast_list(struct net_device *dev)
931{
932 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
933 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
934 int allmulti, promisc, sdata_allmulti, sdata_promisc;
935
936 allmulti = !!(dev->flags & IFF_ALLMULTI);
937 promisc = !!(dev->flags & IFF_PROMISC);
938 sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
939 sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
940
941 if (allmulti != sdata_allmulti) {
942 if (dev->flags & IFF_ALLMULTI)
943 atomic_inc(&local->iff_allmultis);
944 else
945 atomic_dec(&local->iff_allmultis);
946 sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
947 }
948
949 if (promisc != sdata_promisc) {
950 if (dev->flags & IFF_PROMISC)
951 atomic_inc(&local->iff_promiscs);
952 else
953 atomic_dec(&local->iff_promiscs);
954 sdata->flags ^= IEEE80211_SDATA_PROMISC;
955 }
956
957 dev_mc_sync(local->mdev, dev);
958}
959
960static const struct header_ops ieee80211_header_ops = {
961 .create = eth_header,
962 .parse = header_parse_80211,
963 .rebuild = eth_rebuild_header,
964 .cache = eth_header_cache,
965 .cache_update = eth_header_cache_update,
966};
967
968void ieee80211_if_setup(struct net_device *dev)
969{
970 ether_setup(dev);
971 dev->hard_start_xmit = ieee80211_subif_start_xmit;
972 dev->wireless_handlers = &ieee80211_iw_handler_def;
973 dev->set_multicast_list = ieee80211_set_multicast_list;
974 dev->change_mtu = ieee80211_change_mtu;
975 dev->open = ieee80211_open;
976 dev->stop = ieee80211_stop;
977 dev->destructor = free_netdev;
978}
979
980/* everything else */ 152/* everything else */
981 153
982int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed) 154int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
@@ -987,18 +159,21 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
987 if (WARN_ON(!netif_running(sdata->dev))) 159 if (WARN_ON(!netif_running(sdata->dev)))
988 return 0; 160 return 0;
989 161
162 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
163 return -EINVAL;
164
990 if (!local->ops->config_interface) 165 if (!local->ops->config_interface)
991 return 0; 166 return 0;
992 167
993 memset(&conf, 0, sizeof(conf)); 168 memset(&conf, 0, sizeof(conf));
994 conf.changed = changed; 169 conf.changed = changed;
995 170
996 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 171 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
997 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 172 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
998 conf.bssid = sdata->u.sta.bssid; 173 conf.bssid = sdata->u.sta.bssid;
999 conf.ssid = sdata->u.sta.ssid; 174 conf.ssid = sdata->u.sta.ssid;
1000 conf.ssid_len = sdata->u.sta.ssid_len; 175 conf.ssid_len = sdata->u.sta.ssid_len;
1001 } else if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 176 } else if (sdata->vif.type == NL80211_IFTYPE_AP) {
1002 conf.bssid = sdata->dev->dev_addr; 177 conf.bssid = sdata->dev->dev_addr;
1003 conf.ssid = sdata->u.ap.ssid; 178 conf.ssid = sdata->u.ap.ssid;
1004 conf.ssid_len = sdata->u.ap.ssid_len; 179 conf.ssid_len = sdata->u.ap.ssid_len;
@@ -1027,7 +202,7 @@ int ieee80211_hw_config(struct ieee80211_local *local)
1027 struct ieee80211_channel *chan; 202 struct ieee80211_channel *chan;
1028 int ret = 0; 203 int ret = 0;
1029 204
1030 if (local->sta_sw_scanning) 205 if (local->sw_scanning)
1031 chan = local->scan_channel; 206 chan = local->scan_channel;
1032 else 207 else
1033 chan = local->oper_channel; 208 chan = local->oper_channel;
@@ -1099,8 +274,8 @@ u32 ieee80211_handle_ht(struct ieee80211_local *local, int enable_ht,
1099 ht_conf.ht_supported = 1; 274 ht_conf.ht_supported = 1;
1100 275
1101 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap; 276 ht_conf.cap = req_ht_cap->cap & sband->ht_info.cap;
1102 ht_conf.cap &= ~(IEEE80211_HT_CAP_MIMO_PS); 277 ht_conf.cap &= ~(IEEE80211_HT_CAP_SM_PS);
1103 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_MIMO_PS; 278 ht_conf.cap |= sband->ht_info.cap & IEEE80211_HT_CAP_SM_PS;
1104 ht_bss_conf.primary_channel = req_bss_cap->primary_channel; 279 ht_bss_conf.primary_channel = req_bss_cap->primary_channel;
1105 ht_bss_conf.bss_cap = req_bss_cap->bss_cap; 280 ht_bss_conf.bss_cap = req_bss_cap->bss_cap;
1106 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode; 281 ht_bss_conf.bss_op_mode = req_bss_cap->bss_op_mode;
@@ -1152,6 +327,9 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1152{ 327{
1153 struct ieee80211_local *local = sdata->local; 328 struct ieee80211_local *local = sdata->local;
1154 329
330 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
331 return;
332
1155 if (!changed) 333 if (!changed)
1156 return; 334 return;
1157 335
@@ -1162,10 +340,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
1162 changed); 340 changed);
1163} 341}
1164 342
1165u32 ieee80211_reset_erp_info(struct net_device *dev) 343u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
1166{ 344{
1167 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1168
1169 sdata->bss_conf.use_cts_prot = 0; 345 sdata->bss_conf.use_cts_prot = 0;
1170 sdata->bss_conf.use_short_preamble = 0; 346 sdata->bss_conf.use_short_preamble = 0;
1171 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE; 347 return BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE;
@@ -1244,9 +420,10 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1244 struct ieee80211_key *key, 420 struct ieee80211_key *key,
1245 struct sk_buff *skb) 421 struct sk_buff *skb)
1246{ 422{
1247 int hdrlen, iv_len, mic_len; 423 unsigned int hdrlen, iv_len, mic_len;
424 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1248 425
1249 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 426 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1250 427
1251 if (!key) 428 if (!key)
1252 goto no_key; 429 goto no_key;
@@ -1268,24 +445,20 @@ static void ieee80211_remove_tx_extra(struct ieee80211_local *local,
1268 goto no_key; 445 goto no_key;
1269 } 446 }
1270 447
1271 if (skb->len >= mic_len && 448 if (skb->len >= hdrlen + mic_len &&
1272 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) 449 !(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
1273 skb_trim(skb, skb->len - mic_len); 450 skb_trim(skb, skb->len - mic_len);
1274 if (skb->len >= iv_len && skb->len > hdrlen) { 451 if (skb->len >= hdrlen + iv_len) {
1275 memmove(skb->data + iv_len, skb->data, hdrlen); 452 memmove(skb->data + iv_len, skb->data, hdrlen);
1276 skb_pull(skb, iv_len); 453 hdr = (struct ieee80211_hdr *)skb_pull(skb, iv_len);
1277 } 454 }
1278 455
1279no_key: 456no_key:
1280 { 457 if (ieee80211_is_data_qos(hdr->frame_control)) {
1281 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 458 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1282 u16 fc = le16_to_cpu(hdr->frame_control); 459 memmove(skb->data + IEEE80211_QOS_CTL_LEN, skb->data,
1283 if ((fc & 0x8C) == 0x88) /* QoS Control Field */ { 460 hdrlen - IEEE80211_QOS_CTL_LEN);
1284 fc &= ~IEEE80211_STYPE_QOS_DATA; 461 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
1285 hdr->frame_control = cpu_to_le16(fc);
1286 memmove(skb->data + 2, skb->data, hdrlen - 2);
1287 skb_pull(skb, 2);
1288 }
1289 } 462 }
1290} 463}
1291 464
@@ -1369,6 +542,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 542 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1370 u16 frag, type; 543 u16 frag, type;
1371 __le16 fc; 544 __le16 fc;
545 struct ieee80211_supported_band *sband;
1372 struct ieee80211_tx_status_rtap_hdr *rthdr; 546 struct ieee80211_tx_status_rtap_hdr *rthdr;
1373 struct ieee80211_sub_if_data *sdata; 547 struct ieee80211_sub_if_data *sdata;
1374 struct net_device *prev_dev = NULL; 548 struct net_device *prev_dev = NULL;
@@ -1376,47 +550,48 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1376 550
1377 rcu_read_lock(); 551 rcu_read_lock();
1378 552
1379 if (info->status.excessive_retries) { 553 sta = sta_info_get(local, hdr->addr1);
1380 sta = sta_info_get(local, hdr->addr1); 554
1381 if (sta) { 555 if (sta) {
1382 if (test_sta_flags(sta, WLAN_STA_PS)) { 556 if (info->status.excessive_retries &&
1383 /* 557 test_sta_flags(sta, WLAN_STA_PS)) {
1384 * The STA is in power save mode, so assume 558 /*
1385 * that this TX packet failed because of that. 559 * The STA is in power save mode, so assume
1386 */ 560 * that this TX packet failed because of that.
1387 ieee80211_handle_filtered_frame(local, sta, skb); 561 */
1388 rcu_read_unlock(); 562 ieee80211_handle_filtered_frame(local, sta, skb);
1389 return; 563 rcu_read_unlock();
1390 } 564 return;
1391 } 565 }
1392 }
1393 566
1394 fc = hdr->frame_control; 567 fc = hdr->frame_control;
568
569 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
570 (ieee80211_is_data_qos(fc))) {
571 u16 tid, ssn;
572 u8 *qc;
1395 573
1396 if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
1397 (ieee80211_is_data_qos(fc))) {
1398 u16 tid, ssn;
1399 u8 *qc;
1400 sta = sta_info_get(local, hdr->addr1);
1401 if (sta) {
1402 qc = ieee80211_get_qos_ctl(hdr); 574 qc = ieee80211_get_qos_ctl(hdr);
1403 tid = qc[0] & 0xf; 575 tid = qc[0] & 0xf;
1404 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) 576 ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
1405 & IEEE80211_SCTL_SEQ); 577 & IEEE80211_SCTL_SEQ);
1406 ieee80211_send_bar(sta->sdata->dev, hdr->addr1, 578 ieee80211_send_bar(sta->sdata, hdr->addr1,
1407 tid, ssn); 579 tid, ssn);
1408 } 580 }
1409 }
1410 581
1411 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) { 582 if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1412 sta = sta_info_get(local, hdr->addr1);
1413 if (sta) {
1414 ieee80211_handle_filtered_frame(local, sta, skb); 583 ieee80211_handle_filtered_frame(local, sta, skb);
1415 rcu_read_unlock(); 584 rcu_read_unlock();
1416 return; 585 return;
586 } else {
587 if (info->status.excessive_retries)
588 sta->tx_retry_failed++;
589 sta->tx_retry_count += info->status.retry_count;
1417 } 590 }
1418 } else 591
1419 rate_control_tx_status(local->mdev, skb); 592 sband = local->hw.wiphy->bands[info->band];
593 rate_control_tx_status(local, sband, sta, skb);
594 }
1420 595
1421 rcu_read_unlock(); 596 rcu_read_unlock();
1422 597
@@ -1504,7 +679,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
1504 679
1505 rcu_read_lock(); 680 rcu_read_lock();
1506 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 681 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
1507 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) { 682 if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
1508 if (!netif_running(sdata->dev)) 683 if (!netif_running(sdata->dev))
1509 continue; 684 continue;
1510 685
@@ -1580,8 +755,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1580 755
1581 local->hw.queues = 1; /* default */ 756 local->hw.queues = 1; /* default */
1582 757
1583 local->bridge_packets = 1;
1584
1585 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 758 local->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
1586 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 759 local->fragmentation_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
1587 local->short_retry_limit = 7; 760 local->short_retry_limit = 7;
@@ -1592,7 +765,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
1592 765
1593 spin_lock_init(&local->key_lock); 766 spin_lock_init(&local->key_lock);
1594 767
1595 INIT_DELAYED_WORK(&local->scan_work, ieee80211_sta_scan_work); 768 INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
1596 769
1597 sta_info_init(local); 770 sta_info_init(local);
1598 771
@@ -1619,7 +792,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1619 int result; 792 int result;
1620 enum ieee80211_band band; 793 enum ieee80211_band band;
1621 struct net_device *mdev; 794 struct net_device *mdev;
1622 struct wireless_dev *mwdev; 795 struct ieee80211_master_priv *mpriv;
1623 796
1624 /* 797 /*
1625 * generic code guarantees at least one band, 798 * generic code guarantees at least one band,
@@ -1639,6 +812,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1639 } 812 }
1640 } 813 }
1641 814
815 /* if low-level driver supports AP, we also support VLAN */
816 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP))
817 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
818
819 /* mac80211 always supports monitor */
820 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
821
1642 result = wiphy_register(local->hw.wiphy); 822 result = wiphy_register(local->hw.wiphy);
1643 if (result < 0) 823 if (result < 0)
1644 return result; 824 return result;
@@ -1654,16 +834,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1654 if (hw->queues < 4) 834 if (hw->queues < 4)
1655 hw->ampdu_queues = 0; 835 hw->ampdu_queues = 0;
1656 836
1657 mdev = alloc_netdev_mq(sizeof(struct wireless_dev), 837 mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
1658 "wmaster%d", ether_setup, 838 "wmaster%d", ether_setup,
1659 ieee80211_num_queues(hw)); 839 ieee80211_num_queues(hw));
1660 if (!mdev) 840 if (!mdev)
1661 goto fail_mdev_alloc; 841 goto fail_mdev_alloc;
1662 842
1663 mwdev = netdev_priv(mdev); 843 mpriv = netdev_priv(mdev);
1664 mdev->ieee80211_ptr = mwdev; 844 mpriv->local = local;
1665 mwdev->wiphy = local->hw.wiphy;
1666
1667 local->mdev = mdev; 845 local->mdev = mdev;
1668 846
1669 ieee80211_rx_bss_list_init(local); 847 ieee80211_rx_bss_list_init(local);
@@ -1745,7 +923,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1745 923
1746 /* add one default STA interface */ 924 /* add one default STA interface */
1747 result = ieee80211_if_add(local, "wlan%d", NULL, 925 result = ieee80211_if_add(local, "wlan%d", NULL,
1748 IEEE80211_IF_TYPE_STA, NULL); 926 NL80211_IFTYPE_STATION, NULL);
1749 if (result) 927 if (result)
1750 printk(KERN_WARNING "%s: Failed to add default virtual iface\n", 928 printk(KERN_WARNING "%s: Failed to add default virtual iface\n",
1751 wiphy_name(local->hw.wiphy)); 929 wiphy_name(local->hw.wiphy));
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 35f2f95f2fa7..8013277924f2 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -12,6 +12,9 @@
12#include "ieee80211_i.h" 12#include "ieee80211_i.h"
13#include "mesh.h" 13#include "mesh.h"
14 14
15#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
16#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
17
15#define PP_OFFSET 1 /* Path Selection Protocol */ 18#define PP_OFFSET 1 /* Path Selection Protocol */
16#define PM_OFFSET 5 /* Path Selection Metric */ 19#define PM_OFFSET 5 /* Path Selection Metric */
17#define CC_OFFSET 9 /* Congestion Control Mode */ 20#define CC_OFFSET 9 /* Congestion Control Mode */
@@ -35,19 +38,28 @@ void ieee80211s_stop(void)
35 kmem_cache_destroy(rm_cache); 38 kmem_cache_destroy(rm_cache);
36} 39}
37 40
41static void ieee80211_mesh_housekeeping_timer(unsigned long data)
42{
43 struct ieee80211_sub_if_data *sdata = (void *) data;
44 struct ieee80211_local *local = sdata->local;
45 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
46
47 ifmsh->housekeeping = true;
48 queue_work(local->hw.workqueue, &ifmsh->work);
49}
50
38/** 51/**
39 * mesh_matches_local - check if the config of a mesh point matches ours 52 * mesh_matches_local - check if the config of a mesh point matches ours
40 * 53 *
41 * @ie: information elements of a management frame from the mesh peer 54 * @ie: information elements of a management frame from the mesh peer
42 * @dev: local mesh interface 55 * @sdata: local mesh subif
43 * 56 *
44 * This function checks if the mesh configuration of a mesh point matches the 57 * This function checks if the mesh configuration of a mesh point matches the
45 * local mesh configuration, i.e. if both nodes belong to the same mesh network. 58 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
46 */ 59 */
47bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev) 60bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata)
48{ 61{
49 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 62 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
50 struct ieee80211_if_sta *sta = &sdata->u.sta;
51 63
52 /* 64 /*
53 * As support for each feature is added, check for matching 65 * As support for each feature is added, check for matching
@@ -59,11 +71,11 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev)
59 * - MDA enabled 71 * - MDA enabled
60 * - Power management control on fc 72 * - Power management control on fc
61 */ 73 */
62 if (sta->mesh_id_len == ie->mesh_id_len && 74 if (ifmsh->mesh_id_len == ie->mesh_id_len &&
63 memcmp(sta->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && 75 memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
64 memcmp(sta->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 && 76 memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
65 memcmp(sta->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 && 77 memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
66 memcmp(sta->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0) 78 memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
67 return true; 79 return true;
68 80
69 return false; 81 return false;
@@ -73,10 +85,8 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev)
73 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links 85 * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links
74 * 86 *
75 * @ie: information elements of a management frame from the mesh peer 87 * @ie: information elements of a management frame from the mesh peer
76 * @dev: local mesh interface
77 */ 88 */
78bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, 89bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
79 struct net_device *dev)
80{ 90{
81 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0; 91 return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
82} 92}
@@ -98,11 +108,11 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
98 */ 108 */
99 free_plinks = mesh_plink_availables(sdata); 109 free_plinks = mesh_plink_availables(sdata);
100 110
101 if (free_plinks != sdata->u.sta.accepting_plinks) 111 if (free_plinks != sdata->u.mesh.accepting_plinks)
102 ieee80211_sta_timer((unsigned long) sdata); 112 ieee80211_mesh_housekeeping_timer((unsigned long) sdata);
103} 113}
104 114
105void mesh_ids_set_default(struct ieee80211_if_sta *sta) 115void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
106{ 116{
107 u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff}; 117 u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff};
108 118
@@ -111,28 +121,26 @@ void mesh_ids_set_default(struct ieee80211_if_sta *sta)
111 memcpy(sta->mesh_cc_id, def_id, 4); 121 memcpy(sta->mesh_cc_id, def_id, 4);
112} 122}
113 123
114int mesh_rmc_init(struct net_device *dev) 124int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
115{ 125{
116 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
117 int i; 126 int i;
118 127
119 sdata->u.sta.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); 128 sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL);
120 if (!sdata->u.sta.rmc) 129 if (!sdata->u.mesh.rmc)
121 return -ENOMEM; 130 return -ENOMEM;
122 sdata->u.sta.rmc->idx_mask = RMC_BUCKETS - 1; 131 sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1;
123 for (i = 0; i < RMC_BUCKETS; i++) 132 for (i = 0; i < RMC_BUCKETS; i++)
124 INIT_LIST_HEAD(&sdata->u.sta.rmc->bucket[i].list); 133 INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list);
125 return 0; 134 return 0;
126} 135}
127 136
128void mesh_rmc_free(struct net_device *dev) 137void mesh_rmc_free(struct ieee80211_sub_if_data *sdata)
129{ 138{
130 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 139 struct mesh_rmc *rmc = sdata->u.mesh.rmc;
131 struct mesh_rmc *rmc = sdata->u.sta.rmc;
132 struct rmc_entry *p, *n; 140 struct rmc_entry *p, *n;
133 int i; 141 int i;
134 142
135 if (!sdata->u.sta.rmc) 143 if (!sdata->u.mesh.rmc)
136 return; 144 return;
137 145
138 for (i = 0; i < RMC_BUCKETS; i++) 146 for (i = 0; i < RMC_BUCKETS; i++)
@@ -142,7 +150,7 @@ void mesh_rmc_free(struct net_device *dev)
142 } 150 }
143 151
144 kfree(rmc); 152 kfree(rmc);
145 sdata->u.sta.rmc = NULL; 153 sdata->u.mesh.rmc = NULL;
146} 154}
147 155
148/** 156/**
@@ -158,10 +166,9 @@ void mesh_rmc_free(struct net_device *dev)
158 * it. 166 * it.
159 */ 167 */
160int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr, 168int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
161 struct net_device *dev) 169 struct ieee80211_sub_if_data *sdata)
162{ 170{
163 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 171 struct mesh_rmc *rmc = sdata->u.mesh.rmc;
164 struct mesh_rmc *rmc = sdata->u.sta.rmc;
165 u32 seqnum = 0; 172 u32 seqnum = 0;
166 int entries = 0; 173 int entries = 0;
167 u8 idx; 174 u8 idx;
@@ -194,10 +201,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
194 return 0; 201 return 0;
195} 202}
196 203
197void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev) 204void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
198{ 205{
199 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 206 struct ieee80211_local *local = sdata->local;
200 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
201 struct ieee80211_supported_band *sband; 207 struct ieee80211_supported_band *sband;
202 u8 *pos; 208 u8 *pos;
203 int len, i, rate; 209 int len, i, rate;
@@ -224,11 +230,11 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
224 } 230 }
225 } 231 }
226 232
227 pos = skb_put(skb, 2 + sdata->u.sta.mesh_id_len); 233 pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
228 *pos++ = WLAN_EID_MESH_ID; 234 *pos++ = WLAN_EID_MESH_ID;
229 *pos++ = sdata->u.sta.mesh_id_len; 235 *pos++ = sdata->u.mesh.mesh_id_len;
230 if (sdata->u.sta.mesh_id_len) 236 if (sdata->u.mesh.mesh_id_len)
231 memcpy(pos, sdata->u.sta.mesh_id, sdata->u.sta.mesh_id_len); 237 memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
232 238
233 pos = skb_put(skb, 21); 239 pos = skb_put(skb, 21);
234 *pos++ = WLAN_EID_MESH_CONFIG; 240 *pos++ = WLAN_EID_MESH_CONFIG;
@@ -237,15 +243,15 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
237 *pos++ = 1; 243 *pos++ = 1;
238 244
239 /* Active path selection protocol ID */ 245 /* Active path selection protocol ID */
240 memcpy(pos, sdata->u.sta.mesh_pp_id, 4); 246 memcpy(pos, sdata->u.mesh.mesh_pp_id, 4);
241 pos += 4; 247 pos += 4;
242 248
243 /* Active path selection metric ID */ 249 /* Active path selection metric ID */
244 memcpy(pos, sdata->u.sta.mesh_pm_id, 4); 250 memcpy(pos, sdata->u.mesh.mesh_pm_id, 4);
245 pos += 4; 251 pos += 4;
246 252
247 /* Congestion control mode identifier */ 253 /* Congestion control mode identifier */
248 memcpy(pos, sdata->u.sta.mesh_cc_id, 4); 254 memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
249 pos += 4; 255 pos += 4;
250 256
251 /* Channel precedence: 257 /* Channel precedence:
@@ -255,17 +261,17 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev)
255 pos += 4; 261 pos += 4;
256 262
257 /* Mesh capability */ 263 /* Mesh capability */
258 sdata->u.sta.accepting_plinks = mesh_plink_availables(sdata); 264 sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
259 *pos++ = sdata->u.sta.accepting_plinks ? ACCEPT_PLINKS : 0x00; 265 *pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00;
260 *pos++ = 0x00; 266 *pos++ = 0x00;
261 267
262 return; 268 return;
263} 269}
264 270
265u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl) 271u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
266{ 272{
267 /* Use last four bytes of hw addr and interface index as hash index */ 273 /* Use last four bytes of hw addr and interface index as hash index */
268 return jhash_2words(*(u32 *)(addr+2), dev->ifindex, tbl->hash_rnd) 274 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
269 & tbl->hash_mask; 275 & tbl->hash_mask;
270} 276}
271 277
@@ -344,10 +350,10 @@ static void ieee80211_mesh_path_timer(unsigned long data)
344{ 350{
345 struct ieee80211_sub_if_data *sdata = 351 struct ieee80211_sub_if_data *sdata =
346 (struct ieee80211_sub_if_data *) data; 352 (struct ieee80211_sub_if_data *) data;
347 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 353 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
348 struct ieee80211_local *local = wdev_priv(&sdata->wdev); 354 struct ieee80211_local *local = sdata->local;
349 355
350 queue_work(local->hw.workqueue, &ifsta->work); 356 queue_work(local->hw.workqueue, &ifmsh->work);
351} 357}
352 358
353struct mesh_table *mesh_table_grow(struct mesh_table *tbl) 359struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
@@ -399,50 +405,264 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
399 struct ieee80211_sub_if_data *sdata) 405 struct ieee80211_sub_if_data *sdata)
400{ 406{
401 meshhdr->flags = 0; 407 meshhdr->flags = 0;
402 meshhdr->ttl = sdata->u.sta.mshcfg.dot11MeshTTL; 408 meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
403 put_unaligned(cpu_to_le32(sdata->u.sta.mesh_seqnum), &meshhdr->seqnum); 409 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
404 sdata->u.sta.mesh_seqnum++; 410 sdata->u.mesh.mesh_seqnum++;
405 411
406 return 6; 412 return 6;
407} 413}
408 414
415static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
416 struct ieee80211_if_mesh *ifmsh)
417{
418 bool free_plinks;
419
420#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
421 printk(KERN_DEBUG "%s: running mesh housekeeping\n",
422 sdata->dev->name);
423#endif
424
425 ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
426 mesh_path_expire(sdata);
427
428 free_plinks = mesh_plink_availables(sdata);
429 if (free_plinks != sdata->u.mesh.accepting_plinks)
430 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
431
432 ifmsh->housekeeping = false;
433 mod_timer(&ifmsh->housekeeping_timer,
434 round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
435}
436
437
438void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
439{
440 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
441 struct ieee80211_local *local = sdata->local;
442
443 ifmsh->housekeeping = true;
444 queue_work(local->hw.workqueue, &ifmsh->work);
445 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
446}
447
448void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
449{
450 del_timer_sync(&sdata->u.mesh.housekeeping_timer);
451 /*
452 * If the timer fired while we waited for it, it will have
453 * requeued the work. Now the work will be running again
454 * but will not rearm the timer again because it checks
455 * whether the interface is running, which, at this point,
456 * it no longer is.
457 */
458 cancel_work_sync(&sdata->u.mesh.work);
459
460 /*
461 * When we get here, the interface is marked down.
462 * Call synchronize_rcu() to wait for the RX path
463 * should it be using the interface and enqueuing
464 * frames at this very time on another CPU.
465 */
466 synchronize_rcu();
467 skb_queue_purge(&sdata->u.mesh.skb_queue);
468}
469
470static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
471 u16 stype,
472 struct ieee80211_mgmt *mgmt,
473 size_t len,
474 struct ieee80211_rx_status *rx_status)
475{
476 struct ieee80211_local *local= sdata->local;
477 struct ieee802_11_elems elems;
478 struct ieee80211_channel *channel;
479 u64 supp_rates = 0;
480 size_t baselen;
481 int freq;
482 enum ieee80211_band band = rx_status->band;
483
484 /* ignore ProbeResp to foreign address */
485 if (stype == IEEE80211_STYPE_PROBE_RESP &&
486 compare_ether_addr(mgmt->da, sdata->dev->dev_addr))
487 return;
488
489 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
490 if (baselen > len)
491 return;
492
493 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
494 &elems);
495
496 if (elems.ds_params && elems.ds_params_len == 1)
497 freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
498 else
499 freq = rx_status->freq;
500
501 channel = ieee80211_get_channel(local->hw.wiphy, freq);
502
503 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
504 return;
505
506 if (elems.mesh_id && elems.mesh_config &&
507 mesh_matches_local(&elems, sdata)) {
508 supp_rates = ieee80211_sta_get_rates(local, &elems, band);
509
510 mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
511 mesh_peer_accepts_plinks(&elems));
512 }
513}
514
515static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
516 struct ieee80211_mgmt *mgmt,
517 size_t len,
518 struct ieee80211_rx_status *rx_status)
519{
520 switch (mgmt->u.action.category) {
521 case PLINK_CATEGORY:
522 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
523 break;
524 case MESH_PATH_SEL_CATEGORY:
525 mesh_rx_path_sel_frame(sdata, mgmt, len);
526 break;
527 }
528}
529
530static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
531 struct sk_buff *skb)
532{
533 struct ieee80211_rx_status *rx_status;
534 struct ieee80211_if_mesh *ifmsh;
535 struct ieee80211_mgmt *mgmt;
536 u16 stype;
537
538 ifmsh = &sdata->u.mesh;
539
540 rx_status = (struct ieee80211_rx_status *) skb->cb;
541 mgmt = (struct ieee80211_mgmt *) skb->data;
542 stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
543
544 switch (stype) {
545 case IEEE80211_STYPE_PROBE_RESP:
546 case IEEE80211_STYPE_BEACON:
547 ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len,
548 rx_status);
549 break;
550 case IEEE80211_STYPE_ACTION:
551 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
552 break;
553 }
554
555 kfree_skb(skb);
556}
557
558static void ieee80211_mesh_work(struct work_struct *work)
559{
560 struct ieee80211_sub_if_data *sdata =
561 container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
562 struct ieee80211_local *local = sdata->local;
563 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
564 struct sk_buff *skb;
565
566 if (!netif_running(sdata->dev))
567 return;
568
569 if (local->sw_scanning || local->hw_scanning)
570 return;
571
572 while ((skb = skb_dequeue(&ifmsh->skb_queue)))
573 ieee80211_mesh_rx_queued_mgmt(sdata, skb);
574
575 if (ifmsh->preq_queue_len &&
576 time_after(jiffies,
577 ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
578 mesh_path_start_discovery(sdata);
579
580 if (ifmsh->housekeeping)
581 ieee80211_mesh_housekeeping(sdata, ifmsh);
582}
583
584void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
585{
586 struct ieee80211_sub_if_data *sdata;
587
588 rcu_read_lock();
589 list_for_each_entry_rcu(sdata, &local->interfaces, list)
590 if (ieee80211_vif_is_mesh(&sdata->vif))
591 queue_work(local->hw.workqueue, &sdata->u.mesh.work);
592 rcu_read_unlock();
593}
594
409void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) 595void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
410{ 596{
411 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 597 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
412 598
413 ifsta->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; 599 INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
414 ifsta->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; 600 setup_timer(&ifmsh->housekeeping_timer,
415 ifsta->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T; 601 ieee80211_mesh_housekeeping_timer,
416 ifsta->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR; 602 (unsigned long) sdata);
417 ifsta->mshcfg.dot11MeshTTL = MESH_TTL; 603 skb_queue_head_init(&sdata->u.mesh.skb_queue);
418 ifsta->mshcfg.auto_open_plinks = true; 604
419 ifsta->mshcfg.dot11MeshMaxPeerLinks = 605 ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
606 ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
607 ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
608 ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
609 ifmsh->mshcfg.dot11MeshTTL = MESH_TTL;
610 ifmsh->mshcfg.auto_open_plinks = true;
611 ifmsh->mshcfg.dot11MeshMaxPeerLinks =
420 MESH_MAX_ESTAB_PLINKS; 612 MESH_MAX_ESTAB_PLINKS;
421 ifsta->mshcfg.dot11MeshHWMPactivePathTimeout = 613 ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout =
422 MESH_PATH_TIMEOUT; 614 MESH_PATH_TIMEOUT;
423 ifsta->mshcfg.dot11MeshHWMPpreqMinInterval = 615 ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval =
424 MESH_PREQ_MIN_INT; 616 MESH_PREQ_MIN_INT;
425 ifsta->mshcfg.dot11MeshHWMPnetDiameterTraversalTime = 617 ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
426 MESH_DIAM_TRAVERSAL_TIME; 618 MESH_DIAM_TRAVERSAL_TIME;
427 ifsta->mshcfg.dot11MeshHWMPmaxPREQretries = 619 ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries =
428 MESH_MAX_PREQ_RETRIES; 620 MESH_MAX_PREQ_RETRIES;
429 ifsta->mshcfg.path_refresh_time = 621 ifmsh->mshcfg.path_refresh_time =
430 MESH_PATH_REFRESH_TIME; 622 MESH_PATH_REFRESH_TIME;
431 ifsta->mshcfg.min_discovery_timeout = 623 ifmsh->mshcfg.min_discovery_timeout =
432 MESH_MIN_DISCOVERY_TIMEOUT; 624 MESH_MIN_DISCOVERY_TIMEOUT;
433 ifsta->accepting_plinks = true; 625 ifmsh->accepting_plinks = true;
434 ifsta->preq_id = 0; 626 ifmsh->preq_id = 0;
435 ifsta->dsn = 0; 627 ifmsh->dsn = 0;
436 atomic_set(&ifsta->mpaths, 0); 628 atomic_set(&ifmsh->mpaths, 0);
437 mesh_rmc_init(sdata->dev); 629 mesh_rmc_init(sdata);
438 ifsta->last_preq = jiffies; 630 ifmsh->last_preq = jiffies;
439 /* Allocate all mesh structures when creating the first mesh interface. */ 631 /* Allocate all mesh structures when creating the first mesh interface. */
440 if (!mesh_allocated) 632 if (!mesh_allocated)
441 ieee80211s_init(); 633 ieee80211s_init();
442 mesh_ids_set_default(ifsta); 634 mesh_ids_set_default(ifmsh);
443 setup_timer(&ifsta->mesh_path_timer, 635 setup_timer(&ifmsh->mesh_path_timer,
444 ieee80211_mesh_path_timer, 636 ieee80211_mesh_path_timer,
445 (unsigned long) sdata); 637 (unsigned long) sdata);
446 INIT_LIST_HEAD(&ifsta->preq_queue.list); 638 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
447 spin_lock_init(&ifsta->mesh_preq_queue_lock); 639 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
640}
641
642ieee80211_rx_result
643ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
644 struct ieee80211_rx_status *rx_status)
645{
646 struct ieee80211_local *local = sdata->local;
647 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
648 struct ieee80211_mgmt *mgmt;
649 u16 fc;
650
651 if (skb->len < 24)
652 return RX_DROP_MONITOR;
653
654 mgmt = (struct ieee80211_mgmt *) skb->data;
655 fc = le16_to_cpu(mgmt->frame_control);
656
657 switch (fc & IEEE80211_FCTL_STYPE) {
658 case IEEE80211_STYPE_PROBE_RESP:
659 case IEEE80211_STYPE_BEACON:
660 case IEEE80211_STYPE_ACTION:
661 memcpy(skb->cb, rx_status, sizeof(*rx_status));
662 skb_queue_tail(&ifmsh->skb_queue, skb);
663 queue_work(local->hw.workqueue, &ifmsh->work);
664 return RX_QUEUED;
665 }
666
667 return RX_CONTINUE;
448} 668}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 7495fbb0d211..e10471c6ba42 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -47,7 +47,7 @@ enum mesh_path_flags {
47 * struct mesh_path - mac80211 mesh path structure 47 * struct mesh_path - mac80211 mesh path structure
48 * 48 *
49 * @dst: mesh path destination mac address 49 * @dst: mesh path destination mac address
50 * @dev: mesh path device 50 * @sdata: mesh subif
51 * @next_hop: mesh neighbor to which frames for this destination will be 51 * @next_hop: mesh neighbor to which frames for this destination will be
52 * forwarded 52 * forwarded
53 * @timer: mesh path discovery timer 53 * @timer: mesh path discovery timer
@@ -64,14 +64,15 @@ enum mesh_path_flags {
64 * @state_lock: mesh pat state lock 64 * @state_lock: mesh pat state lock
65 * 65 *
66 * 66 *
67 * The combination of dst and dev is unique in the mesh path table. Since the 67 * The combination of dst and sdata is unique in the mesh path table. Since the
68 * next_hop STA is only protected by RCU as well, deleting the STA must also 68 * next_hop STA is only protected by RCU as well, deleting the STA must also
69 * remove/substitute the mesh_path structure and wait until that is no longer 69 * remove/substitute the mesh_path structure and wait until that is no longer
70 * reachable before destroying the STA completely. 70 * reachable before destroying the STA completely.
71 */ 71 */
72struct mesh_path { 72struct mesh_path {
73 u8 dst[ETH_ALEN]; 73 u8 dst[ETH_ALEN];
74 struct net_device *dev; 74 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
75 struct ieee80211_sub_if_data *sdata;
75 struct sta_info *next_hop; 76 struct sta_info *next_hop;
76 struct timer_list timer; 77 struct timer_list timer;
77 struct sk_buff_head frame_queue; 78 struct sk_buff_head frame_queue;
@@ -203,67 +204,82 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
203int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, 204int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
204 struct ieee80211_sub_if_data *sdata); 205 struct ieee80211_sub_if_data *sdata);
205int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 206int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
206 struct net_device *dev); 207 struct ieee80211_sub_if_data *sdata);
207bool mesh_matches_local(struct ieee802_11_elems *ie, struct net_device *dev); 208bool mesh_matches_local(struct ieee802_11_elems *ie,
208void mesh_ids_set_default(struct ieee80211_if_sta *sta); 209 struct ieee80211_sub_if_data *sdata);
209void mesh_mgmt_ies_add(struct sk_buff *skb, struct net_device *dev); 210void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
210void mesh_rmc_free(struct net_device *dev); 211void mesh_mgmt_ies_add(struct sk_buff *skb,
211int mesh_rmc_init(struct net_device *dev); 212 struct ieee80211_sub_if_data *sdata);
213void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
214int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
212void ieee80211s_init(void); 215void ieee80211s_init(void);
213void ieee80211s_stop(void); 216void ieee80211s_stop(void);
214void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 217void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
218ieee80211_rx_result
219ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
220 struct ieee80211_rx_status *rx_status);
221void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
222void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
215 223
216/* Mesh paths */ 224/* Mesh paths */
217int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev); 225int mesh_nexthop_lookup(struct sk_buff *skb,
218void mesh_path_start_discovery(struct net_device *dev); 226 struct ieee80211_sub_if_data *sdata);
219struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev); 227void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
220struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev); 228struct mesh_path *mesh_path_lookup(u8 *dst,
229 struct ieee80211_sub_if_data *sdata);
230struct mesh_path *mpp_path_lookup(u8 *dst,
231 struct ieee80211_sub_if_data *sdata);
232int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata);
233struct mesh_path *mesh_path_lookup_by_idx(int idx,
234 struct ieee80211_sub_if_data *sdata);
221void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); 235void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
222void mesh_path_expire(struct net_device *dev); 236void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
223void mesh_path_flush(struct net_device *dev); 237void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
224void mesh_rx_path_sel_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 238void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
225 size_t len); 239 struct ieee80211_mgmt *mgmt, size_t len);
226int mesh_path_add(u8 *dst, struct net_device *dev); 240int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
227/* Mesh plinks */ 241/* Mesh plinks */
228void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, 242void mesh_neighbour_update(u8 *hw_addr, u64 rates,
229 bool add); 243 struct ieee80211_sub_if_data *sdata, bool add);
230bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie, 244bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
231 struct net_device *dev);
232void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 245void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
233void mesh_plink_broken(struct sta_info *sta); 246void mesh_plink_broken(struct sta_info *sta);
234void mesh_plink_deactivate(struct sta_info *sta); 247void mesh_plink_deactivate(struct sta_info *sta);
235int mesh_plink_open(struct sta_info *sta); 248int mesh_plink_open(struct sta_info *sta);
236int mesh_plink_close(struct sta_info *sta); 249int mesh_plink_close(struct sta_info *sta);
237void mesh_plink_block(struct sta_info *sta); 250void mesh_plink_block(struct sta_info *sta);
238void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 251void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
239 size_t len, struct ieee80211_rx_status *rx_status); 252 struct ieee80211_mgmt *mgmt, size_t len,
253 struct ieee80211_rx_status *rx_status);
240 254
241/* Private interfaces */ 255/* Private interfaces */
242/* Mesh tables */ 256/* Mesh tables */
243struct mesh_table *mesh_table_alloc(int size_order); 257struct mesh_table *mesh_table_alloc(int size_order);
244void mesh_table_free(struct mesh_table *tbl, bool free_leafs); 258void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
245struct mesh_table *mesh_table_grow(struct mesh_table *tbl); 259struct mesh_table *mesh_table_grow(struct mesh_table *tbl);
246u32 mesh_table_hash(u8 *addr, struct net_device *dev, struct mesh_table *tbl); 260u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
261 struct mesh_table *tbl);
247/* Mesh paths */ 262/* Mesh paths */
248int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra, 263int mesh_path_error_tx(u8 *dest, __le32 dest_dsn, u8 *ra,
249 struct net_device *dev); 264 struct ieee80211_sub_if_data *sdata);
250void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); 265void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
251void mesh_path_flush_pending(struct mesh_path *mpath); 266void mesh_path_flush_pending(struct mesh_path *mpath);
252void mesh_path_tx_pending(struct mesh_path *mpath); 267void mesh_path_tx_pending(struct mesh_path *mpath);
253int mesh_pathtbl_init(void); 268int mesh_pathtbl_init(void);
254void mesh_pathtbl_unregister(void); 269void mesh_pathtbl_unregister(void);
255int mesh_path_del(u8 *addr, struct net_device *dev); 270int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
256void mesh_path_timer(unsigned long data); 271void mesh_path_timer(unsigned long data);
257void mesh_path_flush_by_nexthop(struct sta_info *sta); 272void mesh_path_flush_by_nexthop(struct sta_info *sta);
258void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev); 273void mesh_path_discard_frame(struct sk_buff *skb,
274 struct ieee80211_sub_if_data *sdata);
259 275
260#ifdef CONFIG_MAC80211_MESH 276#ifdef CONFIG_MAC80211_MESH
261extern int mesh_allocated; 277extern int mesh_allocated;
262 278
263static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) 279static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
264{ 280{
265 return sdata->u.sta.mshcfg.dot11MeshMaxPeerLinks - 281 return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks -
266 atomic_read(&sdata->u.sta.mshstats.estab_plinks); 282 atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
267} 283}
268 284
269static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata) 285static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
@@ -281,8 +297,12 @@ static inline void mesh_path_activate(struct mesh_path *mpath)
281 for (i = 0; i <= x->hash_mask; i++) \ 297 for (i = 0; i <= x->hash_mask; i++) \
282 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) 298 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
283 299
300void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
301
284#else 302#else
285#define mesh_allocated 0 303#define mesh_allocated 0
304static inline void
305ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
286#endif 306#endif
287 307
288#endif /* IEEE80211S_H */ 308#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 08aca446ca01..501c7831adb4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -64,14 +64,14 @@ static inline u32 u32_field_get(u8 *preq_elem, int offset, bool ae)
64#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0) 64#define DSN_LT(x, y) ((long) (x) - (long) (y) < 0)
65 65
66#define net_traversal_jiffies(s) \ 66#define net_traversal_jiffies(s) \
67 msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) 67 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
68#define default_lifetime(s) \ 68#define default_lifetime(s) \
69 MSEC_TO_TU(s->u.sta.mshcfg.dot11MeshHWMPactivePathTimeout) 69 MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
70#define min_preq_int_jiff(s) \ 70#define min_preq_int_jiff(s) \
71 (msecs_to_jiffies(s->u.sta.mshcfg.dot11MeshHWMPpreqMinInterval)) 71 (msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
72#define max_preq_retries(s) (s->u.sta.mshcfg.dot11MeshHWMPmaxPREQretries) 72#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
73#define disc_timeout_jiff(s) \ 73#define disc_timeout_jiff(s) \
74 msecs_to_jiffies(sdata->u.sta.mshcfg.min_discovery_timeout) 74 msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
75 75
76enum mpath_frame_type { 76enum mpath_frame_type {
77 MPATH_PREQ = 0, 77 MPATH_PREQ = 0,
@@ -82,9 +82,9 @@ enum mpath_frame_type {
82static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, 82static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
83 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst, 83 u8 *orig_addr, __le32 orig_dsn, u8 dst_flags, u8 *dst,
84 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime, 84 __le32 dst_dsn, u8 *da, u8 hop_count, u8 ttl, __le32 lifetime,
85 __le32 metric, __le32 preq_id, struct net_device *dev) 85 __le32 metric, __le32 preq_id, struct ieee80211_sub_if_data *sdata)
86{ 86{
87 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 87 struct ieee80211_local *local = sdata->local;
88 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 88 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
89 struct ieee80211_mgmt *mgmt; 89 struct ieee80211_mgmt *mgmt;
90 u8 *pos; 90 u8 *pos;
@@ -99,11 +99,11 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
99 mgmt = (struct ieee80211_mgmt *) 99 mgmt = (struct ieee80211_mgmt *)
100 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); 100 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
101 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); 101 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
102 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 102 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
103 IEEE80211_STYPE_ACTION); 103 IEEE80211_STYPE_ACTION);
104 104
105 memcpy(mgmt->da, da, ETH_ALEN); 105 memcpy(mgmt->da, da, ETH_ALEN);
106 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 106 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
107 /* BSSID is left zeroed, wildcard value */ 107 /* BSSID is left zeroed, wildcard value */
108 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 108 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
109 mgmt->u.action.u.mesh_action.action_code = action; 109 mgmt->u.action.u.mesh_action.action_code = action;
@@ -149,7 +149,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
149 pos += ETH_ALEN; 149 pos += ETH_ALEN;
150 memcpy(pos, &dst_dsn, 4); 150 memcpy(pos, &dst_dsn, 4);
151 151
152 ieee80211_sta_tx(dev, skb, 0); 152 ieee80211_tx_skb(sdata, skb, 0);
153 return 0; 153 return 0;
154} 154}
155 155
@@ -161,9 +161,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
161 * @ra: node this frame is addressed to 161 * @ra: node this frame is addressed to
162 */ 162 */
163int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra, 163int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
164 struct net_device *dev) 164 struct ieee80211_sub_if_data *sdata)
165{ 165{
166 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 166 struct ieee80211_local *local = sdata->local;
167 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 167 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
168 struct ieee80211_mgmt *mgmt; 168 struct ieee80211_mgmt *mgmt;
169 u8 *pos; 169 u8 *pos;
@@ -178,11 +178,11 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
178 mgmt = (struct ieee80211_mgmt *) 178 mgmt = (struct ieee80211_mgmt *)
179 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); 179 skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action));
180 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); 180 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action));
181 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 181 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
182 IEEE80211_STYPE_ACTION); 182 IEEE80211_STYPE_ACTION);
183 183
184 memcpy(mgmt->da, ra, ETH_ALEN); 184 memcpy(mgmt->da, ra, ETH_ALEN);
185 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 185 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
186 /* BSSID is left zeroed, wildcard value */ 186 /* BSSID is left zeroed, wildcard value */
187 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY; 187 mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
188 mgmt->u.action.u.mesh_action.action_code = MPATH_PERR; 188 mgmt->u.action.u.mesh_action.action_code = MPATH_PERR;
@@ -198,7 +198,7 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
198 pos += ETH_ALEN; 198 pos += ETH_ALEN;
199 memcpy(pos, &dst_dsn, 4); 199 memcpy(pos, &dst_dsn, 4);
200 200
201 ieee80211_sta_tx(dev, skb, 0); 201 ieee80211_tx_skb(sdata, skb, 0);
202 return 0; 202 return 0;
203} 203}
204 204
@@ -223,7 +223,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
223 /* bitrate is in units of 100 Kbps, while we need rate in units of 223 /* bitrate is in units of 100 Kbps, while we need rate in units of
224 * 1Mbps. This will be corrected on tx_time computation. 224 * 1Mbps. This will be corrected on tx_time computation.
225 */ 225 */
226 rate = sband->bitrates[sta->txrate_idx].bitrate; 226 rate = sband->bitrates[sta->last_txrate_idx].bitrate;
227 tx_time = (device_constant + 10 * test_frame_len / rate); 227 tx_time = (device_constant + 10 * test_frame_len / rate);
228 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); 228 estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
229 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; 229 result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ;
@@ -233,7 +233,7 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
233/** 233/**
234 * hwmp_route_info_get - Update routing info to originator and transmitter 234 * hwmp_route_info_get - Update routing info to originator and transmitter
235 * 235 *
236 * @dev: local mesh interface 236 * @sdata: local mesh subif
237 * @mgmt: mesh management frame 237 * @mgmt: mesh management frame
238 * @hwmp_ie: hwmp information element (PREP or PREQ) 238 * @hwmp_ie: hwmp information element (PREP or PREQ)
239 * 239 *
@@ -246,11 +246,11 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
246 * Notes: this function is the only place (besides user-provided info) where 246 * Notes: this function is the only place (besides user-provided info) where
247 * path routing information is updated. 247 * path routing information is updated.
248 */ 248 */
249static u32 hwmp_route_info_get(struct net_device *dev, 249static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
250 struct ieee80211_mgmt *mgmt, 250 struct ieee80211_mgmt *mgmt,
251 u8 *hwmp_ie) 251 u8 *hwmp_ie)
252{ 252{
253 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 253 struct ieee80211_local *local = sdata->local;
254 struct mesh_path *mpath; 254 struct mesh_path *mpath;
255 struct sta_info *sta; 255 struct sta_info *sta;
256 bool fresh_info; 256 bool fresh_info;
@@ -301,14 +301,14 @@ static u32 hwmp_route_info_get(struct net_device *dev,
301 new_metric = MAX_METRIC; 301 new_metric = MAX_METRIC;
302 exp_time = TU_TO_EXP_TIME(orig_lifetime); 302 exp_time = TU_TO_EXP_TIME(orig_lifetime);
303 303
304 if (memcmp(orig_addr, dev->dev_addr, ETH_ALEN) == 0) { 304 if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
305 /* This MP is the originator, we are not interested in this 305 /* This MP is the originator, we are not interested in this
306 * frame, except for updating transmitter's path info. 306 * frame, except for updating transmitter's path info.
307 */ 307 */
308 process = false; 308 process = false;
309 fresh_info = false; 309 fresh_info = false;
310 } else { 310 } else {
311 mpath = mesh_path_lookup(orig_addr, dev); 311 mpath = mesh_path_lookup(orig_addr, sdata);
312 if (mpath) { 312 if (mpath) {
313 spin_lock_bh(&mpath->state_lock); 313 spin_lock_bh(&mpath->state_lock);
314 if (mpath->flags & MESH_PATH_FIXED) 314 if (mpath->flags & MESH_PATH_FIXED)
@@ -324,8 +324,8 @@ static u32 hwmp_route_info_get(struct net_device *dev,
324 } 324 }
325 } 325 }
326 } else { 326 } else {
327 mesh_path_add(orig_addr, dev); 327 mesh_path_add(orig_addr, sdata);
328 mpath = mesh_path_lookup(orig_addr, dev); 328 mpath = mesh_path_lookup(orig_addr, sdata);
329 if (!mpath) { 329 if (!mpath) {
330 rcu_read_unlock(); 330 rcu_read_unlock();
331 return 0; 331 return 0;
@@ -357,7 +357,7 @@ static u32 hwmp_route_info_get(struct net_device *dev,
357 else { 357 else {
358 fresh_info = true; 358 fresh_info = true;
359 359
360 mpath = mesh_path_lookup(ta, dev); 360 mpath = mesh_path_lookup(ta, sdata);
361 if (mpath) { 361 if (mpath) {
362 spin_lock_bh(&mpath->state_lock); 362 spin_lock_bh(&mpath->state_lock);
363 if ((mpath->flags & MESH_PATH_FIXED) || 363 if ((mpath->flags & MESH_PATH_FIXED) ||
@@ -365,8 +365,8 @@ static u32 hwmp_route_info_get(struct net_device *dev,
365 (last_hop_metric > mpath->metric))) 365 (last_hop_metric > mpath->metric)))
366 fresh_info = false; 366 fresh_info = false;
367 } else { 367 } else {
368 mesh_path_add(ta, dev); 368 mesh_path_add(ta, sdata);
369 mpath = mesh_path_lookup(ta, dev); 369 mpath = mesh_path_lookup(ta, sdata);
370 if (!mpath) { 370 if (!mpath) {
371 rcu_read_unlock(); 371 rcu_read_unlock();
372 return 0; 372 return 0;
@@ -392,11 +392,10 @@ static u32 hwmp_route_info_get(struct net_device *dev,
392 return process ? new_metric : 0; 392 return process ? new_metric : 0;
393} 393}
394 394
395static void hwmp_preq_frame_process(struct net_device *dev, 395static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
396 struct ieee80211_mgmt *mgmt, 396 struct ieee80211_mgmt *mgmt,
397 u8 *preq_elem, u32 metric) { 397 u8 *preq_elem, u32 metric) {
398 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 398 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
399 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
400 struct mesh_path *mpath; 399 struct mesh_path *mpath;
401 u8 *dst_addr, *orig_addr; 400 u8 *dst_addr, *orig_addr;
402 u8 dst_flags, ttl; 401 u8 dst_flags, ttl;
@@ -411,19 +410,19 @@ static void hwmp_preq_frame_process(struct net_device *dev,
411 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem); 410 orig_dsn = PREQ_IE_ORIG_DSN(preq_elem);
412 dst_flags = PREQ_IE_DST_F(preq_elem); 411 dst_flags = PREQ_IE_DST_F(preq_elem);
413 412
414 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) { 413 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
415 forward = false; 414 forward = false;
416 reply = true; 415 reply = true;
417 metric = 0; 416 metric = 0;
418 if (time_after(jiffies, ifsta->last_dsn_update + 417 if (time_after(jiffies, ifmsh->last_dsn_update +
419 net_traversal_jiffies(sdata)) || 418 net_traversal_jiffies(sdata)) ||
420 time_before(jiffies, ifsta->last_dsn_update)) { 419 time_before(jiffies, ifmsh->last_dsn_update)) {
421 dst_dsn = ++ifsta->dsn; 420 dst_dsn = ++ifmsh->dsn;
422 ifsta->last_dsn_update = jiffies; 421 ifmsh->last_dsn_update = jiffies;
423 } 422 }
424 } else { 423 } else {
425 rcu_read_lock(); 424 rcu_read_lock();
426 mpath = mesh_path_lookup(dst_addr, dev); 425 mpath = mesh_path_lookup(dst_addr, sdata);
427 if (mpath) { 426 if (mpath) {
428 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) || 427 if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
429 DSN_LT(mpath->dsn, dst_dsn)) { 428 DSN_LT(mpath->dsn, dst_dsn)) {
@@ -445,15 +444,15 @@ static void hwmp_preq_frame_process(struct net_device *dev,
445 444
446 if (reply) { 445 if (reply) {
447 lifetime = PREQ_IE_LIFETIME(preq_elem); 446 lifetime = PREQ_IE_LIFETIME(preq_elem);
448 ttl = ifsta->mshcfg.dot11MeshTTL; 447 ttl = ifmsh->mshcfg.dot11MeshTTL;
449 if (ttl != 0) 448 if (ttl != 0)
450 mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr, 449 mesh_path_sel_frame_tx(MPATH_PREP, 0, dst_addr,
451 cpu_to_le32(dst_dsn), 0, orig_addr, 450 cpu_to_le32(dst_dsn), 0, orig_addr,
452 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl, 451 cpu_to_le32(orig_dsn), mgmt->sa, 0, ttl,
453 cpu_to_le32(lifetime), cpu_to_le32(metric), 452 cpu_to_le32(lifetime), cpu_to_le32(metric),
454 0, dev); 453 0, sdata);
455 else 454 else
456 ifsta->mshstats.dropped_frames_ttl++; 455 ifmsh->mshstats.dropped_frames_ttl++;
457 } 456 }
458 457
459 if (forward) { 458 if (forward) {
@@ -463,7 +462,7 @@ static void hwmp_preq_frame_process(struct net_device *dev,
463 ttl = PREQ_IE_TTL(preq_elem); 462 ttl = PREQ_IE_TTL(preq_elem);
464 lifetime = PREQ_IE_LIFETIME(preq_elem); 463 lifetime = PREQ_IE_LIFETIME(preq_elem);
465 if (ttl <= 1) { 464 if (ttl <= 1) {
466 ifsta->mshstats.dropped_frames_ttl++; 465 ifmsh->mshstats.dropped_frames_ttl++;
467 return; 466 return;
468 } 467 }
469 --ttl; 468 --ttl;
@@ -472,20 +471,19 @@ static void hwmp_preq_frame_process(struct net_device *dev,
472 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1; 471 hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
473 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr, 472 mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
474 cpu_to_le32(orig_dsn), dst_flags, dst_addr, 473 cpu_to_le32(orig_dsn), dst_flags, dst_addr,
475 cpu_to_le32(dst_dsn), dev->broadcast, 474 cpu_to_le32(dst_dsn), sdata->dev->broadcast,
476 hopcount, ttl, cpu_to_le32(lifetime), 475 hopcount, ttl, cpu_to_le32(lifetime),
477 cpu_to_le32(metric), cpu_to_le32(preq_id), 476 cpu_to_le32(metric), cpu_to_le32(preq_id),
478 dev); 477 sdata);
479 ifsta->mshstats.fwded_frames++; 478 ifmsh->mshstats.fwded_frames++;
480 } 479 }
481} 480}
482 481
483 482
484static void hwmp_prep_frame_process(struct net_device *dev, 483static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
485 struct ieee80211_mgmt *mgmt, 484 struct ieee80211_mgmt *mgmt,
486 u8 *prep_elem, u32 metric) 485 u8 *prep_elem, u32 metric)
487{ 486{
488 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
489 struct mesh_path *mpath; 487 struct mesh_path *mpath;
490 u8 *dst_addr, *orig_addr; 488 u8 *dst_addr, *orig_addr;
491 u8 ttl, hopcount, flags; 489 u8 ttl, hopcount, flags;
@@ -499,18 +497,18 @@ static void hwmp_prep_frame_process(struct net_device *dev,
499 * replies 497 * replies
500 */ 498 */
501 dst_addr = PREP_IE_DST_ADDR(prep_elem); 499 dst_addr = PREP_IE_DST_ADDR(prep_elem);
502 if (memcmp(dst_addr, dev->dev_addr, ETH_ALEN) == 0) 500 if (memcmp(dst_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
503 /* destination, no forwarding required */ 501 /* destination, no forwarding required */
504 return; 502 return;
505 503
506 ttl = PREP_IE_TTL(prep_elem); 504 ttl = PREP_IE_TTL(prep_elem);
507 if (ttl <= 1) { 505 if (ttl <= 1) {
508 sdata->u.sta.mshstats.dropped_frames_ttl++; 506 sdata->u.mesh.mshstats.dropped_frames_ttl++;
509 return; 507 return;
510 } 508 }
511 509
512 rcu_read_lock(); 510 rcu_read_lock();
513 mpath = mesh_path_lookup(dst_addr, dev); 511 mpath = mesh_path_lookup(dst_addr, sdata);
514 if (mpath) 512 if (mpath)
515 spin_lock_bh(&mpath->state_lock); 513 spin_lock_bh(&mpath->state_lock);
516 else 514 else
@@ -519,7 +517,7 @@ static void hwmp_prep_frame_process(struct net_device *dev,
519 spin_unlock_bh(&mpath->state_lock); 517 spin_unlock_bh(&mpath->state_lock);
520 goto fail; 518 goto fail;
521 } 519 }
522 memcpy(next_hop, mpath->next_hop->addr, ETH_ALEN); 520 memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
523 spin_unlock_bh(&mpath->state_lock); 521 spin_unlock_bh(&mpath->state_lock);
524 --ttl; 522 --ttl;
525 flags = PREP_IE_FLAGS(prep_elem); 523 flags = PREP_IE_FLAGS(prep_elem);
@@ -531,20 +529,20 @@ static void hwmp_prep_frame_process(struct net_device *dev,
531 529
532 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, 530 mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr,
533 cpu_to_le32(orig_dsn), 0, dst_addr, 531 cpu_to_le32(orig_dsn), 0, dst_addr,
534 cpu_to_le32(dst_dsn), mpath->next_hop->addr, hopcount, ttl, 532 cpu_to_le32(dst_dsn), mpath->next_hop->sta.addr, hopcount, ttl,
535 cpu_to_le32(lifetime), cpu_to_le32(metric), 533 cpu_to_le32(lifetime), cpu_to_le32(metric),
536 0, dev); 534 0, sdata);
537 rcu_read_unlock(); 535 rcu_read_unlock();
538 sdata->u.sta.mshstats.fwded_frames++; 536 sdata->u.mesh.mshstats.fwded_frames++;
539 return; 537 return;
540 538
541fail: 539fail:
542 rcu_read_unlock(); 540 rcu_read_unlock();
543 sdata->u.sta.mshstats.dropped_frames_no_route++; 541 sdata->u.mesh.mshstats.dropped_frames_no_route++;
544 return; 542 return;
545} 543}
546 544
547static void hwmp_perr_frame_process(struct net_device *dev, 545static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
548 struct ieee80211_mgmt *mgmt, u8 *perr_elem) 546 struct ieee80211_mgmt *mgmt, u8 *perr_elem)
549{ 547{
550 struct mesh_path *mpath; 548 struct mesh_path *mpath;
@@ -555,18 +553,18 @@ static void hwmp_perr_frame_process(struct net_device *dev,
555 dst_addr = PERR_IE_DST_ADDR(perr_elem); 553 dst_addr = PERR_IE_DST_ADDR(perr_elem);
556 dst_dsn = PERR_IE_DST_DSN(perr_elem); 554 dst_dsn = PERR_IE_DST_DSN(perr_elem);
557 rcu_read_lock(); 555 rcu_read_lock();
558 mpath = mesh_path_lookup(dst_addr, dev); 556 mpath = mesh_path_lookup(dst_addr, sdata);
559 if (mpath) { 557 if (mpath) {
560 spin_lock_bh(&mpath->state_lock); 558 spin_lock_bh(&mpath->state_lock);
561 if (mpath->flags & MESH_PATH_ACTIVE && 559 if (mpath->flags & MESH_PATH_ACTIVE &&
562 memcmp(ta, mpath->next_hop->addr, ETH_ALEN) == 0 && 560 memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 &&
563 (!(mpath->flags & MESH_PATH_DSN_VALID) || 561 (!(mpath->flags & MESH_PATH_DSN_VALID) ||
564 DSN_GT(dst_dsn, mpath->dsn))) { 562 DSN_GT(dst_dsn, mpath->dsn))) {
565 mpath->flags &= ~MESH_PATH_ACTIVE; 563 mpath->flags &= ~MESH_PATH_ACTIVE;
566 mpath->dsn = dst_dsn; 564 mpath->dsn = dst_dsn;
567 spin_unlock_bh(&mpath->state_lock); 565 spin_unlock_bh(&mpath->state_lock);
568 mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn), 566 mesh_path_error_tx(dst_addr, cpu_to_le32(dst_dsn),
569 dev->broadcast, dev); 567 sdata->dev->broadcast, sdata);
570 } else 568 } else
571 spin_unlock_bh(&mpath->state_lock); 569 spin_unlock_bh(&mpath->state_lock);
572 } 570 }
@@ -575,7 +573,7 @@ static void hwmp_perr_frame_process(struct net_device *dev,
575 573
576 574
577 575
578void mesh_rx_path_sel_frame(struct net_device *dev, 576void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
579 struct ieee80211_mgmt *mgmt, 577 struct ieee80211_mgmt *mgmt,
580 size_t len) 578 size_t len)
581{ 579{
@@ -583,6 +581,10 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
583 size_t baselen; 581 size_t baselen;
584 u32 last_hop_metric; 582 u32 last_hop_metric;
585 583
584 /* need action_code */
585 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
586 return;
587
586 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt; 588 baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
587 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable, 589 ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
588 len - baselen, &elems); 590 len - baselen, &elems);
@@ -592,25 +594,25 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
592 if (!elems.preq || elems.preq_len != 37) 594 if (!elems.preq || elems.preq_len != 37)
593 /* Right now we support just 1 destination and no AE */ 595 /* Right now we support just 1 destination and no AE */
594 return; 596 return;
595 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.preq); 597 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.preq);
596 if (!last_hop_metric) 598 if (!last_hop_metric)
597 return; 599 return;
598 hwmp_preq_frame_process(dev, mgmt, elems.preq, last_hop_metric); 600 hwmp_preq_frame_process(sdata, mgmt, elems.preq, last_hop_metric);
599 break; 601 break;
600 case MPATH_PREP: 602 case MPATH_PREP:
601 if (!elems.prep || elems.prep_len != 31) 603 if (!elems.prep || elems.prep_len != 31)
602 /* Right now we support no AE */ 604 /* Right now we support no AE */
603 return; 605 return;
604 last_hop_metric = hwmp_route_info_get(dev, mgmt, elems.prep); 606 last_hop_metric = hwmp_route_info_get(sdata, mgmt, elems.prep);
605 if (!last_hop_metric) 607 if (!last_hop_metric)
606 return; 608 return;
607 hwmp_prep_frame_process(dev, mgmt, elems.prep, last_hop_metric); 609 hwmp_prep_frame_process(sdata, mgmt, elems.prep, last_hop_metric);
608 break; 610 break;
609 case MPATH_PERR: 611 case MPATH_PERR:
610 if (!elems.perr || elems.perr_len != 12) 612 if (!elems.perr || elems.perr_len != 12)
611 /* Right now we support only one destination per PERR */ 613 /* Right now we support only one destination per PERR */
612 return; 614 return;
613 hwmp_perr_frame_process(dev, mgmt, elems.perr); 615 hwmp_perr_frame_process(sdata, mgmt, elems.perr);
614 default: 616 default:
615 return; 617 return;
616 } 618 }
@@ -628,9 +630,8 @@ void mesh_rx_path_sel_frame(struct net_device *dev,
628 */ 630 */
629static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) 631static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
630{ 632{
631 struct ieee80211_sub_if_data *sdata = 633 struct ieee80211_sub_if_data *sdata = mpath->sdata;
632 IEEE80211_DEV_TO_SUB_IF(mpath->dev); 634 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
633 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
634 struct mesh_preq_queue *preq_node; 635 struct mesh_preq_queue *preq_node;
635 636
636 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL); 637 preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
@@ -639,9 +640,9 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
639 return; 640 return;
640 } 641 }
641 642
642 spin_lock(&ifsta->mesh_preq_queue_lock); 643 spin_lock(&ifmsh->mesh_preq_queue_lock);
643 if (ifsta->preq_queue_len == MAX_PREQ_QUEUE_LEN) { 644 if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
644 spin_unlock(&ifsta->mesh_preq_queue_lock); 645 spin_unlock(&ifmsh->mesh_preq_queue_lock);
645 kfree(preq_node); 646 kfree(preq_node);
646 if (printk_ratelimit()) 647 if (printk_ratelimit())
647 printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n"); 648 printk(KERN_DEBUG "Mesh HWMP: PREQ node queue full\n");
@@ -651,55 +652,53 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
651 memcpy(preq_node->dst, mpath->dst, ETH_ALEN); 652 memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
652 preq_node->flags = flags; 653 preq_node->flags = flags;
653 654
654 list_add_tail(&preq_node->list, &ifsta->preq_queue.list); 655 list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
655 ++ifsta->preq_queue_len; 656 ++ifmsh->preq_queue_len;
656 spin_unlock(&ifsta->mesh_preq_queue_lock); 657 spin_unlock(&ifmsh->mesh_preq_queue_lock);
657 658
658 if (time_after(jiffies, ifsta->last_preq + min_preq_int_jiff(sdata))) 659 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
659 queue_work(sdata->local->hw.workqueue, &ifsta->work); 660 queue_work(sdata->local->hw.workqueue, &ifmsh->work);
660 661
661 else if (time_before(jiffies, ifsta->last_preq)) { 662 else if (time_before(jiffies, ifmsh->last_preq)) {
662 /* avoid long wait if did not send preqs for a long time 663 /* avoid long wait if did not send preqs for a long time
663 * and jiffies wrapped around 664 * and jiffies wrapped around
664 */ 665 */
665 ifsta->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; 666 ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
666 queue_work(sdata->local->hw.workqueue, &ifsta->work); 667 queue_work(sdata->local->hw.workqueue, &ifmsh->work);
667 } else 668 } else
668 mod_timer(&ifsta->mesh_path_timer, ifsta->last_preq + 669 mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
669 min_preq_int_jiff(sdata)); 670 min_preq_int_jiff(sdata));
670} 671}
671 672
672/** 673/**
673 * mesh_path_start_discovery - launch a path discovery from the PREQ queue 674 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
674 * 675 *
675 * @dev: local mesh interface 676 * @sdata: local mesh subif
676 */ 677 */
677void mesh_path_start_discovery(struct net_device *dev) 678void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
678{ 679{
679 struct ieee80211_sub_if_data *sdata = 680 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
680 IEEE80211_DEV_TO_SUB_IF(dev);
681 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
682 struct mesh_preq_queue *preq_node; 681 struct mesh_preq_queue *preq_node;
683 struct mesh_path *mpath; 682 struct mesh_path *mpath;
684 u8 ttl, dst_flags; 683 u8 ttl, dst_flags;
685 u32 lifetime; 684 u32 lifetime;
686 685
687 spin_lock(&ifsta->mesh_preq_queue_lock); 686 spin_lock(&ifmsh->mesh_preq_queue_lock);
688 if (!ifsta->preq_queue_len || 687 if (!ifmsh->preq_queue_len ||
689 time_before(jiffies, ifsta->last_preq + 688 time_before(jiffies, ifmsh->last_preq +
690 min_preq_int_jiff(sdata))) { 689 min_preq_int_jiff(sdata))) {
691 spin_unlock(&ifsta->mesh_preq_queue_lock); 690 spin_unlock(&ifmsh->mesh_preq_queue_lock);
692 return; 691 return;
693 } 692 }
694 693
695 preq_node = list_first_entry(&ifsta->preq_queue.list, 694 preq_node = list_first_entry(&ifmsh->preq_queue.list,
696 struct mesh_preq_queue, list); 695 struct mesh_preq_queue, list);
697 list_del(&preq_node->list); 696 list_del(&preq_node->list);
698 --ifsta->preq_queue_len; 697 --ifmsh->preq_queue_len;
699 spin_unlock(&ifsta->mesh_preq_queue_lock); 698 spin_unlock(&ifmsh->mesh_preq_queue_lock);
700 699
701 rcu_read_lock(); 700 rcu_read_lock();
702 mpath = mesh_path_lookup(preq_node->dst, dev); 701 mpath = mesh_path_lookup(preq_node->dst, sdata);
703 if (!mpath) 702 if (!mpath)
704 goto enddiscovery; 703 goto enddiscovery;
705 704
@@ -721,18 +720,18 @@ void mesh_path_start_discovery(struct net_device *dev)
721 goto enddiscovery; 720 goto enddiscovery;
722 } 721 }
723 722
724 ifsta->last_preq = jiffies; 723 ifmsh->last_preq = jiffies;
725 724
726 if (time_after(jiffies, ifsta->last_dsn_update + 725 if (time_after(jiffies, ifmsh->last_dsn_update +
727 net_traversal_jiffies(sdata)) || 726 net_traversal_jiffies(sdata)) ||
728 time_before(jiffies, ifsta->last_dsn_update)) { 727 time_before(jiffies, ifmsh->last_dsn_update)) {
729 ++ifsta->dsn; 728 ++ifmsh->dsn;
730 sdata->u.sta.last_dsn_update = jiffies; 729 sdata->u.mesh.last_dsn_update = jiffies;
731 } 730 }
732 lifetime = default_lifetime(sdata); 731 lifetime = default_lifetime(sdata);
733 ttl = sdata->u.sta.mshcfg.dot11MeshTTL; 732 ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
734 if (ttl == 0) { 733 if (ttl == 0) {
735 sdata->u.sta.mshstats.dropped_frames_ttl++; 734 sdata->u.mesh.mshstats.dropped_frames_ttl++;
736 spin_unlock_bh(&mpath->state_lock); 735 spin_unlock_bh(&mpath->state_lock);
737 goto enddiscovery; 736 goto enddiscovery;
738 } 737 }
@@ -743,11 +742,11 @@ void mesh_path_start_discovery(struct net_device *dev)
743 dst_flags = MP_F_RF; 742 dst_flags = MP_F_RF;
744 743
745 spin_unlock_bh(&mpath->state_lock); 744 spin_unlock_bh(&mpath->state_lock);
746 mesh_path_sel_frame_tx(MPATH_PREQ, 0, dev->dev_addr, 745 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
747 cpu_to_le32(ifsta->dsn), dst_flags, mpath->dst, 746 cpu_to_le32(ifmsh->dsn), dst_flags, mpath->dst,
748 cpu_to_le32(mpath->dsn), dev->broadcast, 0, 747 cpu_to_le32(mpath->dsn), sdata->dev->broadcast, 0,
749 ttl, cpu_to_le32(lifetime), 0, 748 ttl, cpu_to_le32(lifetime), 0,
750 cpu_to_le32(ifsta->preq_id++), dev); 749 cpu_to_le32(ifmsh->preq_id++), sdata);
751 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 750 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
752 751
753enddiscovery: 752enddiscovery:
@@ -759,7 +758,7 @@ enddiscovery:
759 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame 758 * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame
760 * 759 *
761 * @skb: 802.11 frame to be sent 760 * @skb: 802.11 frame to be sent
762 * @dev: network device the frame will be sent through 761 * @sdata: network subif the frame will be sent through
763 * @fwd_frame: true if this frame was originally from a different host 762 * @fwd_frame: true if this frame was originally from a different host
764 * 763 *
765 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is 764 * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is
@@ -767,9 +766,9 @@ enddiscovery:
767 * sent when the path is resolved. This means the caller must not free the skb 766 * sent when the path is resolved. This means the caller must not free the skb
768 * in this case. 767 * in this case.
769 */ 768 */
770int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) 769int mesh_nexthop_lookup(struct sk_buff *skb,
770 struct ieee80211_sub_if_data *sdata)
771{ 771{
772 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
773 struct sk_buff *skb_to_free = NULL; 772 struct sk_buff *skb_to_free = NULL;
774 struct mesh_path *mpath; 773 struct mesh_path *mpath;
775 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 774 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -777,14 +776,14 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
777 int err = 0; 776 int err = 0;
778 777
779 rcu_read_lock(); 778 rcu_read_lock();
780 mpath = mesh_path_lookup(dst_addr, dev); 779 mpath = mesh_path_lookup(dst_addr, sdata);
781 780
782 if (!mpath) { 781 if (!mpath) {
783 mesh_path_add(dst_addr, dev); 782 mesh_path_add(dst_addr, sdata);
784 mpath = mesh_path_lookup(dst_addr, dev); 783 mpath = mesh_path_lookup(dst_addr, sdata);
785 if (!mpath) { 784 if (!mpath) {
786 dev_kfree_skb(skb); 785 dev_kfree_skb(skb);
787 sdata->u.sta.mshstats.dropped_frames_no_route++; 786 sdata->u.mesh.mshstats.dropped_frames_no_route++;
788 err = -ENOSPC; 787 err = -ENOSPC;
789 goto endlookup; 788 goto endlookup;
790 } 789 }
@@ -792,14 +791,15 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
792 791
793 if (mpath->flags & MESH_PATH_ACTIVE) { 792 if (mpath->flags & MESH_PATH_ACTIVE) {
794 if (time_after(jiffies, mpath->exp_time - 793 if (time_after(jiffies, mpath->exp_time -
795 msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) 794 msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
796 && !memcmp(dev->dev_addr, hdr->addr4, ETH_ALEN) 795 && !memcmp(sdata->dev->dev_addr, hdr->addr4,
796 ETH_ALEN)
797 && !(mpath->flags & MESH_PATH_RESOLVING) 797 && !(mpath->flags & MESH_PATH_RESOLVING)
798 && !(mpath->flags & MESH_PATH_FIXED)) { 798 && !(mpath->flags & MESH_PATH_FIXED)) {
799 mesh_queue_preq(mpath, 799 mesh_queue_preq(mpath,
800 PREQ_Q_F_START | PREQ_Q_F_REFRESH); 800 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
801 } 801 }
802 memcpy(hdr->addr1, mpath->next_hop->addr, 802 memcpy(hdr->addr1, mpath->next_hop->sta.addr,
803 ETH_ALEN); 803 ETH_ALEN);
804 } else { 804 } else {
805 if (!(mpath->flags & MESH_PATH_RESOLVING)) { 805 if (!(mpath->flags & MESH_PATH_RESOLVING)) {
@@ -815,7 +815,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev)
815 815
816 skb_queue_tail(&mpath->frame_queue, skb); 816 skb_queue_tail(&mpath->frame_queue, skb);
817 if (skb_to_free) 817 if (skb_to_free)
818 mesh_path_discard_frame(skb_to_free, dev); 818 mesh_path_discard_frame(skb_to_free, sdata);
819 err = -ENOENT; 819 err = -ENOENT;
820 } 820 }
821 821
@@ -835,7 +835,7 @@ void mesh_path_timer(unsigned long data)
835 if (!mpath) 835 if (!mpath)
836 goto endmpathtimer; 836 goto endmpathtimer;
837 spin_lock_bh(&mpath->state_lock); 837 spin_lock_bh(&mpath->state_lock);
838 sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev); 838 sdata = mpath->sdata;
839 if (mpath->flags & MESH_PATH_RESOLVED || 839 if (mpath->flags & MESH_PATH_RESOLVED ||
840 (!(mpath->flags & MESH_PATH_RESOLVING))) 840 (!(mpath->flags & MESH_PATH_RESOLVING)))
841 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED); 841 mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 838ee60492ad..3c72557df45a 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/etherdevice.h> 10#include <linux/etherdevice.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/netdevice.h>
13#include <linux/random.h> 12#include <linux/random.h>
14#include <linux/spinlock.h> 13#include <linux/spinlock.h>
15#include <linux/string.h> 14#include <linux/string.h>
@@ -37,6 +36,7 @@ struct mpath_node {
37}; 36};
38 37
39static struct mesh_table *mesh_paths; 38static struct mesh_table *mesh_paths;
39static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
40 40
41/* This lock will have the grow table function as writer and add / delete nodes 41/* This lock will have the grow table function as writer and add / delete nodes
42 * as readers. When reading the table (i.e. doing lookups) we are well protected 42 * as readers. When reading the table (i.e. doing lookups) we are well protected
@@ -62,13 +62,13 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
62/** 62/**
63 * mesh_path_lookup - look up a path in the mesh path table 63 * mesh_path_lookup - look up a path in the mesh path table
64 * @dst: hardware address (ETH_ALEN length) of destination 64 * @dst: hardware address (ETH_ALEN length) of destination
65 * @dev: local interface 65 * @sdata: local subif
66 * 66 *
67 * Returns: pointer to the mesh path structure, or NULL if not found 67 * Returns: pointer to the mesh path structure, or NULL if not found
68 * 68 *
69 * Locking: must be called within a read rcu section. 69 * Locking: must be called within a read rcu section.
70 */ 70 */
71struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev) 71struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
72{ 72{
73 struct mesh_path *mpath; 73 struct mesh_path *mpath;
74 struct hlist_node *n; 74 struct hlist_node *n;
@@ -78,10 +78,10 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
78 78
79 tbl = rcu_dereference(mesh_paths); 79 tbl = rcu_dereference(mesh_paths);
80 80
81 bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)]; 81 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
82 hlist_for_each_entry_rcu(node, n, bucket, list) { 82 hlist_for_each_entry_rcu(node, n, bucket, list) {
83 mpath = node->mpath; 83 mpath = node->mpath;
84 if (mpath->dev == dev && 84 if (mpath->sdata == sdata &&
85 memcmp(dst, mpath->dst, ETH_ALEN) == 0) { 85 memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
86 if (MPATH_EXPIRED(mpath)) { 86 if (MPATH_EXPIRED(mpath)) {
87 spin_lock_bh(&mpath->state_lock); 87 spin_lock_bh(&mpath->state_lock);
@@ -95,16 +95,44 @@ struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
95 return NULL; 95 return NULL;
96} 96}
97 97
98struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
99{
100 struct mesh_path *mpath;
101 struct hlist_node *n;
102 struct hlist_head *bucket;
103 struct mesh_table *tbl;
104 struct mpath_node *node;
105
106 tbl = rcu_dereference(mpp_paths);
107
108 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
109 hlist_for_each_entry_rcu(node, n, bucket, list) {
110 mpath = node->mpath;
111 if (mpath->sdata == sdata &&
112 memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
113 if (MPATH_EXPIRED(mpath)) {
114 spin_lock_bh(&mpath->state_lock);
115 if (MPATH_EXPIRED(mpath))
116 mpath->flags &= ~MESH_PATH_ACTIVE;
117 spin_unlock_bh(&mpath->state_lock);
118 }
119 return mpath;
120 }
121 }
122 return NULL;
123}
124
125
98/** 126/**
99 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index 127 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
100 * @idx: index 128 * @idx: index
101 * @dev: local interface, or NULL for all entries 129 * @sdata: local subif, or NULL for all entries
102 * 130 *
103 * Returns: pointer to the mesh path structure, or NULL if not found. 131 * Returns: pointer to the mesh path structure, or NULL if not found.
104 * 132 *
105 * Locking: must be called within a read rcu section. 133 * Locking: must be called within a read rcu section.
106 */ 134 */
107struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev) 135struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
108{ 136{
109 struct mpath_node *node; 137 struct mpath_node *node;
110 struct hlist_node *p; 138 struct hlist_node *p;
@@ -112,7 +140,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
112 int j = 0; 140 int j = 0;
113 141
114 for_each_mesh_entry(mesh_paths, p, node, i) { 142 for_each_mesh_entry(mesh_paths, p, node, i) {
115 if (dev && node->mpath->dev != dev) 143 if (sdata && node->mpath->sdata != sdata)
116 continue; 144 continue;
117 if (j++ == idx) { 145 if (j++ == idx) {
118 if (MPATH_EXPIRED(node->mpath)) { 146 if (MPATH_EXPIRED(node->mpath)) {
@@ -131,15 +159,14 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
131/** 159/**
132 * mesh_path_add - allocate and add a new path to the mesh path table 160 * mesh_path_add - allocate and add a new path to the mesh path table
133 * @addr: destination address of the path (ETH_ALEN length) 161 * @addr: destination address of the path (ETH_ALEN length)
134 * @dev: local interface 162 * @sdata: local subif
135 * 163 *
136 * Returns: 0 on sucess 164 * Returns: 0 on sucess
137 * 165 *
138 * State: the initial state of the new path is set to 0 166 * State: the initial state of the new path is set to 0
139 */ 167 */
140int mesh_path_add(u8 *dst, struct net_device *dev) 168int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
141{ 169{
142 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
143 struct mesh_path *mpath, *new_mpath; 170 struct mesh_path *mpath, *new_mpath;
144 struct mpath_node *node, *new_node; 171 struct mpath_node *node, *new_node;
145 struct hlist_head *bucket; 172 struct hlist_head *bucket;
@@ -148,14 +175,14 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
148 int err = 0; 175 int err = 0;
149 u32 hash_idx; 176 u32 hash_idx;
150 177
151 if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0) 178 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
152 /* never add ourselves as neighbours */ 179 /* never add ourselves as neighbours */
153 return -ENOTSUPP; 180 return -ENOTSUPP;
154 181
155 if (is_multicast_ether_addr(dst)) 182 if (is_multicast_ether_addr(dst))
156 return -ENOTSUPP; 183 return -ENOTSUPP;
157 184
158 if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0) 185 if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
159 return -ENOSPC; 186 return -ENOSPC;
160 187
161 err = -ENOMEM; 188 err = -ENOMEM;
@@ -169,7 +196,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
169 196
170 read_lock(&pathtbl_resize_lock); 197 read_lock(&pathtbl_resize_lock);
171 memcpy(new_mpath->dst, dst, ETH_ALEN); 198 memcpy(new_mpath->dst, dst, ETH_ALEN);
172 new_mpath->dev = dev; 199 new_mpath->sdata = sdata;
173 new_mpath->flags = 0; 200 new_mpath->flags = 0;
174 skb_queue_head_init(&new_mpath->frame_queue); 201 skb_queue_head_init(&new_mpath->frame_queue);
175 new_node->mpath = new_mpath; 202 new_node->mpath = new_mpath;
@@ -179,7 +206,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
179 spin_lock_init(&new_mpath->state_lock); 206 spin_lock_init(&new_mpath->state_lock);
180 init_timer(&new_mpath->timer); 207 init_timer(&new_mpath->timer);
181 208
182 hash_idx = mesh_table_hash(dst, dev, mesh_paths); 209 hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
183 bucket = &mesh_paths->hash_buckets[hash_idx]; 210 bucket = &mesh_paths->hash_buckets[hash_idx];
184 211
185 spin_lock(&mesh_paths->hashwlock[hash_idx]); 212 spin_lock(&mesh_paths->hashwlock[hash_idx]);
@@ -187,7 +214,7 @@ int mesh_path_add(u8 *dst, struct net_device *dev)
187 err = -EEXIST; 214 err = -EEXIST;
188 hlist_for_each_entry(node, n, bucket, list) { 215 hlist_for_each_entry(node, n, bucket, list) {
189 mpath = node->mpath; 216 mpath = node->mpath;
190 if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN) == 0) 217 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
191 goto err_exists; 218 goto err_exists;
192 } 219 }
193 220
@@ -223,7 +250,92 @@ err_exists:
223err_node_alloc: 250err_node_alloc:
224 kfree(new_mpath); 251 kfree(new_mpath);
225err_path_alloc: 252err_path_alloc:
226 atomic_dec(&sdata->u.sta.mpaths); 253 atomic_dec(&sdata->u.mesh.mpaths);
254 return err;
255}
256
257
258int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
259{
260 struct mesh_path *mpath, *new_mpath;
261 struct mpath_node *node, *new_node;
262 struct hlist_head *bucket;
263 struct hlist_node *n;
264 int grow = 0;
265 int err = 0;
266 u32 hash_idx;
267
268
269 if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
270 /* never add ourselves as neighbours */
271 return -ENOTSUPP;
272
273 if (is_multicast_ether_addr(dst))
274 return -ENOTSUPP;
275
276 err = -ENOMEM;
277 new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
278 if (!new_mpath)
279 goto err_path_alloc;
280
281 new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
282 if (!new_node)
283 goto err_node_alloc;
284
285 read_lock(&pathtbl_resize_lock);
286 memcpy(new_mpath->dst, dst, ETH_ALEN);
287 memcpy(new_mpath->mpp, mpp, ETH_ALEN);
288 new_mpath->sdata = sdata;
289 new_mpath->flags = 0;
290 skb_queue_head_init(&new_mpath->frame_queue);
291 new_node->mpath = new_mpath;
292 new_mpath->exp_time = jiffies;
293 spin_lock_init(&new_mpath->state_lock);
294
295 hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
296 bucket = &mpp_paths->hash_buckets[hash_idx];
297
298 spin_lock(&mpp_paths->hashwlock[hash_idx]);
299
300 err = -EEXIST;
301 hlist_for_each_entry(node, n, bucket, list) {
302 mpath = node->mpath;
303 if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
304 goto err_exists;
305 }
306
307 hlist_add_head_rcu(&new_node->list, bucket);
308 if (atomic_inc_return(&mpp_paths->entries) >=
309 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
310 grow = 1;
311
312 spin_unlock(&mpp_paths->hashwlock[hash_idx]);
313 read_unlock(&pathtbl_resize_lock);
314 if (grow) {
315 struct mesh_table *oldtbl, *newtbl;
316
317 write_lock(&pathtbl_resize_lock);
318 oldtbl = mpp_paths;
319 newtbl = mesh_table_grow(mpp_paths);
320 if (!newtbl) {
321 write_unlock(&pathtbl_resize_lock);
322 return 0;
323 }
324 rcu_assign_pointer(mpp_paths, newtbl);
325 write_unlock(&pathtbl_resize_lock);
326
327 synchronize_rcu();
328 mesh_table_free(oldtbl, false);
329 }
330 return 0;
331
332err_exists:
333 spin_unlock(&mpp_paths->hashwlock[hash_idx]);
334 read_unlock(&pathtbl_resize_lock);
335 kfree(new_node);
336err_node_alloc:
337 kfree(new_mpath);
338err_path_alloc:
227 return err; 339 return err;
228} 340}
229 341
@@ -241,7 +353,7 @@ void mesh_plink_broken(struct sta_info *sta)
241 struct mesh_path *mpath; 353 struct mesh_path *mpath;
242 struct mpath_node *node; 354 struct mpath_node *node;
243 struct hlist_node *p; 355 struct hlist_node *p;
244 struct net_device *dev = sta->sdata->dev; 356 struct ieee80211_sub_if_data *sdata = sta->sdata;
245 int i; 357 int i;
246 358
247 rcu_read_lock(); 359 rcu_read_lock();
@@ -256,7 +368,7 @@ void mesh_plink_broken(struct sta_info *sta)
256 spin_unlock_bh(&mpath->state_lock); 368 spin_unlock_bh(&mpath->state_lock);
257 mesh_path_error_tx(mpath->dst, 369 mesh_path_error_tx(mpath->dst,
258 cpu_to_le32(mpath->dsn), 370 cpu_to_le32(mpath->dsn),
259 dev->broadcast, dev); 371 sdata->dev->broadcast, sdata);
260 } else 372 } else
261 spin_unlock_bh(&mpath->state_lock); 373 spin_unlock_bh(&mpath->state_lock);
262 } 374 }
@@ -284,11 +396,11 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
284 for_each_mesh_entry(mesh_paths, p, node, i) { 396 for_each_mesh_entry(mesh_paths, p, node, i) {
285 mpath = node->mpath; 397 mpath = node->mpath;
286 if (mpath->next_hop == sta) 398 if (mpath->next_hop == sta)
287 mesh_path_del(mpath->dst, mpath->dev); 399 mesh_path_del(mpath->dst, mpath->sdata);
288 } 400 }
289} 401}
290 402
291void mesh_path_flush(struct net_device *dev) 403void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
292{ 404{
293 struct mesh_path *mpath; 405 struct mesh_path *mpath;
294 struct mpath_node *node; 406 struct mpath_node *node;
@@ -297,19 +409,18 @@ void mesh_path_flush(struct net_device *dev)
297 409
298 for_each_mesh_entry(mesh_paths, p, node, i) { 410 for_each_mesh_entry(mesh_paths, p, node, i) {
299 mpath = node->mpath; 411 mpath = node->mpath;
300 if (mpath->dev == dev) 412 if (mpath->sdata == sdata)
301 mesh_path_del(mpath->dst, mpath->dev); 413 mesh_path_del(mpath->dst, mpath->sdata);
302 } 414 }
303} 415}
304 416
305static void mesh_path_node_reclaim(struct rcu_head *rp) 417static void mesh_path_node_reclaim(struct rcu_head *rp)
306{ 418{
307 struct mpath_node *node = container_of(rp, struct mpath_node, rcu); 419 struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
308 struct ieee80211_sub_if_data *sdata = 420 struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
309 IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);
310 421
311 del_timer_sync(&node->mpath->timer); 422 del_timer_sync(&node->mpath->timer);
312 atomic_dec(&sdata->u.sta.mpaths); 423 atomic_dec(&sdata->u.mesh.mpaths);
313 kfree(node->mpath); 424 kfree(node->mpath);
314 kfree(node); 425 kfree(node);
315} 426}
@@ -318,11 +429,11 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
318 * mesh_path_del - delete a mesh path from the table 429 * mesh_path_del - delete a mesh path from the table
319 * 430 *
320 * @addr: dst address (ETH_ALEN length) 431 * @addr: dst address (ETH_ALEN length)
321 * @dev: local interface 432 * @sdata: local subif
322 * 433 *
323 * Returns: 0 if succesful 434 * Returns: 0 if succesful
324 */ 435 */
325int mesh_path_del(u8 *addr, struct net_device *dev) 436int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
326{ 437{
327 struct mesh_path *mpath; 438 struct mesh_path *mpath;
328 struct mpath_node *node; 439 struct mpath_node *node;
@@ -332,13 +443,13 @@ int mesh_path_del(u8 *addr, struct net_device *dev)
332 int err = 0; 443 int err = 0;
333 444
334 read_lock(&pathtbl_resize_lock); 445 read_lock(&pathtbl_resize_lock);
335 hash_idx = mesh_table_hash(addr, dev, mesh_paths); 446 hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
336 bucket = &mesh_paths->hash_buckets[hash_idx]; 447 bucket = &mesh_paths->hash_buckets[hash_idx];
337 448
338 spin_lock(&mesh_paths->hashwlock[hash_idx]); 449 spin_lock(&mesh_paths->hashwlock[hash_idx]);
339 hlist_for_each_entry(node, n, bucket, list) { 450 hlist_for_each_entry(node, n, bucket, list) {
340 mpath = node->mpath; 451 mpath = node->mpath;
341 if (mpath->dev == dev && 452 if (mpath->sdata == sdata &&
342 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 453 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
343 spin_lock_bh(&mpath->state_lock); 454 spin_lock_bh(&mpath->state_lock);
344 mpath->flags |= MESH_PATH_RESOLVING; 455 mpath->flags |= MESH_PATH_RESOLVING;
@@ -378,33 +489,33 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
378 * mesh_path_discard_frame - discard a frame whose path could not be resolved 489 * mesh_path_discard_frame - discard a frame whose path could not be resolved
379 * 490 *
380 * @skb: frame to discard 491 * @skb: frame to discard
381 * @dev: network device the frame was to be sent through 492 * @sdata: network subif the frame was to be sent through
382 * 493 *
383 * If the frame was beign forwarded from another MP, a PERR frame will be sent 494 * If the frame was beign forwarded from another MP, a PERR frame will be sent
384 * to the precursor. 495 * to the precursor.
385 * 496 *
386 * Locking: the function must me called within a rcu_read_lock region 497 * Locking: the function must me called within a rcu_read_lock region
387 */ 498 */
388void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) 499void mesh_path_discard_frame(struct sk_buff *skb,
500 struct ieee80211_sub_if_data *sdata)
389{ 501{
390 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
391 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 502 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
392 struct mesh_path *mpath; 503 struct mesh_path *mpath;
393 u32 dsn = 0; 504 u32 dsn = 0;
394 505
395 if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) { 506 if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
396 u8 *ra, *da; 507 u8 *ra, *da;
397 508
398 da = hdr->addr3; 509 da = hdr->addr3;
399 ra = hdr->addr2; 510 ra = hdr->addr2;
400 mpath = mesh_path_lookup(da, dev); 511 mpath = mesh_path_lookup(da, sdata);
401 if (mpath) 512 if (mpath)
402 dsn = ++mpath->dsn; 513 dsn = ++mpath->dsn;
403 mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev); 514 mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, sdata);
404 } 515 }
405 516
406 kfree_skb(skb); 517 kfree_skb(skb);
407 sdata->u.sta.mshstats.dropped_frames_no_route++; 518 sdata->u.mesh.mshstats.dropped_frames_no_route++;
408} 519}
409 520
410/** 521/**
@@ -416,14 +527,11 @@ void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
416 */ 527 */
417void mesh_path_flush_pending(struct mesh_path *mpath) 528void mesh_path_flush_pending(struct mesh_path *mpath)
418{ 529{
419 struct ieee80211_sub_if_data *sdata;
420 struct sk_buff *skb; 530 struct sk_buff *skb;
421 531
422 sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);
423
424 while ((skb = skb_dequeue(&mpath->frame_queue)) && 532 while ((skb = skb_dequeue(&mpath->frame_queue)) &&
425 (mpath->flags & MESH_PATH_ACTIVE)) 533 (mpath->flags & MESH_PATH_ACTIVE))
426 mesh_path_discard_frame(skb, mpath->dev); 534 mesh_path_discard_frame(skb, mpath->sdata);
427} 535}
428 536
429/** 537/**
@@ -472,7 +580,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
472 node = hlist_entry(p, struct mpath_node, list); 580 node = hlist_entry(p, struct mpath_node, list);
473 mpath = node->mpath; 581 mpath = node->mpath;
474 new_node->mpath = mpath; 582 new_node->mpath = mpath;
475 hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl); 583 hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
476 hlist_add_head(&new_node->list, 584 hlist_add_head(&new_node->list,
477 &newtbl->hash_buckets[hash_idx]); 585 &newtbl->hash_buckets[hash_idx]);
478 return 0; 586 return 0;
@@ -481,15 +589,25 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
481int mesh_pathtbl_init(void) 589int mesh_pathtbl_init(void)
482{ 590{
483 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 591 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
592 if (!mesh_paths)
593 return -ENOMEM;
484 mesh_paths->free_node = &mesh_path_node_free; 594 mesh_paths->free_node = &mesh_path_node_free;
485 mesh_paths->copy_node = &mesh_path_node_copy; 595 mesh_paths->copy_node = &mesh_path_node_copy;
486 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; 596 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
487 if (!mesh_paths) 597
598 mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
599 if (!mpp_paths) {
600 mesh_table_free(mesh_paths, true);
488 return -ENOMEM; 601 return -ENOMEM;
602 }
603 mpp_paths->free_node = &mesh_path_node_free;
604 mpp_paths->copy_node = &mesh_path_node_copy;
605 mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;
606
489 return 0; 607 return 0;
490} 608}
491 609
492void mesh_path_expire(struct net_device *dev) 610void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
493{ 611{
494 struct mesh_path *mpath; 612 struct mesh_path *mpath;
495 struct mpath_node *node; 613 struct mpath_node *node;
@@ -498,7 +616,7 @@ void mesh_path_expire(struct net_device *dev)
498 616
499 read_lock(&pathtbl_resize_lock); 617 read_lock(&pathtbl_resize_lock);
500 for_each_mesh_entry(mesh_paths, p, node, i) { 618 for_each_mesh_entry(mesh_paths, p, node, i) {
501 if (node->mpath->dev != dev) 619 if (node->mpath->sdata != sdata)
502 continue; 620 continue;
503 mpath = node->mpath; 621 mpath = node->mpath;
504 spin_lock_bh(&mpath->state_lock); 622 spin_lock_bh(&mpath->state_lock);
@@ -507,7 +625,7 @@ void mesh_path_expire(struct net_device *dev)
507 time_after(jiffies, 625 time_after(jiffies,
508 mpath->exp_time + MESH_PATH_EXPIRE)) { 626 mpath->exp_time + MESH_PATH_EXPIRE)) {
509 spin_unlock_bh(&mpath->state_lock); 627 spin_unlock_bh(&mpath->state_lock);
510 mesh_path_del(mpath->dst, mpath->dev); 628 mesh_path_del(mpath->dst, mpath->sdata);
511 } else 629 } else
512 spin_unlock_bh(&mpath->state_lock); 630 spin_unlock_bh(&mpath->state_lock);
513 } 631 }
@@ -517,4 +635,5 @@ void mesh_path_expire(struct net_device *dev)
517void mesh_pathtbl_unregister(void) 635void mesh_pathtbl_unregister(void)
518{ 636{
519 mesh_table_free(mesh_paths, true); 637 mesh_table_free(mesh_paths, true);
638 mesh_table_free(mpp_paths, true);
520} 639}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 9efeb1f07025..faac101c0f85 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -36,11 +36,11 @@
36#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9 36#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
37#define MESH_SECURITY_FAILED_VERIFICATION 10 37#define MESH_SECURITY_FAILED_VERIFICATION 10
38 38
39#define dot11MeshMaxRetries(s) (s->u.sta.mshcfg.dot11MeshMaxRetries) 39#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
40#define dot11MeshRetryTimeout(s) (s->u.sta.mshcfg.dot11MeshRetryTimeout) 40#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
41#define dot11MeshConfirmTimeout(s) (s->u.sta.mshcfg.dot11MeshConfirmTimeout) 41#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
42#define dot11MeshHoldingTimeout(s) (s->u.sta.mshcfg.dot11MeshHoldingTimeout) 42#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
43#define dot11MeshMaxPeerLinks(s) (s->u.sta.mshcfg.dot11MeshMaxPeerLinks) 43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
44 44
45enum plink_frame_type { 45enum plink_frame_type {
46 PLINK_OPEN = 0, 46 PLINK_OPEN = 0,
@@ -63,14 +63,14 @@ enum plink_event {
63static inline 63static inline
64void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) 64void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
65{ 65{
66 atomic_inc(&sdata->u.sta.mshstats.estab_plinks); 66 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
67 mesh_accept_plinks_update(sdata); 67 mesh_accept_plinks_update(sdata);
68} 68}
69 69
70static inline 70static inline
71void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) 71void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
72{ 72{
73 atomic_dec(&sdata->u.sta.mshstats.estab_plinks); 73 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
74 mesh_accept_plinks_update(sdata); 74 mesh_accept_plinks_update(sdata);
75} 75}
76 76
@@ -106,7 +106,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
106 return NULL; 106 return NULL;
107 107
108 sta->flags = WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED;
109 sta->supp_rates[local->hw.conf.channel->band] = rates; 109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
110 110
111 return sta; 111 return sta;
112} 112}
@@ -144,10 +144,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
144 spin_unlock_bh(&sta->lock); 144 spin_unlock_bh(&sta->lock);
145} 145}
146 146
147static int mesh_plink_frame_tx(struct net_device *dev, 147static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
148 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid, 148 enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
149 __le16 reason) { 149 __le16 reason) {
150 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 150 struct ieee80211_local *local = sdata->local;
151 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); 151 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400);
152 struct ieee80211_mgmt *mgmt; 152 struct ieee80211_mgmt *mgmt;
153 bool include_plid = false; 153 bool include_plid = false;
@@ -163,10 +163,10 @@ static int mesh_plink_frame_tx(struct net_device *dev,
163 mgmt = (struct ieee80211_mgmt *) 163 mgmt = (struct ieee80211_mgmt *)
164 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action)); 164 skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
165 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action)); 165 memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
166 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 166 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
167 IEEE80211_STYPE_ACTION); 167 IEEE80211_STYPE_ACTION);
168 memcpy(mgmt->da, da, ETH_ALEN); 168 memcpy(mgmt->da, da, ETH_ALEN);
169 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 169 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
170 /* BSSID is left zeroed, wildcard value */ 170 /* BSSID is left zeroed, wildcard value */
171 mgmt->u.action.category = PLINK_CATEGORY; 171 mgmt->u.action.category = PLINK_CATEGORY;
172 mgmt->u.action.u.plink_action.action_code = action; 172 mgmt->u.action.u.plink_action.action_code = action;
@@ -180,7 +180,7 @@ static int mesh_plink_frame_tx(struct net_device *dev,
180 /* two-byte status code followed by two-byte AID */ 180 /* two-byte status code followed by two-byte AID */
181 memset(pos, 0, 4); 181 memset(pos, 0, 4);
182 } 182 }
183 mesh_mgmt_ies_add(skb, dev); 183 mesh_mgmt_ies_add(skb, sdata);
184 } 184 }
185 185
186 /* Add Peer Link Management element */ 186 /* Add Peer Link Management element */
@@ -217,15 +217,14 @@ static int mesh_plink_frame_tx(struct net_device *dev,
217 memcpy(pos, &reason, 2); 217 memcpy(pos, &reason, 2);
218 } 218 }
219 219
220 ieee80211_sta_tx(dev, skb, 0); 220 ieee80211_tx_skb(sdata, skb, 0);
221 return 0; 221 return 0;
222} 222}
223 223
224void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev, 224void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct ieee80211_sub_if_data *sdata,
225 bool peer_accepting_plinks) 225 bool peer_accepting_plinks)
226{ 226{
227 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 227 struct ieee80211_local *local = sdata->local;
228 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
229 struct sta_info *sta; 228 struct sta_info *sta;
230 229
231 rcu_read_lock(); 230 rcu_read_lock();
@@ -244,10 +243,10 @@ void mesh_neighbour_update(u8 *hw_addr, u64 rates, struct net_device *dev,
244 } 243 }
245 244
246 sta->last_rx = jiffies; 245 sta->last_rx = jiffies;
247 sta->supp_rates[local->hw.conf.channel->band] = rates; 246 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
248 if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && 247 if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN &&
249 sdata->u.sta.accepting_plinks && 248 sdata->u.mesh.accepting_plinks &&
250 sdata->u.sta.mshcfg.auto_open_plinks) 249 sdata->u.mesh.mshcfg.auto_open_plinks)
251 mesh_plink_open(sta); 250 mesh_plink_open(sta);
252 251
253 rcu_read_unlock(); 252 rcu_read_unlock();
@@ -257,7 +256,6 @@ static void mesh_plink_timer(unsigned long data)
257{ 256{
258 struct sta_info *sta; 257 struct sta_info *sta;
259 __le16 llid, plid, reason; 258 __le16 llid, plid, reason;
260 struct net_device *dev = NULL;
261 struct ieee80211_sub_if_data *sdata; 259 struct ieee80211_sub_if_data *sdata;
262#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG 260#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
263 DECLARE_MAC_BUF(mac); 261 DECLARE_MAC_BUF(mac);
@@ -277,12 +275,11 @@ static void mesh_plink_timer(unsigned long data)
277 return; 275 return;
278 } 276 }
279 mpl_dbg("Mesh plink timer for %s fired on state %d\n", 277 mpl_dbg("Mesh plink timer for %s fired on state %d\n",
280 print_mac(mac, sta->addr), sta->plink_state); 278 print_mac(mac, sta->sta.addr), sta->plink_state);
281 reason = 0; 279 reason = 0;
282 llid = sta->llid; 280 llid = sta->llid;
283 plid = sta->plid; 281 plid = sta->plid;
284 sdata = sta->sdata; 282 sdata = sta->sdata;
285 dev = sdata->dev;
286 283
287 switch (sta->plink_state) { 284 switch (sta->plink_state) {
288 case PLINK_OPN_RCVD: 285 case PLINK_OPN_RCVD:
@@ -291,7 +288,7 @@ static void mesh_plink_timer(unsigned long data)
291 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { 288 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
292 u32 rand; 289 u32 rand;
293 mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n", 290 mpl_dbg("Mesh plink for %s (retry, timeout): %d %d\n",
294 print_mac(mac, sta->addr), 291 print_mac(mac, sta->sta.addr),
295 sta->plink_retries, sta->plink_timeout); 292 sta->plink_retries, sta->plink_timeout);
296 get_random_bytes(&rand, sizeof(u32)); 293 get_random_bytes(&rand, sizeof(u32));
297 sta->plink_timeout = sta->plink_timeout + 294 sta->plink_timeout = sta->plink_timeout +
@@ -299,7 +296,7 @@ static void mesh_plink_timer(unsigned long data)
299 ++sta->plink_retries; 296 ++sta->plink_retries;
300 mod_plink_timer(sta, sta->plink_timeout); 297 mod_plink_timer(sta, sta->plink_timeout);
301 spin_unlock_bh(&sta->lock); 298 spin_unlock_bh(&sta->lock);
302 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 299 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
303 0, 0); 300 0, 0);
304 break; 301 break;
305 } 302 }
@@ -312,7 +309,7 @@ static void mesh_plink_timer(unsigned long data)
312 sta->plink_state = PLINK_HOLDING; 309 sta->plink_state = PLINK_HOLDING;
313 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 310 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
314 spin_unlock_bh(&sta->lock); 311 spin_unlock_bh(&sta->lock);
315 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, plid, 312 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
316 reason); 313 reason);
317 break; 314 break;
318 case PLINK_HOLDING: 315 case PLINK_HOLDING:
@@ -355,10 +352,10 @@ int mesh_plink_open(struct sta_info *sta)
355 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 352 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
356 spin_unlock_bh(&sta->lock); 353 spin_unlock_bh(&sta->lock);
357 mpl_dbg("Mesh plink: starting establishment with %s\n", 354 mpl_dbg("Mesh plink: starting establishment with %s\n",
358 print_mac(mac, sta->addr)); 355 print_mac(mac, sta->sta.addr));
359 356
360 return mesh_plink_frame_tx(sdata->dev, PLINK_OPEN, 357 return mesh_plink_frame_tx(sdata, PLINK_OPEN,
361 sta->addr, llid, 0, 0); 358 sta->sta.addr, llid, 0, 0);
362} 359}
363 360
364void mesh_plink_block(struct sta_info *sta) 361void mesh_plink_block(struct sta_info *sta)
@@ -382,7 +379,7 @@ int mesh_plink_close(struct sta_info *sta)
382#endif 379#endif
383 380
384 mpl_dbg("Mesh plink: closing link with %s\n", 381 mpl_dbg("Mesh plink: closing link with %s\n",
385 print_mac(mac, sta->addr)); 382 print_mac(mac, sta->sta.addr));
386 spin_lock_bh(&sta->lock); 383 spin_lock_bh(&sta->lock);
387 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED); 384 sta->reason = cpu_to_le16(MESH_LINK_CANCELLED);
388 reason = sta->reason; 385 reason = sta->reason;
@@ -403,15 +400,14 @@ int mesh_plink_close(struct sta_info *sta)
403 llid = sta->llid; 400 llid = sta->llid;
404 plid = sta->plid; 401 plid = sta->plid;
405 spin_unlock_bh(&sta->lock); 402 spin_unlock_bh(&sta->lock);
406 mesh_plink_frame_tx(sta->sdata->dev, PLINK_CLOSE, sta->addr, llid, 403 mesh_plink_frame_tx(sta->sdata, PLINK_CLOSE, sta->sta.addr, llid,
407 plid, reason); 404 plid, reason);
408 return 0; 405 return 0;
409} 406}
410 407
411void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt, 408void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
412 size_t len, struct ieee80211_rx_status *rx_status) 409 size_t len, struct ieee80211_rx_status *rx_status)
413{ 410{
414 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
415 struct ieee80211_local *local = sdata->local; 411 struct ieee80211_local *local = sdata->local;
416 struct ieee802_11_elems elems; 412 struct ieee802_11_elems elems;
417 struct sta_info *sta; 413 struct sta_info *sta;
@@ -425,6 +421,10 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
425 DECLARE_MAC_BUF(mac); 421 DECLARE_MAC_BUF(mac);
426#endif 422#endif
427 423
424 /* need action_code, aux */
425 if (len < IEEE80211_MIN_ACTION_SIZE + 3)
426 return;
427
428 if (is_multicast_ether_addr(mgmt->da)) { 428 if (is_multicast_ether_addr(mgmt->da)) {
429 mpl_dbg("Mesh plink: ignore frame from multicast address"); 429 mpl_dbg("Mesh plink: ignore frame from multicast address");
430 return; 430 return;
@@ -478,7 +478,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
478 478
479 /* Now we will figure out the appropriate event... */ 479 /* Now we will figure out the appropriate event... */
480 event = PLINK_UNDEFINED; 480 event = PLINK_UNDEFINED;
481 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, dev))) { 481 if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
482 switch (ftype) { 482 switch (ftype) {
483 case PLINK_OPEN: 483 case PLINK_OPEN:
484 event = OPN_RJCT; 484 event = OPN_RJCT;
@@ -577,9 +577,9 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
577 sta->llid = llid; 577 sta->llid = llid;
578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 578 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
579 spin_unlock_bh(&sta->lock); 579 spin_unlock_bh(&sta->lock);
580 mesh_plink_frame_tx(dev, PLINK_OPEN, sta->addr, llid, 580 mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
581 0, 0); 581 0, 0);
582 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, 582 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
583 llid, plid, 0); 583 llid, plid, 0);
584 break; 584 break;
585 default: 585 default:
@@ -604,7 +604,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
604 604
605 llid = sta->llid; 605 llid = sta->llid;
606 spin_unlock_bh(&sta->lock); 606 spin_unlock_bh(&sta->lock);
607 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 607 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
608 plid, reason); 608 plid, reason);
609 break; 609 break;
610 case OPN_ACPT: 610 case OPN_ACPT:
@@ -613,7 +613,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
613 sta->plid = plid; 613 sta->plid = plid;
614 llid = sta->llid; 614 llid = sta->llid;
615 spin_unlock_bh(&sta->lock); 615 spin_unlock_bh(&sta->lock);
616 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 616 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
617 plid, 0); 617 plid, 0);
618 break; 618 break;
619 case CNF_ACPT: 619 case CNF_ACPT:
@@ -646,13 +646,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
646 646
647 llid = sta->llid; 647 llid = sta->llid;
648 spin_unlock_bh(&sta->lock); 648 spin_unlock_bh(&sta->lock);
649 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 649 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
650 plid, reason); 650 plid, reason);
651 break; 651 break;
652 case OPN_ACPT: 652 case OPN_ACPT:
653 llid = sta->llid; 653 llid = sta->llid;
654 spin_unlock_bh(&sta->lock); 654 spin_unlock_bh(&sta->lock);
655 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 655 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
656 plid, 0); 656 plid, 0);
657 break; 657 break;
658 case CNF_ACPT: 658 case CNF_ACPT:
@@ -661,7 +661,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
661 mesh_plink_inc_estab_count(sdata); 661 mesh_plink_inc_estab_count(sdata);
662 spin_unlock_bh(&sta->lock); 662 spin_unlock_bh(&sta->lock);
663 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 663 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
664 print_mac(mac, sta->addr)); 664 print_mac(mac, sta->sta.addr));
665 break; 665 break;
666 default: 666 default:
667 spin_unlock_bh(&sta->lock); 667 spin_unlock_bh(&sta->lock);
@@ -685,7 +685,7 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
685 685
686 llid = sta->llid; 686 llid = sta->llid;
687 spin_unlock_bh(&sta->lock); 687 spin_unlock_bh(&sta->lock);
688 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 688 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
689 plid, reason); 689 plid, reason);
690 break; 690 break;
691 case OPN_ACPT: 691 case OPN_ACPT:
@@ -694,8 +694,8 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
694 mesh_plink_inc_estab_count(sdata); 694 mesh_plink_inc_estab_count(sdata);
695 spin_unlock_bh(&sta->lock); 695 spin_unlock_bh(&sta->lock);
696 mpl_dbg("Mesh plink with %s ESTABLISHED\n", 696 mpl_dbg("Mesh plink with %s ESTABLISHED\n",
697 print_mac(mac, sta->addr)); 697 print_mac(mac, sta->sta.addr));
698 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 698 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
699 plid, 0); 699 plid, 0);
700 break; 700 break;
701 default: 701 default:
@@ -714,13 +714,13 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
714 llid = sta->llid; 714 llid = sta->llid;
715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 715 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
716 spin_unlock_bh(&sta->lock); 716 spin_unlock_bh(&sta->lock);
717 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 717 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
718 plid, reason); 718 plid, reason);
719 break; 719 break;
720 case OPN_ACPT: 720 case OPN_ACPT:
721 llid = sta->llid; 721 llid = sta->llid;
722 spin_unlock_bh(&sta->lock); 722 spin_unlock_bh(&sta->lock);
723 mesh_plink_frame_tx(dev, PLINK_CONFIRM, sta->addr, llid, 723 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
724 plid, 0); 724 plid, 0);
725 break; 725 break;
726 default: 726 default:
@@ -743,8 +743,8 @@ void mesh_rx_plink_frame(struct net_device *dev, struct ieee80211_mgmt *mgmt,
743 llid = sta->llid; 743 llid = sta->llid;
744 reason = sta->reason; 744 reason = sta->reason;
745 spin_unlock_bh(&sta->lock); 745 spin_unlock_bh(&sta->lock);
746 mesh_plink_frame_tx(dev, PLINK_CLOSE, sta->addr, llid, 746 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
747 plid, reason); 747 llid, plid, reason);
748 break; 748 break;
749 default: 749 default:
750 spin_unlock_bh(&sta->lock); 750 spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 902cac1bd246..e859a0ab6162 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -11,11 +11,6 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14/* TODO:
15 * order BSS list by RSSI(?) ("quality of AP")
16 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
17 * SSID)
18 */
19#include <linux/delay.h> 14#include <linux/delay.h>
20#include <linux/if_ether.h> 15#include <linux/if_ether.h>
21#include <linux/skbuff.h> 16#include <linux/skbuff.h>
@@ -26,607 +21,184 @@
26#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
27#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
28#include <net/iw_handler.h> 23#include <net/iw_handler.h>
29#include <asm/types.h>
30
31#include <net/mac80211.h> 24#include <net/mac80211.h>
25#include <asm/unaligned.h>
26
32#include "ieee80211_i.h" 27#include "ieee80211_i.h"
33#include "rate.h" 28#include "rate.h"
34#include "led.h" 29#include "led.h"
35#include "mesh.h"
36 30
31#define IEEE80211_ASSOC_SCANS_MAX_TRIES 2
37#define IEEE80211_AUTH_TIMEOUT (HZ / 5) 32#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
38#define IEEE80211_AUTH_MAX_TRIES 3 33#define IEEE80211_AUTH_MAX_TRIES 3
39#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 34#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
40#define IEEE80211_ASSOC_MAX_TRIES 3 35#define IEEE80211_ASSOC_MAX_TRIES 3
41#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 36#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
42#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
43#define IEEE80211_PROBE_INTERVAL (60 * HZ) 37#define IEEE80211_PROBE_INTERVAL (60 * HZ)
44#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 38#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
45#define IEEE80211_SCAN_INTERVAL (2 * HZ) 39#define IEEE80211_SCAN_INTERVAL (2 * HZ)
46#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) 40#define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ)
47#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) 41#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ)
48 42
49#define IEEE80211_PROBE_DELAY (HZ / 33)
50#define IEEE80211_CHANNEL_TIME (HZ / 33)
51#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
52#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
53#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ) 43#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
54#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ) 44#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
55#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
56 45
57#define IEEE80211_IBSS_MAX_STA_ENTRIES 128 46#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
58 47
59 48
60#define ERP_INFO_USE_PROTECTION BIT(1) 49/* utils */
61
62/* mgmt header + 1 byte action code */
63#define IEEE80211_MIN_ACTION_SIZE (24 + 1)
64
65#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
66#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
67#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0
68#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
69#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
70
71/* next values represent the buffer size for A-MPDU frame.
72 * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2) */
73#define IEEE80211_MIN_AMPDU_BUF 0x8
74#define IEEE80211_MAX_AMPDU_BUF 0x40
75
76static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst,
77 u8 *ssid, size_t ssid_len);
78static struct ieee80211_sta_bss *
79ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
80 u8 *ssid, u8 ssid_len);
81static void ieee80211_rx_bss_put(struct ieee80211_local *local,
82 struct ieee80211_sta_bss *bss);
83static int ieee80211_sta_find_ibss(struct net_device *dev,
84 struct ieee80211_if_sta *ifsta);
85static int ieee80211_sta_wep_configured(struct net_device *dev);
86static int ieee80211_sta_start_scan(struct net_device *dev,
87 u8 *ssid, size_t ssid_len);
88static int ieee80211_sta_config_auth(struct net_device *dev,
89 struct ieee80211_if_sta *ifsta);
90static void sta_rx_agg_session_timer_expired(unsigned long data);
91
92
93void ieee802_11_parse_elems(u8 *start, size_t len,
94 struct ieee802_11_elems *elems)
95{
96 size_t left = len;
97 u8 *pos = start;
98
99 memset(elems, 0, sizeof(*elems));
100
101 while (left >= 2) {
102 u8 id, elen;
103
104 id = *pos++;
105 elen = *pos++;
106 left -= 2;
107
108 if (elen > left)
109 return;
110
111 switch (id) {
112 case WLAN_EID_SSID:
113 elems->ssid = pos;
114 elems->ssid_len = elen;
115 break;
116 case WLAN_EID_SUPP_RATES:
117 elems->supp_rates = pos;
118 elems->supp_rates_len = elen;
119 break;
120 case WLAN_EID_FH_PARAMS:
121 elems->fh_params = pos;
122 elems->fh_params_len = elen;
123 break;
124 case WLAN_EID_DS_PARAMS:
125 elems->ds_params = pos;
126 elems->ds_params_len = elen;
127 break;
128 case WLAN_EID_CF_PARAMS:
129 elems->cf_params = pos;
130 elems->cf_params_len = elen;
131 break;
132 case WLAN_EID_TIM:
133 elems->tim = pos;
134 elems->tim_len = elen;
135 break;
136 case WLAN_EID_IBSS_PARAMS:
137 elems->ibss_params = pos;
138 elems->ibss_params_len = elen;
139 break;
140 case WLAN_EID_CHALLENGE:
141 elems->challenge = pos;
142 elems->challenge_len = elen;
143 break;
144 case WLAN_EID_WPA:
145 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
146 pos[2] == 0xf2) {
147 /* Microsoft OUI (00:50:F2) */
148 if (pos[3] == 1) {
149 /* OUI Type 1 - WPA IE */
150 elems->wpa = pos;
151 elems->wpa_len = elen;
152 } else if (elen >= 5 && pos[3] == 2) {
153 if (pos[4] == 0) {
154 elems->wmm_info = pos;
155 elems->wmm_info_len = elen;
156 } else if (pos[4] == 1) {
157 elems->wmm_param = pos;
158 elems->wmm_param_len = elen;
159 }
160 }
161 }
162 break;
163 case WLAN_EID_RSN:
164 elems->rsn = pos;
165 elems->rsn_len = elen;
166 break;
167 case WLAN_EID_ERP_INFO:
168 elems->erp_info = pos;
169 elems->erp_info_len = elen;
170 break;
171 case WLAN_EID_EXT_SUPP_RATES:
172 elems->ext_supp_rates = pos;
173 elems->ext_supp_rates_len = elen;
174 break;
175 case WLAN_EID_HT_CAPABILITY:
176 elems->ht_cap_elem = pos;
177 elems->ht_cap_elem_len = elen;
178 break;
179 case WLAN_EID_HT_EXTRA_INFO:
180 elems->ht_info_elem = pos;
181 elems->ht_info_elem_len = elen;
182 break;
183 case WLAN_EID_MESH_ID:
184 elems->mesh_id = pos;
185 elems->mesh_id_len = elen;
186 break;
187 case WLAN_EID_MESH_CONFIG:
188 elems->mesh_config = pos;
189 elems->mesh_config_len = elen;
190 break;
191 case WLAN_EID_PEER_LINK:
192 elems->peer_link = pos;
193 elems->peer_link_len = elen;
194 break;
195 case WLAN_EID_PREQ:
196 elems->preq = pos;
197 elems->preq_len = elen;
198 break;
199 case WLAN_EID_PREP:
200 elems->prep = pos;
201 elems->prep_len = elen;
202 break;
203 case WLAN_EID_PERR:
204 elems->perr = pos;
205 elems->perr_len = elen;
206 break;
207 case WLAN_EID_CHANNEL_SWITCH:
208 elems->ch_switch_elem = pos;
209 elems->ch_switch_elem_len = elen;
210 break;
211 case WLAN_EID_QUIET:
212 if (!elems->quiet_elem) {
213 elems->quiet_elem = pos;
214 elems->quiet_elem_len = elen;
215 }
216 elems->num_of_quiet_elem++;
217 break;
218 case WLAN_EID_COUNTRY:
219 elems->country_elem = pos;
220 elems->country_elem_len = elen;
221 break;
222 case WLAN_EID_PWR_CONSTRAINT:
223 elems->pwr_constr_elem = pos;
224 elems->pwr_constr_elem_len = elen;
225 break;
226 default:
227 break;
228 }
229
230 left -= elen;
231 pos += elen;
232 }
233}
234
235
236static int ecw2cw(int ecw) 50static int ecw2cw(int ecw)
237{ 51{
238 return (1 << ecw) - 1; 52 return (1 << ecw) - 1;
239} 53}
240 54
241 55static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie)
242static void ieee80211_sta_def_wmm_params(struct net_device *dev,
243 struct ieee80211_sta_bss *bss,
244 int ibss)
245{ 56{
246 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 57 u8 *end, *pos;
247 struct ieee80211_local *local = sdata->local;
248 int i, have_higher_than_11mbit = 0;
249
250 58
251 /* cf. IEEE 802.11 9.2.12 */ 59 pos = bss->ies;
252 for (i = 0; i < bss->supp_rates_len; i++) 60 if (pos == NULL)
253 if ((bss->supp_rates[i] & 0x7f) * 5 > 110) 61 return NULL;
254 have_higher_than_11mbit = 1; 62 end = pos + bss->ies_len;
255
256 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
257 have_higher_than_11mbit)
258 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
259 else
260 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
261
262
263 if (local->ops->conf_tx) {
264 struct ieee80211_tx_queue_params qparam;
265
266 memset(&qparam, 0, sizeof(qparam));
267
268 qparam.aifs = 2;
269
270 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
271 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
272 qparam.cw_min = 31;
273 else
274 qparam.cw_min = 15;
275
276 qparam.cw_max = 1023;
277 qparam.txop = 0;
278
279 for (i = 0; i < local_to_hw(local)->queues; i++)
280 local->ops->conf_tx(local_to_hw(local), i, &qparam);
281 }
282}
283
284static void ieee80211_sta_wmm_params(struct net_device *dev,
285 struct ieee80211_if_sta *ifsta,
286 u8 *wmm_param, size_t wmm_param_len)
287{
288 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
289 struct ieee80211_tx_queue_params params;
290 size_t left;
291 int count;
292 u8 *pos;
293
294 if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
295 return;
296
297 if (!wmm_param)
298 return;
299
300 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
301 return;
302 count = wmm_param[6] & 0x0f;
303 if (count == ifsta->wmm_last_param_set)
304 return;
305 ifsta->wmm_last_param_set = count;
306
307 pos = wmm_param + 8;
308 left = wmm_param_len - 8;
309
310 memset(&params, 0, sizeof(params));
311
312 if (!local->ops->conf_tx)
313 return;
314
315 local->wmm_acm = 0;
316 for (; left >= 4; left -= 4, pos += 4) {
317 int aci = (pos[0] >> 5) & 0x03;
318 int acm = (pos[0] >> 4) & 0x01;
319 int queue;
320 63
321 switch (aci) { 64 while (pos + 1 < end) {
322 case 1: 65 if (pos + 2 + pos[1] > end)
323 queue = 3;
324 if (acm)
325 local->wmm_acm |= BIT(0) | BIT(3);
326 break;
327 case 2:
328 queue = 1;
329 if (acm)
330 local->wmm_acm |= BIT(4) | BIT(5);
331 break;
332 case 3:
333 queue = 0;
334 if (acm)
335 local->wmm_acm |= BIT(6) | BIT(7);
336 break; 66 break;
337 case 0: 67 if (pos[0] == ie)
338 default: 68 return pos;
339 queue = 2; 69 pos += 2 + pos[1];
340 if (acm)
341 local->wmm_acm |= BIT(1) | BIT(2);
342 break;
343 }
344
345 params.aifs = pos[0] & 0x0f;
346 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
347 params.cw_min = ecw2cw(pos[1] & 0x0f);
348 params.txop = get_unaligned_le16(pos + 2);
349#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
350 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
351 "cWmin=%d cWmax=%d txop=%d\n",
352 dev->name, queue, aci, acm, params.aifs, params.cw_min,
353 params.cw_max, params.txop);
354#endif
355 /* TODO: handle ACM (block TX, fallback to next lowest allowed
356 * AC for now) */
357 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
358 printk(KERN_DEBUG "%s: failed to set TX queue "
359 "parameters for queue %d\n", dev->name, queue);
360 }
361 }
362}
363
364static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
365 bool use_protection,
366 bool use_short_preamble)
367{
368 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
369#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
370 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
371 DECLARE_MAC_BUF(mac);
372#endif
373 u32 changed = 0;
374
375 if (use_protection != bss_conf->use_cts_prot) {
376#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
377 if (net_ratelimit()) {
378 printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
379 "%s)\n",
380 sdata->dev->name,
381 use_protection ? "enabled" : "disabled",
382 print_mac(mac, ifsta->bssid));
383 }
384#endif
385 bss_conf->use_cts_prot = use_protection;
386 changed |= BSS_CHANGED_ERP_CTS_PROT;
387 } 70 }
388 71
389 if (use_short_preamble != bss_conf->use_short_preamble) { 72 return NULL;
390#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
391 if (net_ratelimit()) {
392 printk(KERN_DEBUG "%s: switched to %s barker preamble"
393 " (BSSID=%s)\n",
394 sdata->dev->name,
395 use_short_preamble ? "short" : "long",
396 print_mac(mac, ifsta->bssid));
397 }
398#endif
399 bss_conf->use_short_preamble = use_short_preamble;
400 changed |= BSS_CHANGED_ERP_PREAMBLE;
401 }
402
403 return changed;
404} 73}
405 74
406static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata, 75static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
407 u8 erp_value) 76 struct ieee80211_supported_band *sband,
408{ 77 u64 *rates)
409 bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
410 bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0;
411
412 return ieee80211_handle_protect_preamb(sdata,
413 use_protection, use_short_preamble);
414}
415
416static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
417 struct ieee80211_sta_bss *bss)
418{ 78{
419 u32 changed = 0; 79 int i, j, count;
80 *rates = 0;
81 count = 0;
82 for (i = 0; i < bss->supp_rates_len; i++) {
83 int rate = (bss->supp_rates[i] & 0x7F) * 5;
420 84
421 if (bss->has_erp_value) 85 for (j = 0; j < sband->n_bitrates; j++)
422 changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value); 86 if (sband->bitrates[j].bitrate == rate) {
423 else { 87 *rates |= BIT(j);
424 u16 capab = bss->capability; 88 count++;
425 changed |= ieee80211_handle_protect_preamb(sdata, false, 89 break;
426 (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0); 90 }
427 } 91 }
428 92
429 return changed; 93 return count;
430}
431
432int ieee80211_ht_cap_ie_to_ht_info(struct ieee80211_ht_cap *ht_cap_ie,
433 struct ieee80211_ht_info *ht_info)
434{
435
436 if (ht_info == NULL)
437 return -EINVAL;
438
439 memset(ht_info, 0, sizeof(*ht_info));
440
441 if (ht_cap_ie) {
442 u8 ampdu_info = ht_cap_ie->ampdu_params_info;
443
444 ht_info->ht_supported = 1;
445 ht_info->cap = le16_to_cpu(ht_cap_ie->cap_info);
446 ht_info->ampdu_factor =
447 ampdu_info & IEEE80211_HT_CAP_AMPDU_FACTOR;
448 ht_info->ampdu_density =
449 (ampdu_info & IEEE80211_HT_CAP_AMPDU_DENSITY) >> 2;
450 memcpy(ht_info->supp_mcs_set, ht_cap_ie->supp_mcs_set, 16);
451 } else
452 ht_info->ht_supported = 0;
453
454 return 0;
455} 94}
456 95
457int ieee80211_ht_addt_info_ie_to_ht_bss_info( 96/* also used by mesh code */
458 struct ieee80211_ht_addt_info *ht_add_info_ie, 97u64 ieee80211_sta_get_rates(struct ieee80211_local *local,
459 struct ieee80211_ht_bss_info *bss_info) 98 struct ieee802_11_elems *elems,
99 enum ieee80211_band band)
460{ 100{
461 if (bss_info == NULL) 101 struct ieee80211_supported_band *sband;
462 return -EINVAL; 102 struct ieee80211_rate *bitrates;
463 103 size_t num_rates;
464 memset(bss_info, 0, sizeof(*bss_info)); 104 u64 supp_rates;
465 105 int i, j;
466 if (ht_add_info_ie) { 106 sband = local->hw.wiphy->bands[band];
467 u16 op_mode;
468 op_mode = le16_to_cpu(ht_add_info_ie->operation_mode);
469 107
470 bss_info->primary_channel = ht_add_info_ie->control_chan; 108 if (!sband) {
471 bss_info->bss_cap = ht_add_info_ie->ht_param; 109 WARN_ON(1);
472 bss_info->bss_op_mode = (u8)(op_mode & 0xff); 110 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
473 } 111 }
474 112
475 return 0; 113 bitrates = sband->bitrates;
114 num_rates = sband->n_bitrates;
115 supp_rates = 0;
116 for (i = 0; i < elems->supp_rates_len +
117 elems->ext_supp_rates_len; i++) {
118 u8 rate = 0;
119 int own_rate;
120 if (i < elems->supp_rates_len)
121 rate = elems->supp_rates[i];
122 else if (elems->ext_supp_rates)
123 rate = elems->ext_supp_rates
124 [i - elems->supp_rates_len];
125 own_rate = 5 * (rate & 0x7f);
126 for (j = 0; j < num_rates; j++)
127 if (bitrates[j].bitrate == own_rate)
128 supp_rates |= BIT(j);
129 }
130 return supp_rates;
476} 131}
477 132
478static void ieee80211_sta_send_associnfo(struct net_device *dev, 133/* frame sending functions */
479 struct ieee80211_if_sta *ifsta) 134
135/* also used by scanning code */
136void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
137 u8 *ssid, size_t ssid_len)
480{ 138{
481 char *buf; 139 struct ieee80211_local *local = sdata->local;
482 size_t len; 140 struct ieee80211_supported_band *sband;
141 struct sk_buff *skb;
142 struct ieee80211_mgmt *mgmt;
143 u8 *pos, *supp_rates, *esupp_rates = NULL;
483 int i; 144 int i;
484 union iwreq_data wrqu;
485
486 if (!ifsta->assocreq_ies && !ifsta->assocresp_ies)
487 return;
488 145
489 buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len + 146 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200);
490 ifsta->assocresp_ies_len), GFP_KERNEL); 147 if (!skb) {
491 if (!buf) 148 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
149 "request\n", sdata->dev->name);
492 return; 150 return;
493
494 len = sprintf(buf, "ASSOCINFO(");
495 if (ifsta->assocreq_ies) {
496 len += sprintf(buf + len, "ReqIEs=");
497 for (i = 0; i < ifsta->assocreq_ies_len; i++) {
498 len += sprintf(buf + len, "%02x",
499 ifsta->assocreq_ies[i]);
500 }
501 } 151 }
502 if (ifsta->assocresp_ies) { 152 skb_reserve(skb, local->hw.extra_tx_headroom);
503 if (ifsta->assocreq_ies)
504 len += sprintf(buf + len, " ");
505 len += sprintf(buf + len, "RespIEs=");
506 for (i = 0; i < ifsta->assocresp_ies_len; i++) {
507 len += sprintf(buf + len, "%02x",
508 ifsta->assocresp_ies[i]);
509 }
510 }
511 len += sprintf(buf + len, ")");
512 153
513 if (len > IW_CUSTOM_MAX) { 154 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
514 len = sprintf(buf, "ASSOCRESPIE="); 155 memset(mgmt, 0, 24);
515 for (i = 0; i < ifsta->assocresp_ies_len; i++) { 156 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
516 len += sprintf(buf + len, "%02x", 157 IEEE80211_STYPE_PROBE_REQ);
517 ifsta->assocresp_ies[i]); 158 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
518 } 159 if (dst) {
160 memcpy(mgmt->da, dst, ETH_ALEN);
161 memcpy(mgmt->bssid, dst, ETH_ALEN);
162 } else {
163 memset(mgmt->da, 0xff, ETH_ALEN);
164 memset(mgmt->bssid, 0xff, ETH_ALEN);
519 } 165 }
166 pos = skb_put(skb, 2 + ssid_len);
167 *pos++ = WLAN_EID_SSID;
168 *pos++ = ssid_len;
169 memcpy(pos, ssid, ssid_len);
520 170
521 memset(&wrqu, 0, sizeof(wrqu)); 171 supp_rates = skb_put(skb, 2);
522 wrqu.data.length = len; 172 supp_rates[0] = WLAN_EID_SUPP_RATES;
523 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); 173 supp_rates[1] = 0;
524 174 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
525 kfree(buf);
526}
527
528
529static void ieee80211_set_associated(struct net_device *dev,
530 struct ieee80211_if_sta *ifsta,
531 bool assoc)
532{
533 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
534 struct ieee80211_local *local = sdata->local;
535 struct ieee80211_conf *conf = &local_to_hw(local)->conf;
536 union iwreq_data wrqu;
537 u32 changed = BSS_CHANGED_ASSOC;
538
539 if (assoc) {
540 struct ieee80211_sta_bss *bss;
541
542 ifsta->flags |= IEEE80211_STA_ASSOCIATED;
543
544 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
545 return;
546
547 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
548 conf->channel->center_freq,
549 ifsta->ssid, ifsta->ssid_len);
550 if (bss) {
551 /* set timing information */
552 sdata->bss_conf.beacon_int = bss->beacon_int;
553 sdata->bss_conf.timestamp = bss->timestamp;
554 sdata->bss_conf.dtim_period = bss->dtim_period;
555
556 changed |= ieee80211_handle_bss_capability(sdata, bss);
557
558 ieee80211_rx_bss_put(local, bss);
559 }
560 175
561 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { 176 for (i = 0; i < sband->n_bitrates; i++) {
562 changed |= BSS_CHANGED_HT; 177 struct ieee80211_rate *rate = &sband->bitrates[i];
563 sdata->bss_conf.assoc_ht = 1; 178 if (esupp_rates) {
564 sdata->bss_conf.ht_conf = &conf->ht_conf; 179 pos = skb_put(skb, 1);
565 sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf; 180 esupp_rates[1]++;
181 } else if (supp_rates[1] == 8) {
182 esupp_rates = skb_put(skb, 3);
183 esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
184 esupp_rates[1] = 1;
185 pos = &esupp_rates[2];
186 } else {
187 pos = skb_put(skb, 1);
188 supp_rates[1]++;
566 } 189 }
567 190 *pos = rate->bitrate / 5;
568 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
569 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
570 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
571 ieee80211_sta_send_associnfo(dev, ifsta);
572 } else {
573 netif_carrier_off(dev);
574 ieee80211_sta_tear_down_BA_sessions(dev, ifsta->bssid);
575 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
576 changed |= ieee80211_reset_erp_info(dev);
577
578 sdata->bss_conf.assoc_ht = 0;
579 sdata->bss_conf.ht_conf = NULL;
580 sdata->bss_conf.ht_bss_conf = NULL;
581
582 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
583 } 191 }
584 ifsta->last_probe = jiffies;
585 ieee80211_led_assoc(local, assoc);
586
587 sdata->bss_conf.assoc = assoc;
588 ieee80211_bss_info_change_notify(sdata, changed);
589 192
590 if (assoc) 193 ieee80211_tx_skb(sdata, skb, 0);
591 netif_carrier_on(dev);
592
593 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
594 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
595} 194}
596 195
597static void ieee80211_set_disassoc(struct net_device *dev, 196static void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
598 struct ieee80211_if_sta *ifsta, int deauth)
599{
600 if (deauth)
601 ifsta->auth_tries = 0;
602 ifsta->assoc_tries = 0;
603 ieee80211_set_associated(dev, ifsta, 0);
604}
605
606void ieee80211_sta_tx(struct net_device *dev, struct sk_buff *skb,
607 int encrypt)
608{
609 struct ieee80211_sub_if_data *sdata;
610
611 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
612 skb->dev = sdata->local->mdev;
613 skb_set_mac_header(skb, 0);
614 skb_set_network_header(skb, 0);
615 skb_set_transport_header(skb, 0);
616
617 skb->iif = sdata->dev->ifindex;
618 skb->do_not_encrypt = !encrypt;
619
620 dev_queue_xmit(skb);
621}
622
623
624static void ieee80211_send_auth(struct net_device *dev,
625 struct ieee80211_if_sta *ifsta, 197 struct ieee80211_if_sta *ifsta,
626 int transaction, u8 *extra, size_t extra_len, 198 int transaction, u8 *extra, size_t extra_len,
627 int encrypt) 199 int encrypt)
628{ 200{
629 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 201 struct ieee80211_local *local = sdata->local;
630 struct sk_buff *skb; 202 struct sk_buff *skb;
631 struct ieee80211_mgmt *mgmt; 203 struct ieee80211_mgmt *mgmt;
632 204
@@ -634,19 +206,19 @@ static void ieee80211_send_auth(struct net_device *dev,
634 sizeof(*mgmt) + 6 + extra_len); 206 sizeof(*mgmt) + 6 + extra_len);
635 if (!skb) { 207 if (!skb) {
636 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 208 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
637 "frame\n", dev->name); 209 "frame\n", sdata->dev->name);
638 return; 210 return;
639 } 211 }
640 skb_reserve(skb, local->hw.extra_tx_headroom); 212 skb_reserve(skb, local->hw.extra_tx_headroom);
641 213
642 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 214 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
643 memset(mgmt, 0, 24 + 6); 215 memset(mgmt, 0, 24 + 6);
644 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 216 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
645 IEEE80211_STYPE_AUTH); 217 IEEE80211_STYPE_AUTH);
646 if (encrypt) 218 if (encrypt)
647 mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 219 mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
648 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 220 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
649 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 221 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
650 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 222 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
651 mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg); 223 mgmt->u.auth.auth_alg = cpu_to_le16(ifsta->auth_alg);
652 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction); 224 mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -655,64 +227,19 @@ static void ieee80211_send_auth(struct net_device *dev,
655 if (extra) 227 if (extra)
656 memcpy(skb_put(skb, extra_len), extra, extra_len); 228 memcpy(skb_put(skb, extra_len), extra, extra_len);
657 229
658 ieee80211_sta_tx(dev, skb, encrypt); 230 ieee80211_tx_skb(sdata, skb, encrypt);
659}
660
661
662static void ieee80211_authenticate(struct net_device *dev,
663 struct ieee80211_if_sta *ifsta)
664{
665 DECLARE_MAC_BUF(mac);
666
667 ifsta->auth_tries++;
668 if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
669 printk(KERN_DEBUG "%s: authentication with AP %s"
670 " timed out\n",
671 dev->name, print_mac(mac, ifsta->bssid));
672 ifsta->state = IEEE80211_DISABLED;
673 return;
674 }
675
676 ifsta->state = IEEE80211_AUTHENTICATE;
677 printk(KERN_DEBUG "%s: authenticate with AP %s\n",
678 dev->name, print_mac(mac, ifsta->bssid));
679
680 ieee80211_send_auth(dev, ifsta, 1, NULL, 0, 0);
681
682 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
683} 231}
684 232
685static int ieee80211_compatible_rates(struct ieee80211_sta_bss *bss, 233static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
686 struct ieee80211_supported_band *sband,
687 u64 *rates)
688{
689 int i, j, count;
690 *rates = 0;
691 count = 0;
692 for (i = 0; i < bss->supp_rates_len; i++) {
693 int rate = (bss->supp_rates[i] & 0x7F) * 5;
694
695 for (j = 0; j < sband->n_bitrates; j++)
696 if (sband->bitrates[j].bitrate == rate) {
697 *rates |= BIT(j);
698 count++;
699 break;
700 }
701 }
702
703 return count;
704}
705
706static void ieee80211_send_assoc(struct net_device *dev,
707 struct ieee80211_if_sta *ifsta) 234 struct ieee80211_if_sta *ifsta)
708{ 235{
709 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 236 struct ieee80211_local *local = sdata->local;
710 struct sk_buff *skb; 237 struct sk_buff *skb;
711 struct ieee80211_mgmt *mgmt; 238 struct ieee80211_mgmt *mgmt;
712 u8 *pos, *ies; 239 u8 *pos, *ies, *ht_add_ie;
713 int i, len, count, rates_len, supp_rates_len; 240 int i, len, count, rates_len, supp_rates_len;
714 u16 capab; 241 u16 capab;
715 struct ieee80211_sta_bss *bss; 242 struct ieee80211_bss *bss;
716 int wmm = 0; 243 int wmm = 0;
717 struct ieee80211_supported_band *sband; 244 struct ieee80211_supported_band *sband;
718 u64 rates = 0; 245 u64 rates = 0;
@@ -722,7 +249,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
722 ifsta->ssid_len); 249 ifsta->ssid_len);
723 if (!skb) { 250 if (!skb) {
724 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " 251 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
725 "frame\n", dev->name); 252 "frame\n", sdata->dev->name);
726 return; 253 return;
727 } 254 }
728 skb_reserve(skb, local->hw.extra_tx_headroom); 255 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -738,13 +265,13 @@ static void ieee80211_send_assoc(struct net_device *dev,
738 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE; 265 capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
739 } 266 }
740 267
741 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 268 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
742 local->hw.conf.channel->center_freq, 269 local->hw.conf.channel->center_freq,
743 ifsta->ssid, ifsta->ssid_len); 270 ifsta->ssid, ifsta->ssid_len);
744 if (bss) { 271 if (bss) {
745 if (bss->capability & WLAN_CAPABILITY_PRIVACY) 272 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
746 capab |= WLAN_CAPABILITY_PRIVACY; 273 capab |= WLAN_CAPABILITY_PRIVACY;
747 if (bss->wmm_ie) 274 if (bss->wmm_used)
748 wmm = 1; 275 wmm = 1;
749 276
750 /* get all rates supported by the device and the AP as 277 /* get all rates supported by the device and the AP as
@@ -766,13 +293,13 @@ static void ieee80211_send_assoc(struct net_device *dev,
766 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 293 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
767 memset(mgmt, 0, 24); 294 memset(mgmt, 0, 24);
768 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 295 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
769 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 296 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
770 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 297 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
771 298
772 if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) { 299 if (ifsta->flags & IEEE80211_STA_PREV_BSSID_SET) {
773 skb_put(skb, 10); 300 skb_put(skb, 10);
774 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 301 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
775 IEEE80211_STYPE_REASSOC_REQ); 302 IEEE80211_STYPE_REASSOC_REQ);
776 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab); 303 mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
777 mgmt->u.reassoc_req.listen_interval = 304 mgmt->u.reassoc_req.listen_interval =
778 cpu_to_le16(local->hw.conf.listen_interval); 305 cpu_to_le16(local->hw.conf.listen_interval);
@@ -780,8 +307,8 @@ static void ieee80211_send_assoc(struct net_device *dev,
780 ETH_ALEN); 307 ETH_ALEN);
781 } else { 308 } else {
782 skb_put(skb, 4); 309 skb_put(skb, 4);
783 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 310 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
784 IEEE80211_STYPE_ASSOC_REQ); 311 IEEE80211_STYPE_ASSOC_REQ);
785 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab); 312 mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
786 mgmt->u.reassoc_req.listen_interval = 313 mgmt->u.reassoc_req.listen_interval =
787 cpu_to_le16(local->hw.conf.listen_interval); 314 cpu_to_le16(local->hw.conf.listen_interval);
@@ -866,9 +393,10 @@ static void ieee80211_send_assoc(struct net_device *dev,
866 393
867 /* wmm support is a must to HT */ 394 /* wmm support is a must to HT */
868 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) && 395 if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
869 sband->ht_info.ht_supported && bss->ht_add_ie) { 396 sband->ht_info.ht_supported &&
397 (ht_add_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_EXTRA_INFO))) {
870 struct ieee80211_ht_addt_info *ht_add_info = 398 struct ieee80211_ht_addt_info *ht_add_info =
871 (struct ieee80211_ht_addt_info *)bss->ht_add_ie; 399 (struct ieee80211_ht_addt_info *)ht_add_ie;
872 u16 cap = sband->ht_info.cap; 400 u16 cap = sband->ht_info.cap;
873 __le16 tmp; 401 __le16 tmp;
874 u32 flags = local->hw.conf.channel->flags; 402 u32 flags = local->hw.conf.channel->flags;
@@ -907,21 +435,22 @@ static void ieee80211_send_assoc(struct net_device *dev,
907 if (ifsta->assocreq_ies) 435 if (ifsta->assocreq_ies)
908 memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len); 436 memcpy(ifsta->assocreq_ies, ies, ifsta->assocreq_ies_len);
909 437
910 ieee80211_sta_tx(dev, skb, 0); 438 ieee80211_tx_skb(sdata, skb, 0);
911} 439}
912 440
913 441
914static void ieee80211_send_deauth(struct net_device *dev, 442static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
915 struct ieee80211_if_sta *ifsta, u16 reason) 443 u16 stype, u16 reason)
916{ 444{
917 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 445 struct ieee80211_local *local = sdata->local;
446 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
918 struct sk_buff *skb; 447 struct sk_buff *skb;
919 struct ieee80211_mgmt *mgmt; 448 struct ieee80211_mgmt *mgmt;
920 449
921 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 450 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
922 if (!skb) { 451 if (!skb) {
923 printk(KERN_DEBUG "%s: failed to allocate buffer for deauth " 452 printk(KERN_DEBUG "%s: failed to allocate buffer for "
924 "frame\n", dev->name); 453 "deauth/disassoc frame\n", sdata->dev->name);
925 return; 454 return;
926 } 455 }
927 skb_reserve(skb, local->hw.extra_tx_headroom); 456 skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -929,940 +458,561 @@ static void ieee80211_send_deauth(struct net_device *dev,
929 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 458 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
930 memset(mgmt, 0, 24); 459 memset(mgmt, 0, 24);
931 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 460 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN);
932 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 461 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
933 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 462 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
934 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 463 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
935 IEEE80211_STYPE_DEAUTH);
936 skb_put(skb, 2); 464 skb_put(skb, 2);
465 /* u.deauth.reason_code == u.disassoc.reason_code */
937 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 466 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
938 467
939 ieee80211_sta_tx(dev, skb, 0); 468 ieee80211_tx_skb(sdata, skb, 0);
940} 469}
941 470
942 471/* MLME */
943static void ieee80211_send_disassoc(struct net_device *dev, 472static void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
944 struct ieee80211_if_sta *ifsta, u16 reason) 473 struct ieee80211_bss *bss)
945{ 474{
946 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 475 struct ieee80211_local *local = sdata->local;
947 struct sk_buff *skb; 476 int i, have_higher_than_11mbit = 0;
948 struct ieee80211_mgmt *mgmt;
949 477
950 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt)); 478 /* cf. IEEE 802.11 9.2.12 */
951 if (!skb) { 479 for (i = 0; i < bss->supp_rates_len; i++)
952 printk(KERN_DEBUG "%s: failed to allocate buffer for disassoc " 480 if ((bss->supp_rates[i] & 0x7f) * 5 > 110)
953 "frame\n", dev->name); 481 have_higher_than_11mbit = 1;
954 return;
955 }
956 skb_reserve(skb, local->hw.extra_tx_headroom);
957 482
958 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 483 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
959 memset(mgmt, 0, 24); 484 have_higher_than_11mbit)
960 memcpy(mgmt->da, ifsta->bssid, ETH_ALEN); 485 sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
961 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 486 else
962 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 487 sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
963 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
964 IEEE80211_STYPE_DISASSOC);
965 skb_put(skb, 2);
966 mgmt->u.disassoc.reason_code = cpu_to_le16(reason);
967 488
968 ieee80211_sta_tx(dev, skb, 0); 489 ieee80211_set_wmm_default(sdata);
969} 490}
970 491
971 492static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
972static int ieee80211_privacy_mismatch(struct net_device *dev, 493 struct ieee80211_if_sta *ifsta,
973 struct ieee80211_if_sta *ifsta) 494 u8 *wmm_param, size_t wmm_param_len)
974{ 495{
975 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 496 struct ieee80211_tx_queue_params params;
976 struct ieee80211_sta_bss *bss; 497 size_t left;
977 int bss_privacy; 498 int count;
978 int wep_privacy; 499 u8 *pos;
979 int privacy_invoked;
980
981 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
982 return 0;
983
984 bss = ieee80211_rx_bss_get(dev, ifsta->bssid,
985 local->hw.conf.channel->center_freq,
986 ifsta->ssid, ifsta->ssid_len);
987 if (!bss)
988 return 0;
989
990 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
991 wep_privacy = !!ieee80211_sta_wep_configured(dev);
992 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
993 500
994 ieee80211_rx_bss_put(local, bss); 501 if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
502 return;
995 503
996 if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked)) 504 if (!wmm_param)
997 return 0; 505 return;
998 506
999 return 1; 507 if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
1000} 508 return;
509 count = wmm_param[6] & 0x0f;
510 if (count == ifsta->wmm_last_param_set)
511 return;
512 ifsta->wmm_last_param_set = count;
1001 513
514 pos = wmm_param + 8;
515 left = wmm_param_len - 8;
1002 516
1003static void ieee80211_associate(struct net_device *dev, 517 memset(&params, 0, sizeof(params));
1004 struct ieee80211_if_sta *ifsta)
1005{
1006 DECLARE_MAC_BUF(mac);
1007 518
1008 ifsta->assoc_tries++; 519 if (!local->ops->conf_tx)
1009 if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
1010 printk(KERN_DEBUG "%s: association with AP %s"
1011 " timed out\n",
1012 dev->name, print_mac(mac, ifsta->bssid));
1013 ifsta->state = IEEE80211_DISABLED;
1014 return; 520 return;
1015 }
1016 521
1017 ifsta->state = IEEE80211_ASSOCIATE; 522 local->wmm_acm = 0;
1018 printk(KERN_DEBUG "%s: associate with AP %s\n", 523 for (; left >= 4; left -= 4, pos += 4) {
1019 dev->name, print_mac(mac, ifsta->bssid)); 524 int aci = (pos[0] >> 5) & 0x03;
1020 if (ieee80211_privacy_mismatch(dev, ifsta)) { 525 int acm = (pos[0] >> 4) & 0x01;
1021 printk(KERN_DEBUG "%s: mismatch in privacy configuration and " 526 int queue;
1022 "mixed-cell disabled - abort association\n", dev->name);
1023 ifsta->state = IEEE80211_DISABLED;
1024 return;
1025 }
1026 527
1027 ieee80211_send_assoc(dev, ifsta); 528 switch (aci) {
529 case 1:
530 queue = 3;
531 if (acm)
532 local->wmm_acm |= BIT(0) | BIT(3);
533 break;
534 case 2:
535 queue = 1;
536 if (acm)
537 local->wmm_acm |= BIT(4) | BIT(5);
538 break;
539 case 3:
540 queue = 0;
541 if (acm)
542 local->wmm_acm |= BIT(6) | BIT(7);
543 break;
544 case 0:
545 default:
546 queue = 2;
547 if (acm)
548 local->wmm_acm |= BIT(1) | BIT(2);
549 break;
550 }
1028 551
1029 mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); 552 params.aifs = pos[0] & 0x0f;
553 params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
554 params.cw_min = ecw2cw(pos[1] & 0x0f);
555 params.txop = get_unaligned_le16(pos + 2);
556#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
557 printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
558 "cWmin=%d cWmax=%d txop=%d\n",
559 local->mdev->name, queue, aci, acm, params.aifs, params.cw_min,
560 params.cw_max, params.txop);
561#endif
562 /* TODO: handle ACM (block TX, fallback to next lowest allowed
563 * AC for now) */
564 if (local->ops->conf_tx(local_to_hw(local), queue, &params)) {
565 printk(KERN_DEBUG "%s: failed to set TX queue "
566 "parameters for queue %d\n", local->mdev->name, queue);
567 }
568 }
1030} 569}
1031 570
1032 571static u32 ieee80211_handle_protect_preamb(struct ieee80211_sub_if_data *sdata,
1033static void ieee80211_associated(struct net_device *dev, 572 bool use_protection,
1034 struct ieee80211_if_sta *ifsta) 573 bool use_short_preamble)
1035{ 574{
1036 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 575 struct ieee80211_bss_conf *bss_conf = &sdata->bss_conf;
1037 struct sta_info *sta; 576#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1038 int disassoc; 577 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1039 DECLARE_MAC_BUF(mac); 578 DECLARE_MAC_BUF(mac);
579#endif
580 u32 changed = 0;
1040 581
1041 /* TODO: start monitoring current AP signal quality and number of 582 if (use_protection != bss_conf->use_cts_prot) {
1042 * missed beacons. Scan other channels every now and then and search 583#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1043 * for better APs. */ 584 if (net_ratelimit()) {
1044 /* TODO: remove expired BSSes */ 585 printk(KERN_DEBUG "%s: CTS protection %s (BSSID="
1045 586 "%s)\n",
1046 ifsta->state = IEEE80211_ASSOCIATED; 587 sdata->dev->name,
1047 588 use_protection ? "enabled" : "disabled",
1048 rcu_read_lock(); 589 print_mac(mac, ifsta->bssid));
1049
1050 sta = sta_info_get(local, ifsta->bssid);
1051 if (!sta) {
1052 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n",
1053 dev->name, print_mac(mac, ifsta->bssid));
1054 disassoc = 1;
1055 } else {
1056 disassoc = 0;
1057 if (time_after(jiffies,
1058 sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
1059 if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) {
1060 printk(KERN_DEBUG "%s: No ProbeResp from "
1061 "current AP %s - assume out of "
1062 "range\n",
1063 dev->name, print_mac(mac, ifsta->bssid));
1064 disassoc = 1;
1065 sta_info_unlink(&sta);
1066 } else
1067 ieee80211_send_probe_req(dev, ifsta->bssid,
1068 local->scan_ssid,
1069 local->scan_ssid_len);
1070 ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL;
1071 } else {
1072 ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1073 if (time_after(jiffies, ifsta->last_probe +
1074 IEEE80211_PROBE_INTERVAL)) {
1075 ifsta->last_probe = jiffies;
1076 ieee80211_send_probe_req(dev, ifsta->bssid,
1077 ifsta->ssid,
1078 ifsta->ssid_len);
1079 }
1080 } 590 }
591#endif
592 bss_conf->use_cts_prot = use_protection;
593 changed |= BSS_CHANGED_ERP_CTS_PROT;
1081 } 594 }
1082 595
1083 rcu_read_unlock(); 596 if (use_short_preamble != bss_conf->use_short_preamble) {
1084 597#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1085 if (disassoc && sta) 598 if (net_ratelimit()) {
1086 sta_info_destroy(sta); 599 printk(KERN_DEBUG "%s: switched to %s barker preamble"
1087 600 " (BSSID=%s)\n",
1088 if (disassoc) { 601 sdata->dev->name,
1089 ifsta->state = IEEE80211_DISABLED; 602 use_short_preamble ? "short" : "long",
1090 ieee80211_set_associated(dev, ifsta, 0); 603 print_mac(mac, ifsta->bssid));
1091 } else { 604 }
1092 mod_timer(&ifsta->timer, jiffies + 605#endif
1093 IEEE80211_MONITORING_INTERVAL); 606 bss_conf->use_short_preamble = use_short_preamble;
607 changed |= BSS_CHANGED_ERP_PREAMBLE;
1094 } 608 }
1095}
1096 609
610 return changed;
611}
1097 612
1098static void ieee80211_send_probe_req(struct net_device *dev, u8 *dst, 613static u32 ieee80211_handle_erp_ie(struct ieee80211_sub_if_data *sdata,
1099 u8 *ssid, size_t ssid_len) 614 u8 erp_value)
1100{ 615{
1101 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 616 bool use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
1102 struct ieee80211_supported_band *sband; 617 bool use_short_preamble = (erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0;
1103 struct sk_buff *skb;
1104 struct ieee80211_mgmt *mgmt;
1105 u8 *pos, *supp_rates, *esupp_rates = NULL;
1106 int i;
1107
1108 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200);
1109 if (!skb) {
1110 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
1111 "request\n", dev->name);
1112 return;
1113 }
1114 skb_reserve(skb, local->hw.extra_tx_headroom);
1115 618
1116 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 619 return ieee80211_handle_protect_preamb(sdata,
1117 memset(mgmt, 0, 24); 620 use_protection, use_short_preamble);
1118 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 621}
1119 IEEE80211_STYPE_PROBE_REQ);
1120 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
1121 if (dst) {
1122 memcpy(mgmt->da, dst, ETH_ALEN);
1123 memcpy(mgmt->bssid, dst, ETH_ALEN);
1124 } else {
1125 memset(mgmt->da, 0xff, ETH_ALEN);
1126 memset(mgmt->bssid, 0xff, ETH_ALEN);
1127 }
1128 pos = skb_put(skb, 2 + ssid_len);
1129 *pos++ = WLAN_EID_SSID;
1130 *pos++ = ssid_len;
1131 memcpy(pos, ssid, ssid_len);
1132 622
1133 supp_rates = skb_put(skb, 2); 623static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
1134 supp_rates[0] = WLAN_EID_SUPP_RATES; 624 struct ieee80211_bss *bss)
1135 supp_rates[1] = 0; 625{
1136 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 626 u32 changed = 0;
1137 627
1138 for (i = 0; i < sband->n_bitrates; i++) { 628 if (bss->has_erp_value)
1139 struct ieee80211_rate *rate = &sband->bitrates[i]; 629 changed |= ieee80211_handle_erp_ie(sdata, bss->erp_value);
1140 if (esupp_rates) { 630 else {
1141 pos = skb_put(skb, 1); 631 u16 capab = bss->capability;
1142 esupp_rates[1]++; 632 changed |= ieee80211_handle_protect_preamb(sdata, false,
1143 } else if (supp_rates[1] == 8) { 633 (capab & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0);
1144 esupp_rates = skb_put(skb, 3);
1145 esupp_rates[0] = WLAN_EID_EXT_SUPP_RATES;
1146 esupp_rates[1] = 1;
1147 pos = &esupp_rates[2];
1148 } else {
1149 pos = skb_put(skb, 1);
1150 supp_rates[1]++;
1151 }
1152 *pos = rate->bitrate / 5;
1153 } 634 }
1154 635
1155 ieee80211_sta_tx(dev, skb, 0); 636 return changed;
1156} 637}
1157 638
639static void ieee80211_sta_send_apinfo(struct ieee80211_sub_if_data *sdata,
640 struct ieee80211_if_sta *ifsta)
641{
642 union iwreq_data wrqu;
643 memset(&wrqu, 0, sizeof(wrqu));
644 if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
645 memcpy(wrqu.ap_addr.sa_data, sdata->u.sta.bssid, ETH_ALEN);
646 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
647 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
648}
1158 649
1159static int ieee80211_sta_wep_configured(struct net_device *dev) 650static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata,
651 struct ieee80211_if_sta *ifsta)
1160{ 652{
1161 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 653 union iwreq_data wrqu;
1162 if (!sdata || !sdata->default_key || 654
1163 sdata->default_key->conf.alg != ALG_WEP) 655 if (ifsta->assocreq_ies) {
1164 return 0; 656 memset(&wrqu, 0, sizeof(wrqu));
1165 return 1; 657 wrqu.data.length = ifsta->assocreq_ies_len;
658 wireless_send_event(sdata->dev, IWEVASSOCREQIE, &wrqu,
659 ifsta->assocreq_ies);
660 }
661 if (ifsta->assocresp_ies) {
662 memset(&wrqu, 0, sizeof(wrqu));
663 wrqu.data.length = ifsta->assocresp_ies_len;
664 wireless_send_event(sdata->dev, IWEVASSOCRESPIE, &wrqu,
665 ifsta->assocresp_ies);
666 }
1166} 667}
1167 668
1168 669
1169static void ieee80211_auth_completed(struct net_device *dev, 670static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1170 struct ieee80211_if_sta *ifsta) 671 struct ieee80211_if_sta *ifsta)
1171{ 672{
1172 printk(KERN_DEBUG "%s: authenticated\n", dev->name); 673 struct ieee80211_local *local = sdata->local;
1173 ifsta->flags |= IEEE80211_STA_AUTHENTICATED; 674 struct ieee80211_conf *conf = &local_to_hw(local)->conf;
1174 ieee80211_associate(dev, ifsta); 675 u32 changed = BSS_CHANGED_ASSOC;
1175}
1176 676
677 struct ieee80211_bss *bss;
1177 678
1178static void ieee80211_auth_challenge(struct net_device *dev, 679 ifsta->flags |= IEEE80211_STA_ASSOCIATED;
1179 struct ieee80211_if_sta *ifsta,
1180 struct ieee80211_mgmt *mgmt,
1181 size_t len)
1182{
1183 u8 *pos;
1184 struct ieee802_11_elems elems;
1185 680
1186 pos = mgmt->u.auth.variable; 681 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1187 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1188 if (!elems.challenge)
1189 return; 682 return;
1190 ieee80211_send_auth(dev, ifsta, 3, elems.challenge - 2,
1191 elems.challenge_len + 2, 1);
1192}
1193 683
1194static void ieee80211_send_addba_resp(struct net_device *dev, u8 *da, u16 tid, 684 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
1195 u8 dialog_token, u16 status, u16 policy, 685 conf->channel->center_freq,
1196 u16 buf_size, u16 timeout) 686 ifsta->ssid, ifsta->ssid_len);
1197{ 687 if (bss) {
1198 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 688 /* set timing information */
1199 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 689 sdata->bss_conf.beacon_int = bss->beacon_int;
1200 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 690 sdata->bss_conf.timestamp = bss->timestamp;
1201 struct sk_buff *skb; 691 sdata->bss_conf.dtim_period = bss->dtim_period;
1202 struct ieee80211_mgmt *mgmt;
1203 u16 capab;
1204 692
1205 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom); 693 changed |= ieee80211_handle_bss_capability(sdata, bss);
1206 694
1207 if (!skb) { 695 ieee80211_rx_bss_put(local, bss);
1208 printk(KERN_DEBUG "%s: failed to allocate buffer "
1209 "for addba resp frame\n", dev->name);
1210 return;
1211 } 696 }
1212 697
1213 skb_reserve(skb, local->hw.extra_tx_headroom); 698 if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) {
1214 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 699 changed |= BSS_CHANGED_HT;
1215 memset(mgmt, 0, 24); 700 sdata->bss_conf.assoc_ht = 1;
1216 memcpy(mgmt->da, da, ETH_ALEN); 701 sdata->bss_conf.ht_conf = &conf->ht_conf;
1217 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 702 sdata->bss_conf.ht_bss_conf = &conf->ht_bss_conf;
1218 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) 703 }
1219 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
1220 else
1221 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1222 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1223 IEEE80211_STYPE_ACTION);
1224 704
1225 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_resp)); 705 ifsta->flags |= IEEE80211_STA_PREV_BSSID_SET;
1226 mgmt->u.action.category = WLAN_CATEGORY_BACK; 706 memcpy(ifsta->prev_bssid, sdata->u.sta.bssid, ETH_ALEN);
1227 mgmt->u.action.u.addba_resp.action_code = WLAN_ACTION_ADDBA_RESP; 707 ieee80211_sta_send_associnfo(sdata, ifsta);
1228 mgmt->u.action.u.addba_resp.dialog_token = dialog_token;
1229 708
1230 capab = (u16)(policy << 1); /* bit 1 aggregation policy */ 709 ifsta->last_probe = jiffies;
1231 capab |= (u16)(tid << 2); /* bit 5:2 TID number */ 710 ieee80211_led_assoc(local, 1);
1232 capab |= (u16)(buf_size << 6); /* bit 15:6 max size of aggregation */
1233 711
1234 mgmt->u.action.u.addba_resp.capab = cpu_to_le16(capab); 712 sdata->bss_conf.assoc = 1;
1235 mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout); 713 /*
1236 mgmt->u.action.u.addba_resp.status = cpu_to_le16(status); 714 * For now just always ask the driver to update the basic rateset
715 * when we have associated, we aren't checking whether it actually
716 * changed or not.
717 */
718 changed |= BSS_CHANGED_BASIC_RATES;
719 ieee80211_bss_info_change_notify(sdata, changed);
1237 720
1238 ieee80211_sta_tx(dev, skb, 0); 721 netif_tx_start_all_queues(sdata->dev);
722 netif_carrier_on(sdata->dev);
1239 723
1240 return; 724 ieee80211_sta_send_apinfo(sdata, ifsta);
1241} 725}
1242 726
1243void ieee80211_send_addba_request(struct net_device *dev, const u8 *da, 727static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
1244 u16 tid, u8 dialog_token, u16 start_seq_num, 728 struct ieee80211_if_sta *ifsta)
1245 u16 agg_size, u16 timeout)
1246{ 729{
1247 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 730 DECLARE_MAC_BUF(mac);
1248 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1249 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1250 struct sk_buff *skb;
1251 struct ieee80211_mgmt *mgmt;
1252 u16 capab;
1253
1254 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1255 731
1256 if (!skb) { 732 ifsta->direct_probe_tries++;
1257 printk(KERN_ERR "%s: failed to allocate buffer " 733 if (ifsta->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
1258 "for addba request frame\n", dev->name); 734 printk(KERN_DEBUG "%s: direct probe to AP %s timed out\n",
735 sdata->dev->name, print_mac(mac, ifsta->bssid));
736 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1259 return; 737 return;
1260 } 738 }
1261 skb_reserve(skb, local->hw.extra_tx_headroom);
1262 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
1263 memset(mgmt, 0, 24);
1264 memcpy(mgmt->da, da, ETH_ALEN);
1265 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
1266 if (sdata->vif.type == IEEE80211_IF_TYPE_AP)
1267 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN);
1268 else
1269 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1270 739
1271 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 740 printk(KERN_DEBUG "%s: direct probe to AP %s try %d\n",
1272 IEEE80211_STYPE_ACTION); 741 sdata->dev->name, print_mac(mac, ifsta->bssid),
742 ifsta->direct_probe_tries);
1273 743
1274 skb_put(skb, 1 + sizeof(mgmt->u.action.u.addba_req)); 744 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
1275 745
1276 mgmt->u.action.category = WLAN_CATEGORY_BACK; 746 set_bit(IEEE80211_STA_REQ_DIRECT_PROBE, &ifsta->request);
1277 mgmt->u.action.u.addba_req.action_code = WLAN_ACTION_ADDBA_REQ;
1278 747
1279 mgmt->u.action.u.addba_req.dialog_token = dialog_token; 748 /* Direct probe is sent to broadcast address as some APs
1280 capab = (u16)(1 << 1); /* bit 1 aggregation policy */ 749 * will not answer to direct packet in unassociated state.
1281 capab |= (u16)(tid << 2); /* bit 5:2 TID number */ 750 */
1282 capab |= (u16)(agg_size << 6); /* bit 15:6 max size of aggergation */ 751 ieee80211_send_probe_req(sdata, NULL,
1283 752 ifsta->ssid, ifsta->ssid_len);
1284 mgmt->u.action.u.addba_req.capab = cpu_to_le16(capab);
1285
1286 mgmt->u.action.u.addba_req.timeout = cpu_to_le16(timeout);
1287 mgmt->u.action.u.addba_req.start_seq_num =
1288 cpu_to_le16(start_seq_num << 4);
1289 753
1290 ieee80211_sta_tx(dev, skb, 0); 754 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
1291} 755}
1292 756
1293static void ieee80211_sta_process_addba_request(struct net_device *dev, 757
1294 struct ieee80211_mgmt *mgmt, 758static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
1295 size_t len) 759 struct ieee80211_if_sta *ifsta)
1296{ 760{
1297 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1298 struct ieee80211_hw *hw = &local->hw;
1299 struct ieee80211_conf *conf = &hw->conf;
1300 struct sta_info *sta;
1301 struct tid_ampdu_rx *tid_agg_rx;
1302 u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num, status;
1303 u8 dialog_token;
1304 int ret = -EOPNOTSUPP;
1305 DECLARE_MAC_BUF(mac); 761 DECLARE_MAC_BUF(mac);
1306 762
1307 rcu_read_lock(); 763 ifsta->auth_tries++;
1308 764 if (ifsta->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
1309 sta = sta_info_get(local, mgmt->sa); 765 printk(KERN_DEBUG "%s: authentication with AP %s"
1310 if (!sta) { 766 " timed out\n",
1311 rcu_read_unlock(); 767 sdata->dev->name, print_mac(mac, ifsta->bssid));
768 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1312 return; 769 return;
1313 } 770 }
1314 771
1315 /* extract session parameters from addba request frame */ 772 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
1316 dialog_token = mgmt->u.action.u.addba_req.dialog_token; 773 printk(KERN_DEBUG "%s: authenticate with AP %s\n",
1317 timeout = le16_to_cpu(mgmt->u.action.u.addba_req.timeout); 774 sdata->dev->name, print_mac(mac, ifsta->bssid));
1318 start_seq_num =
1319 le16_to_cpu(mgmt->u.action.u.addba_req.start_seq_num) >> 4;
1320
1321 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
1322 ba_policy = (capab & IEEE80211_ADDBA_PARAM_POLICY_MASK) >> 1;
1323 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
1324 buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
1325
1326 status = WLAN_STATUS_REQUEST_DECLINED;
1327
1328 /* sanity check for incoming parameters:
1329 * check if configuration can support the BA policy
1330 * and if buffer size does not exceeds max value */
1331 if (((ba_policy != 1)
1332 && (!(conf->ht_conf.cap & IEEE80211_HT_CAP_DELAY_BA)))
1333 || (buf_size > IEEE80211_MAX_AMPDU_BUF)) {
1334 status = WLAN_STATUS_INVALID_QOS_PARAM;
1335#ifdef CONFIG_MAC80211_HT_DEBUG
1336 if (net_ratelimit())
1337 printk(KERN_DEBUG "AddBA Req with bad params from "
1338 "%s on tid %u. policy %d, buffer size %d\n",
1339 print_mac(mac, mgmt->sa), tid, ba_policy,
1340 buf_size);
1341#endif /* CONFIG_MAC80211_HT_DEBUG */
1342 goto end_no_lock;
1343 }
1344 /* determine default buffer size */
1345 if (buf_size == 0) {
1346 struct ieee80211_supported_band *sband;
1347
1348 sband = local->hw.wiphy->bands[conf->channel->band];
1349 buf_size = IEEE80211_MIN_AMPDU_BUF;
1350 buf_size = buf_size << sband->ht_info.ampdu_factor;
1351 }
1352
1353
1354 /* examine state machine */
1355 spin_lock_bh(&sta->lock);
1356 775
1357 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { 776 ieee80211_send_auth(sdata, ifsta, 1, NULL, 0, 0);
1358#ifdef CONFIG_MAC80211_HT_DEBUG
1359 if (net_ratelimit())
1360 printk(KERN_DEBUG "unexpected AddBA Req from "
1361 "%s on tid %u\n",
1362 print_mac(mac, mgmt->sa), tid);
1363#endif /* CONFIG_MAC80211_HT_DEBUG */
1364 goto end;
1365 }
1366 777
1367 /* prepare A-MPDU MLME for Rx aggregation */ 778 mod_timer(&ifsta->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
1368 sta->ampdu_mlme.tid_rx[tid] =
1369 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
1370 if (!sta->ampdu_mlme.tid_rx[tid]) {
1371#ifdef CONFIG_MAC80211_HT_DEBUG
1372 if (net_ratelimit())
1373 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
1374 tid);
1375#endif
1376 goto end;
1377 }
1378 /* rx timer */
1379 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
1380 sta_rx_agg_session_timer_expired;
1381 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
1382 (unsigned long)&sta->timer_to_tid[tid];
1383 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
1384
1385 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
1386
1387 /* prepare reordering buffer */
1388 tid_agg_rx->reorder_buf =
1389 kmalloc(buf_size * sizeof(struct sk_buff *), GFP_ATOMIC);
1390 if (!tid_agg_rx->reorder_buf) {
1391#ifdef CONFIG_MAC80211_HT_DEBUG
1392 if (net_ratelimit())
1393 printk(KERN_ERR "can not allocate reordering buffer "
1394 "to tid %d\n", tid);
1395#endif
1396 kfree(sta->ampdu_mlme.tid_rx[tid]);
1397 goto end;
1398 }
1399 memset(tid_agg_rx->reorder_buf, 0,
1400 buf_size * sizeof(struct sk_buff *));
1401
1402 if (local->ops->ampdu_action)
1403 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_START,
1404 sta->addr, tid, &start_seq_num);
1405#ifdef CONFIG_MAC80211_HT_DEBUG
1406 printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
1407#endif /* CONFIG_MAC80211_HT_DEBUG */
1408
1409 if (ret) {
1410 kfree(tid_agg_rx->reorder_buf);
1411 kfree(tid_agg_rx);
1412 sta->ampdu_mlme.tid_rx[tid] = NULL;
1413 goto end;
1414 }
1415
1416 /* change state and send addba resp */
1417 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL;
1418 tid_agg_rx->dialog_token = dialog_token;
1419 tid_agg_rx->ssn = start_seq_num;
1420 tid_agg_rx->head_seq_num = start_seq_num;
1421 tid_agg_rx->buf_size = buf_size;
1422 tid_agg_rx->timeout = timeout;
1423 tid_agg_rx->stored_mpdu_num = 0;
1424 status = WLAN_STATUS_SUCCESS;
1425end:
1426 spin_unlock_bh(&sta->lock);
1427
1428end_no_lock:
1429 ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
1430 dialog_token, status, 1, buf_size, timeout);
1431 rcu_read_unlock();
1432} 779}
1433 780
1434static void ieee80211_sta_process_addba_resp(struct net_device *dev, 781static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1435 struct ieee80211_mgmt *mgmt, 782 struct ieee80211_if_sta *ifsta, bool deauth,
1436 size_t len) 783 bool self_disconnected, u16 reason)
1437{ 784{
1438 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 785 struct ieee80211_local *local = sdata->local;
1439 struct ieee80211_hw *hw = &local->hw;
1440 struct sta_info *sta; 786 struct sta_info *sta;
1441 u16 capab; 787 u32 changed = BSS_CHANGED_ASSOC;
1442 u16 tid;
1443 u8 *state;
1444 788
1445 rcu_read_lock(); 789 rcu_read_lock();
1446 790
1447 sta = sta_info_get(local, mgmt->sa); 791 sta = sta_info_get(local, ifsta->bssid);
1448 if (!sta) { 792 if (!sta) {
1449 rcu_read_unlock(); 793 rcu_read_unlock();
1450 return; 794 return;
1451 } 795 }
1452 796
1453 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); 797 if (deauth) {
1454 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; 798 ifsta->direct_probe_tries = 0;
799 ifsta->auth_tries = 0;
800 }
801 ifsta->assoc_scan_tries = 0;
802 ifsta->assoc_tries = 0;
1455 803
1456 state = &sta->ampdu_mlme.tid_state_tx[tid]; 804 netif_tx_stop_all_queues(sdata->dev);
805 netif_carrier_off(sdata->dev);
1457 806
1458 spin_lock_bh(&sta->lock); 807 ieee80211_sta_tear_down_BA_sessions(sdata, sta->sta.addr);
1459 808
1460 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 809 if (self_disconnected) {
1461 spin_unlock_bh(&sta->lock); 810 if (deauth)
1462 goto addba_resp_exit; 811 ieee80211_send_deauth_disassoc(sdata,
812 IEEE80211_STYPE_DEAUTH, reason);
813 else
814 ieee80211_send_deauth_disassoc(sdata,
815 IEEE80211_STYPE_DISASSOC, reason);
1463 } 816 }
1464 817
1465 if (mgmt->u.action.u.addba_resp.dialog_token != 818 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
1466 sta->ampdu_mlme.tid_tx[tid]->dialog_token) { 819 changed |= ieee80211_reset_erp_info(sdata);
1467 spin_unlock_bh(&sta->lock);
1468#ifdef CONFIG_MAC80211_HT_DEBUG
1469 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
1470#endif /* CONFIG_MAC80211_HT_DEBUG */
1471 goto addba_resp_exit;
1472 }
1473 820
1474 del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 821 if (sdata->bss_conf.assoc_ht)
1475#ifdef CONFIG_MAC80211_HT_DEBUG 822 changed |= BSS_CHANGED_HT;
1476 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
1477#endif /* CONFIG_MAC80211_HT_DEBUG */
1478 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
1479 == WLAN_STATUS_SUCCESS) {
1480 *state |= HT_ADDBA_RECEIVED_MSK;
1481 sta->ampdu_mlme.addba_req_num[tid] = 0;
1482 823
1483 if (*state == HT_AGG_STATE_OPERATIONAL) 824 sdata->bss_conf.assoc_ht = 0;
1484 ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]); 825 sdata->bss_conf.ht_conf = NULL;
826 sdata->bss_conf.ht_bss_conf = NULL;
1485 827
1486 spin_unlock_bh(&sta->lock); 828 ieee80211_led_assoc(local, 0);
1487 } else { 829 sdata->bss_conf.assoc = 0;
1488 sta->ampdu_mlme.addba_req_num[tid]++; 830
1489 /* this will allow the state check in stop_BA_session */ 831 ieee80211_sta_send_apinfo(sdata, ifsta);
1490 *state = HT_AGG_STATE_OPERATIONAL; 832
1491 spin_unlock_bh(&sta->lock); 833 if (self_disconnected)
1492 ieee80211_stop_tx_ba_session(hw, sta->addr, tid, 834 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1493 WLAN_BACK_INITIATOR); 835
1494 } 836 sta_info_unlink(&sta);
1495 837
1496addba_resp_exit:
1497 rcu_read_unlock(); 838 rcu_read_unlock();
839
840 sta_info_destroy(sta);
1498} 841}
1499 842
1500void ieee80211_send_delba(struct net_device *dev, const u8 *da, u16 tid, 843static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata)
1501 u16 initiator, u16 reason_code)
1502{ 844{
1503 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 845 if (!sdata || !sdata->default_key ||
1504 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 846 sdata->default_key->conf.alg != ALG_WEP)
1505 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 847 return 0;
1506 struct sk_buff *skb; 848 return 1;
1507 struct ieee80211_mgmt *mgmt; 849}
1508 u16 params;
1509
1510 skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
1511
1512 if (!skb) {
1513 printk(KERN_ERR "%s: failed to allocate buffer "
1514 "for delba frame\n", dev->name);
1515 return;
1516 }
1517 850
1518 skb_reserve(skb, local->hw.extra_tx_headroom); 851static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata,
1519 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24); 852 struct ieee80211_if_sta *ifsta)
1520 memset(mgmt, 0, 24); 853{
1521 memcpy(mgmt->da, da, ETH_ALEN); 854 struct ieee80211_local *local = sdata->local;
1522 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 855 struct ieee80211_bss *bss;
1523 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) 856 int bss_privacy;
1524 memcpy(mgmt->bssid, dev->dev_addr, ETH_ALEN); 857 int wep_privacy;
1525 else 858 int privacy_invoked;
1526 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
1527 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1528 IEEE80211_STYPE_ACTION);
1529 859
1530 skb_put(skb, 1 + sizeof(mgmt->u.action.u.delba)); 860 if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
861 return 0;
1531 862
1532 mgmt->u.action.category = WLAN_CATEGORY_BACK; 863 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
1533 mgmt->u.action.u.delba.action_code = WLAN_ACTION_DELBA; 864 local->hw.conf.channel->center_freq,
1534 params = (u16)(initiator << 11); /* bit 11 initiator */ 865 ifsta->ssid, ifsta->ssid_len);
1535 params |= (u16)(tid << 12); /* bit 15:12 TID number */ 866 if (!bss)
867 return 0;
1536 868
1537 mgmt->u.action.u.delba.params = cpu_to_le16(params); 869 bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
1538 mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code); 870 wep_privacy = !!ieee80211_sta_wep_configured(sdata);
871 privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
1539 872
1540 ieee80211_sta_tx(dev, skb, 0); 873 ieee80211_rx_bss_put(local, bss);
1541}
1542 874
1543void ieee80211_send_bar(struct net_device *dev, u8 *ra, u16 tid, u16 ssn) 875 if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked))
1544{ 876 return 0;
1545 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1546 struct sk_buff *skb;
1547 struct ieee80211_bar *bar;
1548 u16 bar_control = 0;
1549 877
1550 skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom); 878 return 1;
1551 if (!skb) {
1552 printk(KERN_ERR "%s: failed to allocate buffer for "
1553 "bar frame\n", dev->name);
1554 return;
1555 }
1556 skb_reserve(skb, local->hw.extra_tx_headroom);
1557 bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
1558 memset(bar, 0, sizeof(*bar));
1559 bar->frame_control = IEEE80211_FC(IEEE80211_FTYPE_CTL,
1560 IEEE80211_STYPE_BACK_REQ);
1561 memcpy(bar->ra, ra, ETH_ALEN);
1562 memcpy(bar->ta, dev->dev_addr, ETH_ALEN);
1563 bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
1564 bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
1565 bar_control |= (u16)(tid << 12);
1566 bar->control = cpu_to_le16(bar_control);
1567 bar->start_seq_num = cpu_to_le16(ssn);
1568
1569 ieee80211_sta_tx(dev, skb, 0);
1570} 879}
1571 880
1572void ieee80211_sta_stop_rx_ba_session(struct net_device *dev, u8 *ra, u16 tid, 881static void ieee80211_associate(struct ieee80211_sub_if_data *sdata,
1573 u16 initiator, u16 reason) 882 struct ieee80211_if_sta *ifsta)
1574{ 883{
1575 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1576 struct ieee80211_hw *hw = &local->hw;
1577 struct sta_info *sta;
1578 int ret, i;
1579 DECLARE_MAC_BUF(mac); 884 DECLARE_MAC_BUF(mac);
1580 885
1581 rcu_read_lock(); 886 ifsta->assoc_tries++;
1582 887 if (ifsta->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
1583 sta = sta_info_get(local, ra); 888 printk(KERN_DEBUG "%s: association with AP %s"
1584 if (!sta) { 889 " timed out\n",
1585 rcu_read_unlock(); 890 sdata->dev->name, print_mac(mac, ifsta->bssid));
891 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1586 return; 892 return;
1587 } 893 }
1588 894
1589 /* check if TID is in operational state */ 895 ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
1590 spin_lock_bh(&sta->lock); 896 printk(KERN_DEBUG "%s: associate with AP %s\n",
1591 if (sta->ampdu_mlme.tid_state_rx[tid] 897 sdata->dev->name, print_mac(mac, ifsta->bssid));
1592 != HT_AGG_STATE_OPERATIONAL) { 898 if (ieee80211_privacy_mismatch(sdata, ifsta)) {
1593 spin_unlock_bh(&sta->lock); 899 printk(KERN_DEBUG "%s: mismatch in privacy configuration and "
1594 rcu_read_unlock(); 900 "mixed-cell disabled - abort association\n", sdata->dev->name);
901 ifsta->state = IEEE80211_STA_MLME_DISABLED;
1595 return; 902 return;
1596 } 903 }
1597 sta->ampdu_mlme.tid_state_rx[tid] =
1598 HT_AGG_STATE_REQ_STOP_BA_MSK |
1599 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
1600 spin_unlock_bh(&sta->lock);
1601
1602 /* stop HW Rx aggregation. ampdu_action existence
1603 * already verified in session init so we add the BUG_ON */
1604 BUG_ON(!local->ops->ampdu_action);
1605
1606#ifdef CONFIG_MAC80211_HT_DEBUG
1607 printk(KERN_DEBUG "Rx BA session stop requested for %s tid %u\n",
1608 print_mac(mac, ra), tid);
1609#endif /* CONFIG_MAC80211_HT_DEBUG */
1610
1611 ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_RX_STOP,
1612 ra, tid, NULL);
1613 if (ret)
1614 printk(KERN_DEBUG "HW problem - can not stop rx "
1615 "aggregation for tid %d\n", tid);
1616
1617 /* shutdown timer has not expired */
1618 if (initiator != WLAN_BACK_TIMER)
1619 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
1620
1621 /* check if this is a self generated aggregation halt */
1622 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER)
1623 ieee80211_send_delba(dev, ra, tid, 0, reason);
1624
1625 /* free the reordering buffer */
1626 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) {
1627 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) {
1628 /* release the reordered frames */
1629 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]);
1630 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--;
1631 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL;
1632 }
1633 }
1634 /* free resources */
1635 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf);
1636 kfree(sta->ampdu_mlme.tid_rx[tid]);
1637 sta->ampdu_mlme.tid_rx[tid] = NULL;
1638 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
1639 904
1640 rcu_read_unlock(); 905 ieee80211_send_assoc(sdata, ifsta);
906
907 mod_timer(&ifsta->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
1641} 908}
1642 909
1643 910
1644static void ieee80211_sta_process_delba(struct net_device *dev, 911static void ieee80211_associated(struct ieee80211_sub_if_data *sdata,
1645 struct ieee80211_mgmt *mgmt, size_t len) 912 struct ieee80211_if_sta *ifsta)
1646{ 913{
1647 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 914 struct ieee80211_local *local = sdata->local;
1648 struct sta_info *sta; 915 struct sta_info *sta;
1649 u16 tid, params; 916 int disassoc;
1650 u16 initiator;
1651 DECLARE_MAC_BUF(mac); 917 DECLARE_MAC_BUF(mac);
1652 918
1653 rcu_read_lock(); 919 /* TODO: start monitoring current AP signal quality and number of
1654 920 * missed beacons. Scan other channels every now and then and search
1655 sta = sta_info_get(local, mgmt->sa); 921 * for better APs. */
1656 if (!sta) { 922 /* TODO: remove expired BSSes */
1657 rcu_read_unlock();
1658 return;
1659 }
1660
1661 params = le16_to_cpu(mgmt->u.action.u.delba.params);
1662 tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
1663 initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
1664
1665#ifdef CONFIG_MAC80211_HT_DEBUG
1666 if (net_ratelimit())
1667 printk(KERN_DEBUG "delba from %s (%s) tid %d reason code %d\n",
1668 print_mac(mac, mgmt->sa),
1669 initiator ? "initiator" : "recipient", tid,
1670 mgmt->u.action.u.delba.reason_code);
1671#endif /* CONFIG_MAC80211_HT_DEBUG */
1672
1673 if (initiator == WLAN_BACK_INITIATOR)
1674 ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
1675 WLAN_BACK_INITIATOR, 0);
1676 else { /* WLAN_BACK_RECIPIENT */
1677 spin_lock_bh(&sta->lock);
1678 sta->ampdu_mlme.tid_state_tx[tid] =
1679 HT_AGG_STATE_OPERATIONAL;
1680 spin_unlock_bh(&sta->lock);
1681 ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
1682 WLAN_BACK_RECIPIENT);
1683 }
1684 rcu_read_unlock();
1685}
1686 923
1687/* 924 ifsta->state = IEEE80211_STA_MLME_ASSOCIATED;
1688 * After sending add Block Ack request we activated a timer until
1689 * add Block Ack response will arrive from the recipient.
1690 * If this timer expires sta_addba_resp_timer_expired will be executed.
1691 */
1692void sta_addba_resp_timer_expired(unsigned long data)
1693{
1694 /* not an elegant detour, but there is no choice as the timer passes
1695 * only one argument, and both sta_info and TID are needed, so init
1696 * flow in sta_info_create gives the TID as data, while the timer_to_id
1697 * array gives the sta through container_of */
1698 u16 tid = *(u8 *)data;
1699 struct sta_info *temp_sta = container_of((void *)data,
1700 struct sta_info, timer_to_tid[tid]);
1701
1702 struct ieee80211_local *local = temp_sta->local;
1703 struct ieee80211_hw *hw = &local->hw;
1704 struct sta_info *sta;
1705 u8 *state;
1706 925
1707 rcu_read_lock(); 926 rcu_read_lock();
1708 927
1709 sta = sta_info_get(local, temp_sta->addr); 928 sta = sta_info_get(local, ifsta->bssid);
1710 if (!sta) { 929 if (!sta) {
1711 rcu_read_unlock(); 930 printk(KERN_DEBUG "%s: No STA entry for own AP %s\n",
1712 return; 931 sdata->dev->name, print_mac(mac, ifsta->bssid));
1713 } 932 disassoc = 1;
1714 933 } else {
1715 state = &sta->ampdu_mlme.tid_state_tx[tid]; 934 disassoc = 0;
1716 /* check if the TID waits for addBA response */ 935 if (time_after(jiffies,
1717 spin_lock_bh(&sta->lock); 936 sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
1718 if (!(*state & HT_ADDBA_REQUESTED_MSK)) { 937 if (ifsta->flags & IEEE80211_STA_PROBEREQ_POLL) {
1719 spin_unlock_bh(&sta->lock); 938 printk(KERN_DEBUG "%s: No ProbeResp from "
1720 *state = HT_AGG_STATE_IDLE; 939 "current AP %s - assume out of "
1721#ifdef CONFIG_MAC80211_HT_DEBUG 940 "range\n",
1722 printk(KERN_DEBUG "timer expired on tid %d but we are not " 941 sdata->dev->name, print_mac(mac, ifsta->bssid));
1723 "expecting addBA response there", tid); 942 disassoc = 1;
1724#endif 943 } else
1725 goto timer_expired_exit; 944 ieee80211_send_probe_req(sdata, ifsta->bssid,
945 ifsta->ssid,
946 ifsta->ssid_len);
947 ifsta->flags ^= IEEE80211_STA_PROBEREQ_POLL;
948 } else {
949 ifsta->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
950 if (time_after(jiffies, ifsta->last_probe +
951 IEEE80211_PROBE_INTERVAL)) {
952 ifsta->last_probe = jiffies;
953 ieee80211_send_probe_req(sdata, ifsta->bssid,
954 ifsta->ssid,
955 ifsta->ssid_len);
956 }
957 }
1726 } 958 }
1727 959
1728#ifdef CONFIG_MAC80211_HT_DEBUG
1729 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
1730#endif
1731
1732 /* go through the state check in stop_BA_session */
1733 *state = HT_AGG_STATE_OPERATIONAL;
1734 spin_unlock_bh(&sta->lock);
1735 ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
1736 WLAN_BACK_INITIATOR);
1737
1738timer_expired_exit:
1739 rcu_read_unlock(); 960 rcu_read_unlock();
1740}
1741 961
1742/* 962 if (disassoc)
1743 * After accepting the AddBA Request we activated a timer, 963 ieee80211_set_disassoc(sdata, ifsta, true, true,
1744 * resetting it after each frame that arrives from the originator. 964 WLAN_REASON_PREV_AUTH_NOT_VALID);
1745 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed. 965 else
1746 */ 966 mod_timer(&ifsta->timer, jiffies +
1747static void sta_rx_agg_session_timer_expired(unsigned long data) 967 IEEE80211_MONITORING_INTERVAL);
1748{
1749 /* not an elegant detour, but there is no choice as the timer passes
1750 * only one argument, and various sta_info are needed here, so init
1751 * flow in sta_info_create gives the TID as data, while the timer_to_id
1752 * array gives the sta through container_of */
1753 u8 *ptid = (u8 *)data;
1754 u8 *timer_to_id = ptid - *ptid;
1755 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
1756 timer_to_tid[0]);
1757
1758#ifdef CONFIG_MAC80211_HT_DEBUG
1759 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
1760#endif
1761 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr,
1762 (u16)*ptid, WLAN_BACK_TIMER,
1763 WLAN_REASON_QSTA_TIMEOUT);
1764} 968}
1765 969
1766void ieee80211_sta_tear_down_BA_sessions(struct net_device *dev, u8 *addr)
1767{
1768 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1769 int i;
1770 970
1771 for (i = 0; i < STA_TID_NUM; i++) { 971static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
1772 ieee80211_stop_tx_ba_session(&local->hw, addr, i, 972 struct ieee80211_if_sta *ifsta)
1773 WLAN_BACK_INITIATOR); 973{
1774 ieee80211_sta_stop_rx_ba_session(dev, addr, i, 974 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
1775 WLAN_BACK_RECIPIENT, 975 ifsta->flags |= IEEE80211_STA_AUTHENTICATED;
1776 WLAN_REASON_QSTA_LEAVE_QBSS); 976 ieee80211_associate(sdata, ifsta);
1777 }
1778} 977}
1779 978
1780static void ieee80211_send_refuse_measurement_request(struct net_device *dev,
1781 struct ieee80211_msrment_ie *request_ie,
1782 const u8 *da, const u8 *bssid,
1783 u8 dialog_token)
1784{
1785 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1786 struct sk_buff *skb;
1787 struct ieee80211_mgmt *msr_report;
1788 979
1789 skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom + 980static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
1790 sizeof(struct ieee80211_msrment_ie)); 981 struct ieee80211_if_sta *ifsta,
982 struct ieee80211_mgmt *mgmt,
983 size_t len)
984{
985 u8 *pos;
986 struct ieee802_11_elems elems;
1791 987
1792 if (!skb) { 988 pos = mgmt->u.auth.variable;
1793 printk(KERN_ERR "%s: failed to allocate buffer for " 989 ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
1794 "measurement report frame\n", dev->name); 990 if (!elems.challenge)
1795 return; 991 return;
1796 } 992 ieee80211_send_auth(sdata, ifsta, 3, elems.challenge - 2,
1797 993 elems.challenge_len + 2, 1);
1798 skb_reserve(skb, local->hw.extra_tx_headroom);
1799 msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
1800 memset(msr_report, 0, 24);
1801 memcpy(msr_report->da, da, ETH_ALEN);
1802 memcpy(msr_report->sa, dev->dev_addr, ETH_ALEN);
1803 memcpy(msr_report->bssid, bssid, ETH_ALEN);
1804 msr_report->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT,
1805 IEEE80211_STYPE_ACTION);
1806
1807 skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
1808 msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
1809 msr_report->u.action.u.measurement.action_code =
1810 WLAN_ACTION_SPCT_MSR_RPRT;
1811 msr_report->u.action.u.measurement.dialog_token = dialog_token;
1812
1813 msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
1814 msr_report->u.action.u.measurement.length =
1815 sizeof(struct ieee80211_msrment_ie);
1816
1817 memset(&msr_report->u.action.u.measurement.msr_elem, 0,
1818 sizeof(struct ieee80211_msrment_ie));
1819 msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
1820 msr_report->u.action.u.measurement.msr_elem.mode |=
1821 IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
1822 msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;
1823
1824 ieee80211_sta_tx(dev, skb, 0);
1825}
1826
1827static void ieee80211_sta_process_measurement_req(struct net_device *dev,
1828 struct ieee80211_mgmt *mgmt,
1829 size_t len)
1830{
1831 /*
1832 * Ignoring measurement request is spec violation.
1833 * Mandatory measurements must be reported optional
1834 * measurements might be refused or reported incapable
1835 * For now just refuse
1836 * TODO: Answer basic measurement as unmeasured
1837 */
1838 ieee80211_send_refuse_measurement_request(dev,
1839 &mgmt->u.action.u.measurement.msr_elem,
1840 mgmt->sa, mgmt->bssid,
1841 mgmt->u.action.u.measurement.dialog_token);
1842} 994}
1843 995
1844 996static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1845static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1846 struct ieee80211_if_sta *ifsta, 997 struct ieee80211_if_sta *ifsta,
1847 struct ieee80211_mgmt *mgmt, 998 struct ieee80211_mgmt *mgmt,
1848 size_t len) 999 size_t len)
1849{ 1000{
1850 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1851 u16 auth_alg, auth_transaction, status_code; 1001 u16 auth_alg, auth_transaction, status_code;
1852 DECLARE_MAC_BUF(mac); 1002 DECLARE_MAC_BUF(mac);
1853 1003
1854 if (ifsta->state != IEEE80211_AUTHENTICATE && 1004 if (ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
1855 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 1005 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1856 return; 1006 return;
1857 1007
1858 if (len < 24 + 6) 1008 if (len < 24 + 6)
1859 return; 1009 return;
1860 1010
1861 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1011 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1862 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0) 1012 memcmp(ifsta->bssid, mgmt->sa, ETH_ALEN) != 0)
1863 return; 1013 return;
1864 1014
1865 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1015 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1866 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 1016 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
1867 return; 1017 return;
1868 1018
@@ -1870,7 +1020,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1870 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 1020 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
1871 status_code = le16_to_cpu(mgmt->u.auth.status_code); 1021 status_code = le16_to_cpu(mgmt->u.auth.status_code);
1872 1022
1873 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 1023 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1874 /* 1024 /*
1875 * IEEE 802.11 standard does not require authentication in IBSS 1025 * IEEE 802.11 standard does not require authentication in IBSS
1876 * networks and most implementations do not seem to use it. 1026 * networks and most implementations do not seem to use it.
@@ -1879,7 +1029,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1879 */ 1029 */
1880 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1) 1030 if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
1881 return; 1031 return;
1882 ieee80211_send_auth(dev, ifsta, 2, NULL, 0, 0); 1032 ieee80211_send_auth(sdata, ifsta, 2, NULL, 0, 0);
1883 } 1033 }
1884 1034
1885 if (auth_alg != ifsta->auth_alg || 1035 if (auth_alg != ifsta->auth_alg ||
@@ -1912,7 +1062,7 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1912 algs[pos] == 0xff) 1062 algs[pos] == 0xff)
1913 continue; 1063 continue;
1914 if (algs[pos] == WLAN_AUTH_SHARED_KEY && 1064 if (algs[pos] == WLAN_AUTH_SHARED_KEY &&
1915 !ieee80211_sta_wep_configured(dev)) 1065 !ieee80211_sta_wep_configured(sdata))
1916 continue; 1066 continue;
1917 ifsta->auth_alg = algs[pos]; 1067 ifsta->auth_alg = algs[pos];
1918 break; 1068 break;
@@ -1924,19 +1074,19 @@ static void ieee80211_rx_mgmt_auth(struct net_device *dev,
1924 switch (ifsta->auth_alg) { 1074 switch (ifsta->auth_alg) {
1925 case WLAN_AUTH_OPEN: 1075 case WLAN_AUTH_OPEN:
1926 case WLAN_AUTH_LEAP: 1076 case WLAN_AUTH_LEAP:
1927 ieee80211_auth_completed(dev, ifsta); 1077 ieee80211_auth_completed(sdata, ifsta);
1928 break; 1078 break;
1929 case WLAN_AUTH_SHARED_KEY: 1079 case WLAN_AUTH_SHARED_KEY:
1930 if (ifsta->auth_transaction == 4) 1080 if (ifsta->auth_transaction == 4)
1931 ieee80211_auth_completed(dev, ifsta); 1081 ieee80211_auth_completed(sdata, ifsta);
1932 else 1082 else
1933 ieee80211_auth_challenge(dev, ifsta, mgmt, len); 1083 ieee80211_auth_challenge(sdata, ifsta, mgmt, len);
1934 break; 1084 break;
1935 } 1085 }
1936} 1086}
1937 1087
1938 1088
1939static void ieee80211_rx_mgmt_deauth(struct net_device *dev, 1089static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1940 struct ieee80211_if_sta *ifsta, 1090 struct ieee80211_if_sta *ifsta,
1941 struct ieee80211_mgmt *mgmt, 1091 struct ieee80211_mgmt *mgmt,
1942 size_t len) 1092 size_t len)
@@ -1953,22 +1103,22 @@ static void ieee80211_rx_mgmt_deauth(struct net_device *dev,
1953 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 1103 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
1954 1104
1955 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) 1105 if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
1956 printk(KERN_DEBUG "%s: deauthenticated\n", dev->name); 1106 printk(KERN_DEBUG "%s: deauthenticated\n", sdata->dev->name);
1957 1107
1958 if (ifsta->state == IEEE80211_AUTHENTICATE || 1108 if (ifsta->state == IEEE80211_STA_MLME_AUTHENTICATE ||
1959 ifsta->state == IEEE80211_ASSOCIATE || 1109 ifsta->state == IEEE80211_STA_MLME_ASSOCIATE ||
1960 ifsta->state == IEEE80211_ASSOCIATED) { 1110 ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
1961 ifsta->state = IEEE80211_AUTHENTICATE; 1111 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
1962 mod_timer(&ifsta->timer, jiffies + 1112 mod_timer(&ifsta->timer, jiffies +
1963 IEEE80211_RETRY_AUTH_INTERVAL); 1113 IEEE80211_RETRY_AUTH_INTERVAL);
1964 } 1114 }
1965 1115
1966 ieee80211_set_disassoc(dev, ifsta, 1); 1116 ieee80211_set_disassoc(sdata, ifsta, true, false, 0);
1967 ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED; 1117 ifsta->flags &= ~IEEE80211_STA_AUTHENTICATED;
1968} 1118}
1969 1119
1970 1120
1971static void ieee80211_rx_mgmt_disassoc(struct net_device *dev, 1121static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1972 struct ieee80211_if_sta *ifsta, 1122 struct ieee80211_if_sta *ifsta,
1973 struct ieee80211_mgmt *mgmt, 1123 struct ieee80211_mgmt *mgmt,
1974 size_t len) 1124 size_t len)
@@ -1985,15 +1135,15 @@ static void ieee80211_rx_mgmt_disassoc(struct net_device *dev,
1985 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 1135 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
1986 1136
1987 if (ifsta->flags & IEEE80211_STA_ASSOCIATED) 1137 if (ifsta->flags & IEEE80211_STA_ASSOCIATED)
1988 printk(KERN_DEBUG "%s: disassociated\n", dev->name); 1138 printk(KERN_DEBUG "%s: disassociated\n", sdata->dev->name);
1989 1139
1990 if (ifsta->state == IEEE80211_ASSOCIATED) { 1140 if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED) {
1991 ifsta->state = IEEE80211_ASSOCIATE; 1141 ifsta->state = IEEE80211_STA_MLME_ASSOCIATE;
1992 mod_timer(&ifsta->timer, jiffies + 1142 mod_timer(&ifsta->timer, jiffies +
1993 IEEE80211_RETRY_AUTH_INTERVAL); 1143 IEEE80211_RETRY_AUTH_INTERVAL);
1994 } 1144 }
1995 1145
1996 ieee80211_set_disassoc(dev, ifsta, 0); 1146 ieee80211_set_disassoc(sdata, ifsta, false, false, 0);
1997} 1147}
1998 1148
1999 1149
@@ -2004,7 +1154,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2004 int reassoc) 1154 int reassoc)
2005{ 1155{
2006 struct ieee80211_local *local = sdata->local; 1156 struct ieee80211_local *local = sdata->local;
2007 struct net_device *dev = sdata->dev;
2008 struct ieee80211_supported_band *sband; 1157 struct ieee80211_supported_band *sband;
2009 struct sta_info *sta; 1158 struct sta_info *sta;
2010 u64 rates, basic_rates; 1159 u64 rates, basic_rates;
@@ -2019,7 +1168,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2019 /* AssocResp and ReassocResp have identical structure, so process both 1168 /* AssocResp and ReassocResp have identical structure, so process both
2020 * of them in this function. */ 1169 * of them in this function. */
2021 1170
2022 if (ifsta->state != IEEE80211_ASSOCIATE) 1171 if (ifsta->state != IEEE80211_STA_MLME_ASSOCIATE)
2023 return; 1172 return;
2024 1173
2025 if (len < 24 + 6) 1174 if (len < 24 + 6)
@@ -2034,12 +1183,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2034 1183
2035 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x " 1184 printk(KERN_DEBUG "%s: RX %sssocResp from %s (capab=0x%x "
2036 "status=%d aid=%d)\n", 1185 "status=%d aid=%d)\n",
2037 dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa), 1186 sdata->dev->name, reassoc ? "Rea" : "A", print_mac(mac, mgmt->sa),
2038 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14)))); 1187 capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
2039 1188
2040 if (status_code != WLAN_STATUS_SUCCESS) { 1189 if (status_code != WLAN_STATUS_SUCCESS) {
2041 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", 1190 printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
2042 dev->name, status_code); 1191 sdata->dev->name, status_code);
2043 /* if this was a reassociation, ensure we try a "full" 1192 /* if this was a reassociation, ensure we try a "full"
2044 * association next time. This works around some broken APs 1193 * association next time. This works around some broken APs
2045 * which do not correctly reject reassociation requests. */ 1194 * which do not correctly reject reassociation requests. */
@@ -2049,7 +1198,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2049 1198
2050 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) 1199 if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
2051 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not " 1200 printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
2052 "set\n", dev->name, aid); 1201 "set\n", sdata->dev->name, aid);
2053 aid &= ~(BIT(15) | BIT(14)); 1202 aid &= ~(BIT(15) | BIT(14));
2054 1203
2055 pos = mgmt->u.assoc_resp.variable; 1204 pos = mgmt->u.assoc_resp.variable;
@@ -2057,11 +1206,11 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2057 1206
2058 if (!elems.supp_rates) { 1207 if (!elems.supp_rates) {
2059 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n", 1208 printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
2060 dev->name); 1209 sdata->dev->name);
2061 return; 1210 return;
2062 } 1211 }
2063 1212
2064 printk(KERN_DEBUG "%s: associated\n", dev->name); 1213 printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
2065 ifsta->aid = aid; 1214 ifsta->aid = aid;
2066 ifsta->ap_capab = capab_info; 1215 ifsta->ap_capab = capab_info;
2067 1216
@@ -2076,17 +1225,17 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2076 /* Add STA entry for the AP */ 1225 /* Add STA entry for the AP */
2077 sta = sta_info_get(local, ifsta->bssid); 1226 sta = sta_info_get(local, ifsta->bssid);
2078 if (!sta) { 1227 if (!sta) {
2079 struct ieee80211_sta_bss *bss; 1228 struct ieee80211_bss *bss;
2080 int err; 1229 int err;
2081 1230
2082 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC); 1231 sta = sta_info_alloc(sdata, ifsta->bssid, GFP_ATOMIC);
2083 if (!sta) { 1232 if (!sta) {
2084 printk(KERN_DEBUG "%s: failed to alloc STA entry for" 1233 printk(KERN_DEBUG "%s: failed to alloc STA entry for"
2085 " the AP\n", dev->name); 1234 " the AP\n", sdata->dev->name);
2086 rcu_read_unlock(); 1235 rcu_read_unlock();
2087 return; 1236 return;
2088 } 1237 }
2089 bss = ieee80211_rx_bss_get(dev, ifsta->bssid, 1238 bss = ieee80211_rx_bss_get(local, ifsta->bssid,
2090 local->hw.conf.channel->center_freq, 1239 local->hw.conf.channel->center_freq,
2091 ifsta->ssid, ifsta->ssid_len); 1240 ifsta->ssid, ifsta->ssid_len);
2092 if (bss) { 1241 if (bss) {
@@ -2099,7 +1248,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2099 err = sta_info_insert(sta); 1248 err = sta_info_insert(sta);
2100 if (err) { 1249 if (err) {
2101 printk(KERN_DEBUG "%s: failed to insert STA entry for" 1250 printk(KERN_DEBUG "%s: failed to insert STA entry for"
2102 " the AP (error %d)\n", dev->name, err); 1251 " the AP (error %d)\n", sdata->dev->name, err);
2103 rcu_read_unlock(); 1252 rcu_read_unlock();
2104 return; 1253 return;
2105 } 1254 }
@@ -2152,8 +1301,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2152 } 1301 }
2153 } 1302 }
2154 1303
2155 sta->supp_rates[local->hw.conf.channel->band] = rates; 1304 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
2156 sdata->basic_rates = basic_rates; 1305 sdata->bss_conf.basic_rates = basic_rates;
2157 1306
2158 /* cf. IEEE 802.11 9.2.12 */ 1307 /* cf. IEEE 802.11 9.2.12 */
2159 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && 1308 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
@@ -2167,19 +1316,19 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2167 struct ieee80211_ht_bss_info bss_info; 1316 struct ieee80211_ht_bss_info bss_info;
2168 ieee80211_ht_cap_ie_to_ht_info( 1317 ieee80211_ht_cap_ie_to_ht_info(
2169 (struct ieee80211_ht_cap *) 1318 (struct ieee80211_ht_cap *)
2170 elems.ht_cap_elem, &sta->ht_info); 1319 elems.ht_cap_elem, &sta->sta.ht_info);
2171 ieee80211_ht_addt_info_ie_to_ht_bss_info( 1320 ieee80211_ht_addt_info_ie_to_ht_bss_info(
2172 (struct ieee80211_ht_addt_info *) 1321 (struct ieee80211_ht_addt_info *)
2173 elems.ht_info_elem, &bss_info); 1322 elems.ht_info_elem, &bss_info);
2174 ieee80211_handle_ht(local, 1, &sta->ht_info, &bss_info); 1323 ieee80211_handle_ht(local, 1, &sta->sta.ht_info, &bss_info);
2175 } 1324 }
2176 1325
2177 rate_control_rate_init(sta, local); 1326 rate_control_rate_init(sta);
2178 1327
2179 if (elems.wmm_param) { 1328 if (elems.wmm_param) {
2180 set_sta_flags(sta, WLAN_STA_WME); 1329 set_sta_flags(sta, WLAN_STA_WME);
2181 rcu_read_unlock(); 1330 rcu_read_unlock();
2182 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 1331 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
2183 elems.wmm_param_len); 1332 elems.wmm_param_len);
2184 } else 1333 } else
2185 rcu_read_unlock(); 1334 rcu_read_unlock();
@@ -2188,234 +1337,26 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
2188 * ieee80211_set_associated() will tell the driver */ 1337 * ieee80211_set_associated() will tell the driver */
2189 bss_conf->aid = aid; 1338 bss_conf->aid = aid;
2190 bss_conf->assoc_capability = capab_info; 1339 bss_conf->assoc_capability = capab_info;
2191 ieee80211_set_associated(dev, ifsta, 1); 1340 ieee80211_set_associated(sdata, ifsta);
2192 1341
2193 ieee80211_associated(dev, ifsta); 1342 ieee80211_associated(sdata, ifsta);
2194} 1343}
2195 1344
2196 1345
2197/* Caller must hold local->sta_bss_lock */ 1346static int ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
2198static void __ieee80211_rx_bss_hash_add(struct net_device *dev,
2199 struct ieee80211_sta_bss *bss)
2200{
2201 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2202 u8 hash_idx;
2203
2204 if (bss_mesh_cfg(bss))
2205 hash_idx = mesh_id_hash(bss_mesh_id(bss),
2206 bss_mesh_id_len(bss));
2207 else
2208 hash_idx = STA_HASH(bss->bssid);
2209
2210 bss->hnext = local->sta_bss_hash[hash_idx];
2211 local->sta_bss_hash[hash_idx] = bss;
2212}
2213
2214
2215/* Caller must hold local->sta_bss_lock */
2216static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
2217 struct ieee80211_sta_bss *bss)
2218{
2219 struct ieee80211_sta_bss *b, *prev = NULL;
2220 b = local->sta_bss_hash[STA_HASH(bss->bssid)];
2221 while (b) {
2222 if (b == bss) {
2223 if (!prev)
2224 local->sta_bss_hash[STA_HASH(bss->bssid)] =
2225 bss->hnext;
2226 else
2227 prev->hnext = bss->hnext;
2228 break;
2229 }
2230 prev = b;
2231 b = b->hnext;
2232 }
2233}
2234
2235
2236static struct ieee80211_sta_bss *
2237ieee80211_rx_bss_add(struct net_device *dev, u8 *bssid, int freq,
2238 u8 *ssid, u8 ssid_len)
2239{
2240 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2241 struct ieee80211_sta_bss *bss;
2242
2243 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
2244 if (!bss)
2245 return NULL;
2246 atomic_inc(&bss->users);
2247 atomic_inc(&bss->users);
2248 memcpy(bss->bssid, bssid, ETH_ALEN);
2249 bss->freq = freq;
2250 if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
2251 memcpy(bss->ssid, ssid, ssid_len);
2252 bss->ssid_len = ssid_len;
2253 }
2254
2255 spin_lock_bh(&local->sta_bss_lock);
2256 /* TODO: order by RSSI? */
2257 list_add_tail(&bss->list, &local->sta_bss_list);
2258 __ieee80211_rx_bss_hash_add(dev, bss);
2259 spin_unlock_bh(&local->sta_bss_lock);
2260 return bss;
2261}
2262
2263static struct ieee80211_sta_bss *
2264ieee80211_rx_bss_get(struct net_device *dev, u8 *bssid, int freq,
2265 u8 *ssid, u8 ssid_len)
2266{
2267 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2268 struct ieee80211_sta_bss *bss;
2269
2270 spin_lock_bh(&local->sta_bss_lock);
2271 bss = local->sta_bss_hash[STA_HASH(bssid)];
2272 while (bss) {
2273 if (!bss_mesh_cfg(bss) &&
2274 !memcmp(bss->bssid, bssid, ETH_ALEN) &&
2275 bss->freq == freq &&
2276 bss->ssid_len == ssid_len &&
2277 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
2278 atomic_inc(&bss->users);
2279 break;
2280 }
2281 bss = bss->hnext;
2282 }
2283 spin_unlock_bh(&local->sta_bss_lock);
2284 return bss;
2285}
2286
2287#ifdef CONFIG_MAC80211_MESH
2288static struct ieee80211_sta_bss *
2289ieee80211_rx_mesh_bss_get(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2290 u8 *mesh_cfg, int freq)
2291{
2292 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2293 struct ieee80211_sta_bss *bss;
2294
2295 spin_lock_bh(&local->sta_bss_lock);
2296 bss = local->sta_bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
2297 while (bss) {
2298 if (bss_mesh_cfg(bss) &&
2299 !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
2300 bss->freq == freq &&
2301 mesh_id_len == bss->mesh_id_len &&
2302 (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
2303 mesh_id_len))) {
2304 atomic_inc(&bss->users);
2305 break;
2306 }
2307 bss = bss->hnext;
2308 }
2309 spin_unlock_bh(&local->sta_bss_lock);
2310 return bss;
2311}
2312
2313static struct ieee80211_sta_bss *
2314ieee80211_rx_mesh_bss_add(struct net_device *dev, u8 *mesh_id, int mesh_id_len,
2315 u8 *mesh_cfg, int mesh_config_len, int freq)
2316{
2317 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2318 struct ieee80211_sta_bss *bss;
2319
2320 if (mesh_config_len != MESH_CFG_LEN)
2321 return NULL;
2322
2323 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
2324 if (!bss)
2325 return NULL;
2326
2327 bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
2328 if (!bss->mesh_cfg) {
2329 kfree(bss);
2330 return NULL;
2331 }
2332
2333 if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
2334 bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
2335 if (!bss->mesh_id) {
2336 kfree(bss->mesh_cfg);
2337 kfree(bss);
2338 return NULL;
2339 }
2340 memcpy(bss->mesh_id, mesh_id, mesh_id_len);
2341 }
2342
2343 atomic_inc(&bss->users);
2344 atomic_inc(&bss->users);
2345 memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
2346 bss->mesh_id_len = mesh_id_len;
2347 bss->freq = freq;
2348 spin_lock_bh(&local->sta_bss_lock);
2349 /* TODO: order by RSSI? */
2350 list_add_tail(&bss->list, &local->sta_bss_list);
2351 __ieee80211_rx_bss_hash_add(dev, bss);
2352 spin_unlock_bh(&local->sta_bss_lock);
2353 return bss;
2354}
2355#endif
2356
2357static void ieee80211_rx_bss_free(struct ieee80211_sta_bss *bss)
2358{
2359 kfree(bss->wpa_ie);
2360 kfree(bss->rsn_ie);
2361 kfree(bss->wmm_ie);
2362 kfree(bss->ht_ie);
2363 kfree(bss->ht_add_ie);
2364 kfree(bss_mesh_id(bss));
2365 kfree(bss_mesh_cfg(bss));
2366 kfree(bss);
2367}
2368
2369
2370static void ieee80211_rx_bss_put(struct ieee80211_local *local,
2371 struct ieee80211_sta_bss *bss)
2372{
2373 local_bh_disable();
2374 if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) {
2375 local_bh_enable();
2376 return;
2377 }
2378
2379 __ieee80211_rx_bss_hash_del(local, bss);
2380 list_del(&bss->list);
2381 spin_unlock_bh(&local->sta_bss_lock);
2382 ieee80211_rx_bss_free(bss);
2383}
2384
2385
2386void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
2387{
2388 spin_lock_init(&local->sta_bss_lock);
2389 INIT_LIST_HEAD(&local->sta_bss_list);
2390}
2391
2392
2393void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
2394{
2395 struct ieee80211_sta_bss *bss, *tmp;
2396
2397 list_for_each_entry_safe(bss, tmp, &local->sta_bss_list, list)
2398 ieee80211_rx_bss_put(local, bss);
2399}
2400
2401
2402static int ieee80211_sta_join_ibss(struct net_device *dev,
2403 struct ieee80211_if_sta *ifsta, 1347 struct ieee80211_if_sta *ifsta,
2404 struct ieee80211_sta_bss *bss) 1348 struct ieee80211_bss *bss)
2405{ 1349{
2406 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1350 struct ieee80211_local *local = sdata->local;
2407 int res, rates, i, j; 1351 int res, rates, i, j;
2408 struct sk_buff *skb; 1352 struct sk_buff *skb;
2409 struct ieee80211_mgmt *mgmt; 1353 struct ieee80211_mgmt *mgmt;
2410 u8 *pos; 1354 u8 *pos;
2411 struct ieee80211_sub_if_data *sdata;
2412 struct ieee80211_supported_band *sband; 1355 struct ieee80211_supported_band *sband;
2413 union iwreq_data wrqu; 1356 union iwreq_data wrqu;
2414 1357
2415 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1358 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2416 1359
2417 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2418
2419 /* Remove possible STA entries from other IBSS networks. */ 1360 /* Remove possible STA entries from other IBSS networks. */
2420 sta_info_flush_delayed(sdata); 1361 sta_info_flush_delayed(sdata);
2421 1362
@@ -2433,7 +1374,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2433 sdata->drop_unencrypted = bss->capability & 1374 sdata->drop_unencrypted = bss->capability &
2434 WLAN_CAPABILITY_PRIVACY ? 1 : 0; 1375 WLAN_CAPABILITY_PRIVACY ? 1 : 0;
2435 1376
2436 res = ieee80211_set_freq(dev, bss->freq); 1377 res = ieee80211_set_freq(sdata, bss->freq);
2437 1378
2438 if (res) 1379 if (res)
2439 return res; 1380 return res;
@@ -2446,10 +1387,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2446 mgmt = (struct ieee80211_mgmt *) 1387 mgmt = (struct ieee80211_mgmt *)
2447 skb_put(skb, 24 + sizeof(mgmt->u.beacon)); 1388 skb_put(skb, 24 + sizeof(mgmt->u.beacon));
2448 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); 1389 memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon));
2449 mgmt->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1390 mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2450 IEEE80211_STYPE_PROBE_RESP); 1391 IEEE80211_STYPE_PROBE_RESP);
2451 memset(mgmt->da, 0xff, ETH_ALEN); 1392 memset(mgmt->da, 0xff, ETH_ALEN);
2452 memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); 1393 memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
2453 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN); 1394 memcpy(mgmt->bssid, ifsta->bssid, ETH_ALEN);
2454 mgmt->u.beacon.beacon_int = 1395 mgmt->u.beacon.beacon_int =
2455 cpu_to_le16(local->hw.conf.beacon_int); 1396 cpu_to_le16(local->hw.conf.beacon_int);
@@ -2506,108 +1447,38 @@ static int ieee80211_sta_join_ibss(struct net_device *dev,
2506 } 1447 }
2507 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates; 1448 ifsta->supp_rates_bits[local->hw.conf.channel->band] = rates;
2508 1449
2509 ieee80211_sta_def_wmm_params(dev, bss, 1); 1450 ieee80211_sta_def_wmm_params(sdata, bss);
2510 1451
2511 ifsta->state = IEEE80211_IBSS_JOINED; 1452 ifsta->state = IEEE80211_STA_MLME_IBSS_JOINED;
2512 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 1453 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
2513 1454
1455 ieee80211_led_assoc(local, true);
1456
2514 memset(&wrqu, 0, sizeof(wrqu)); 1457 memset(&wrqu, 0, sizeof(wrqu));
2515 memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); 1458 memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN);
2516 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 1459 wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
2517 1460
2518 return res; 1461 return res;
2519} 1462}
2520 1463
2521u64 ieee80211_sta_get_rates(struct ieee80211_local *local, 1464static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2522 struct ieee802_11_elems *elems,
2523 enum ieee80211_band band)
2524{
2525 struct ieee80211_supported_band *sband;
2526 struct ieee80211_rate *bitrates;
2527 size_t num_rates;
2528 u64 supp_rates;
2529 int i, j;
2530 sband = local->hw.wiphy->bands[band];
2531
2532 if (!sband) {
2533 WARN_ON(1);
2534 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2535 }
2536
2537 bitrates = sband->bitrates;
2538 num_rates = sband->n_bitrates;
2539 supp_rates = 0;
2540 for (i = 0; i < elems->supp_rates_len +
2541 elems->ext_supp_rates_len; i++) {
2542 u8 rate = 0;
2543 int own_rate;
2544 if (i < elems->supp_rates_len)
2545 rate = elems->supp_rates[i];
2546 else if (elems->ext_supp_rates)
2547 rate = elems->ext_supp_rates
2548 [i - elems->supp_rates_len];
2549 own_rate = 5 * (rate & 0x7f);
2550 for (j = 0; j < num_rates; j++)
2551 if (bitrates[j].bitrate == own_rate)
2552 supp_rates |= BIT(j);
2553 }
2554 return supp_rates;
2555}
2556
2557
2558static void ieee80211_rx_bss_info(struct net_device *dev,
2559 struct ieee80211_mgmt *mgmt, 1465 struct ieee80211_mgmt *mgmt,
2560 size_t len, 1466 size_t len,
2561 struct ieee80211_rx_status *rx_status, 1467 struct ieee80211_rx_status *rx_status,
2562 struct ieee802_11_elems *elems, 1468 struct ieee802_11_elems *elems,
2563 int beacon) 1469 bool beacon)
2564{ 1470{
2565 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1471 struct ieee80211_local *local = sdata->local;
2566 int freq, clen; 1472 int freq;
2567 struct ieee80211_sta_bss *bss; 1473 struct ieee80211_bss *bss;
2568 struct sta_info *sta; 1474 struct sta_info *sta;
2569 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2570 u64 beacon_timestamp, rx_timestamp;
2571 struct ieee80211_channel *channel; 1475 struct ieee80211_channel *channel;
1476 u64 beacon_timestamp, rx_timestamp;
1477 u64 supp_rates = 0;
1478 enum ieee80211_band band = rx_status->band;
2572 DECLARE_MAC_BUF(mac); 1479 DECLARE_MAC_BUF(mac);
2573 DECLARE_MAC_BUF(mac2); 1480 DECLARE_MAC_BUF(mac2);
2574 1481
2575 if (!beacon && memcmp(mgmt->da, dev->dev_addr, ETH_ALEN))
2576 return; /* ignore ProbeResp to foreign address */
2577
2578 beacon_timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
2579
2580 if (ieee80211_vif_is_mesh(&sdata->vif) && elems->mesh_id &&
2581 elems->mesh_config && mesh_matches_local(elems, dev)) {
2582 u64 rates = ieee80211_sta_get_rates(local, elems,
2583 rx_status->band);
2584
2585 mesh_neighbour_update(mgmt->sa, rates, dev,
2586 mesh_peer_accepts_plinks(elems, dev));
2587 }
2588
2589 rcu_read_lock();
2590
2591 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && elems->supp_rates &&
2592 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0 &&
2593 (sta = sta_info_get(local, mgmt->sa))) {
2594 u64 prev_rates;
2595 u64 supp_rates = ieee80211_sta_get_rates(local, elems,
2596 rx_status->band);
2597
2598 prev_rates = sta->supp_rates[rx_status->band];
2599 sta->supp_rates[rx_status->band] &= supp_rates;
2600 if (sta->supp_rates[rx_status->band] == 0) {
2601 /* No matching rates - this should not really happen.
2602 * Make sure that at least one rate is marked
2603 * supported to avoid issues with TX rate ctrl. */
2604 sta->supp_rates[rx_status->band] =
2605 sdata->u.sta.supp_rates_bits[rx_status->band];
2606 }
2607 }
2608
2609 rcu_read_unlock();
2610
2611 if (elems->ds_params && elems->ds_params_len == 1) 1482 if (elems->ds_params && elems->ds_params_len == 1)
2612 freq = ieee80211_channel_to_frequency(elems->ds_params[0]); 1483 freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
2613 else 1484 else
@@ -2618,215 +1489,60 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2618 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) 1489 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
2619 return; 1490 return;
2620 1491
2621#ifdef CONFIG_MAC80211_MESH 1492 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && elems->supp_rates &&
2622 if (elems->mesh_config) 1493 memcmp(mgmt->bssid, sdata->u.sta.bssid, ETH_ALEN) == 0) {
2623 bss = ieee80211_rx_mesh_bss_get(dev, elems->mesh_id, 1494 supp_rates = ieee80211_sta_get_rates(local, elems, band);
2624 elems->mesh_id_len, elems->mesh_config, freq);
2625 else
2626#endif
2627 bss = ieee80211_rx_bss_get(dev, mgmt->bssid, freq,
2628 elems->ssid, elems->ssid_len);
2629 if (!bss) {
2630#ifdef CONFIG_MAC80211_MESH
2631 if (elems->mesh_config)
2632 bss = ieee80211_rx_mesh_bss_add(dev, elems->mesh_id,
2633 elems->mesh_id_len, elems->mesh_config,
2634 elems->mesh_config_len, freq);
2635 else
2636#endif
2637 bss = ieee80211_rx_bss_add(dev, mgmt->bssid, freq,
2638 elems->ssid, elems->ssid_len);
2639 if (!bss)
2640 return;
2641 } else {
2642#if 0
2643 /* TODO: order by RSSI? */
2644 spin_lock_bh(&local->sta_bss_lock);
2645 list_move_tail(&bss->list, &local->sta_bss_list);
2646 spin_unlock_bh(&local->sta_bss_lock);
2647#endif
2648 }
2649 1495
2650 /* save the ERP value so that it is available at association time */ 1496 rcu_read_lock();
2651 if (elems->erp_info && elems->erp_info_len >= 1) {
2652 bss->erp_value = elems->erp_info[0];
2653 bss->has_erp_value = 1;
2654 }
2655
2656 if (elems->ht_cap_elem &&
2657 (!bss->ht_ie || bss->ht_ie_len != elems->ht_cap_elem_len ||
2658 memcmp(bss->ht_ie, elems->ht_cap_elem, elems->ht_cap_elem_len))) {
2659 kfree(bss->ht_ie);
2660 bss->ht_ie = kmalloc(elems->ht_cap_elem_len + 2, GFP_ATOMIC);
2661 if (bss->ht_ie) {
2662 memcpy(bss->ht_ie, elems->ht_cap_elem - 2,
2663 elems->ht_cap_elem_len + 2);
2664 bss->ht_ie_len = elems->ht_cap_elem_len + 2;
2665 } else
2666 bss->ht_ie_len = 0;
2667 } else if (!elems->ht_cap_elem && bss->ht_ie) {
2668 kfree(bss->ht_ie);
2669 bss->ht_ie = NULL;
2670 bss->ht_ie_len = 0;
2671 }
2672 1497
2673 if (elems->ht_info_elem && 1498 sta = sta_info_get(local, mgmt->sa);
2674 (!bss->ht_add_ie || 1499 if (sta) {
2675 bss->ht_add_ie_len != elems->ht_info_elem_len || 1500 u64 prev_rates;
2676 memcmp(bss->ht_add_ie, elems->ht_info_elem,
2677 elems->ht_info_elem_len))) {
2678 kfree(bss->ht_add_ie);
2679 bss->ht_add_ie =
2680 kmalloc(elems->ht_info_elem_len + 2, GFP_ATOMIC);
2681 if (bss->ht_add_ie) {
2682 memcpy(bss->ht_add_ie, elems->ht_info_elem - 2,
2683 elems->ht_info_elem_len + 2);
2684 bss->ht_add_ie_len = elems->ht_info_elem_len + 2;
2685 } else
2686 bss->ht_add_ie_len = 0;
2687 } else if (!elems->ht_info_elem && bss->ht_add_ie) {
2688 kfree(bss->ht_add_ie);
2689 bss->ht_add_ie = NULL;
2690 bss->ht_add_ie_len = 0;
2691 }
2692 1501
2693 bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int); 1502 prev_rates = sta->sta.supp_rates[band];
2694 bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info); 1503 /* make sure mandatory rates are always added */
1504 sta->sta.supp_rates[band] = supp_rates |
1505 ieee80211_mandatory_rates(local, band);
2695 1506
2696 if (elems->tim) { 1507#ifdef CONFIG_MAC80211_IBSS_DEBUG
2697 struct ieee80211_tim_ie *tim_ie = 1508 if (sta->sta.supp_rates[band] != prev_rates)
2698 (struct ieee80211_tim_ie *)elems->tim; 1509 printk(KERN_DEBUG "%s: updated supp_rates set "
2699 bss->dtim_period = tim_ie->dtim_period; 1510 "for %s based on beacon info (0x%llx | "
2700 } 1511 "0x%llx -> 0x%llx)\n",
1512 sdata->dev->name,
1513 print_mac(mac, sta->sta.addr),
1514 (unsigned long long) prev_rates,
1515 (unsigned long long) supp_rates,
1516 (unsigned long long) sta->sta.supp_rates[band]);
1517#endif
1518 } else {
1519 ieee80211_ibss_add_sta(sdata, NULL, mgmt->bssid,
1520 mgmt->sa, supp_rates);
1521 }
2701 1522
2702 /* set default value for buggy APs */ 1523 rcu_read_unlock();
2703 if (!elems->tim || bss->dtim_period == 0)
2704 bss->dtim_period = 1;
2705
2706 bss->supp_rates_len = 0;
2707 if (elems->supp_rates) {
2708 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2709 if (clen > elems->supp_rates_len)
2710 clen = elems->supp_rates_len;
2711 memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
2712 clen);
2713 bss->supp_rates_len += clen;
2714 }
2715 if (elems->ext_supp_rates) {
2716 clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
2717 if (clen > elems->ext_supp_rates_len)
2718 clen = elems->ext_supp_rates_len;
2719 memcpy(&bss->supp_rates[bss->supp_rates_len],
2720 elems->ext_supp_rates, clen);
2721 bss->supp_rates_len += clen;
2722 } 1524 }
2723 1525
2724 bss->band = rx_status->band; 1526 bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
1527 freq, beacon);
1528 if (!bss)
1529 return;
2725 1530
2726 bss->timestamp = beacon_timestamp; 1531 /* was just updated in ieee80211_bss_info_update */
2727 bss->last_update = jiffies; 1532 beacon_timestamp = bss->timestamp;
2728 bss->signal = rx_status->signal;
2729 bss->noise = rx_status->noise;
2730 bss->qual = rx_status->qual;
2731 if (!beacon && !bss->probe_resp)
2732 bss->probe_resp = true;
2733 1533
2734 /* 1534 /*
2735 * In STA mode, the remaining parameters should not be overridden 1535 * In STA mode, the remaining parameters should not be overridden
2736 * by beacons because they're not necessarily accurate there. 1536 * by beacons because they're not necessarily accurate there.
2737 */ 1537 */
2738 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 1538 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2739 bss->probe_resp && beacon) { 1539 bss->last_probe_resp && beacon) {
2740 ieee80211_rx_bss_put(local, bss); 1540 ieee80211_rx_bss_put(local, bss);
2741 return; 1541 return;
2742 } 1542 }
2743 1543
2744 if (elems->wpa &&
2745 (!bss->wpa_ie || bss->wpa_ie_len != elems->wpa_len ||
2746 memcmp(bss->wpa_ie, elems->wpa, elems->wpa_len))) {
2747 kfree(bss->wpa_ie);
2748 bss->wpa_ie = kmalloc(elems->wpa_len + 2, GFP_ATOMIC);
2749 if (bss->wpa_ie) {
2750 memcpy(bss->wpa_ie, elems->wpa - 2, elems->wpa_len + 2);
2751 bss->wpa_ie_len = elems->wpa_len + 2;
2752 } else
2753 bss->wpa_ie_len = 0;
2754 } else if (!elems->wpa && bss->wpa_ie) {
2755 kfree(bss->wpa_ie);
2756 bss->wpa_ie = NULL;
2757 bss->wpa_ie_len = 0;
2758 }
2759
2760 if (elems->rsn &&
2761 (!bss->rsn_ie || bss->rsn_ie_len != elems->rsn_len ||
2762 memcmp(bss->rsn_ie, elems->rsn, elems->rsn_len))) {
2763 kfree(bss->rsn_ie);
2764 bss->rsn_ie = kmalloc(elems->rsn_len + 2, GFP_ATOMIC);
2765 if (bss->rsn_ie) {
2766 memcpy(bss->rsn_ie, elems->rsn - 2, elems->rsn_len + 2);
2767 bss->rsn_ie_len = elems->rsn_len + 2;
2768 } else
2769 bss->rsn_ie_len = 0;
2770 } else if (!elems->rsn && bss->rsn_ie) {
2771 kfree(bss->rsn_ie);
2772 bss->rsn_ie = NULL;
2773 bss->rsn_ie_len = 0;
2774 }
2775
2776 /*
2777 * Cf.
2778 * http://www.wipo.int/pctdb/en/wo.jsp?wo=2007047181&IA=WO2007047181&DISPLAY=DESC
2779 *
2780 * quoting:
2781 *
2782 * In particular, "Wi-Fi CERTIFIED for WMM - Support for Multimedia
2783 * Applications with Quality of Service in Wi-Fi Networks," Wi- Fi
2784 * Alliance (September 1, 2004) is incorporated by reference herein.
2785 * The inclusion of the WMM Parameters in probe responses and
2786 * association responses is mandatory for WMM enabled networks. The
2787 * inclusion of the WMM Parameters in beacons, however, is optional.
2788 */
2789
2790 if (elems->wmm_param &&
2791 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_param_len ||
2792 memcmp(bss->wmm_ie, elems->wmm_param, elems->wmm_param_len))) {
2793 kfree(bss->wmm_ie);
2794 bss->wmm_ie = kmalloc(elems->wmm_param_len + 2, GFP_ATOMIC);
2795 if (bss->wmm_ie) {
2796 memcpy(bss->wmm_ie, elems->wmm_param - 2,
2797 elems->wmm_param_len + 2);
2798 bss->wmm_ie_len = elems->wmm_param_len + 2;
2799 } else
2800 bss->wmm_ie_len = 0;
2801 } else if (elems->wmm_info &&
2802 (!bss->wmm_ie || bss->wmm_ie_len != elems->wmm_info_len ||
2803 memcmp(bss->wmm_ie, elems->wmm_info,
2804 elems->wmm_info_len))) {
2805 /* As for certain AP's Fifth bit is not set in WMM IE in
2806 * beacon frames.So while parsing the beacon frame the
2807 * wmm_info structure is used instead of wmm_param.
2808 * wmm_info structure was never used to set bss->wmm_ie.
2809 * This code fixes this problem by copying the WME
2810 * information from wmm_info to bss->wmm_ie and enabling
2811 * n-band association.
2812 */
2813 kfree(bss->wmm_ie);
2814 bss->wmm_ie = kmalloc(elems->wmm_info_len + 2, GFP_ATOMIC);
2815 if (bss->wmm_ie) {
2816 memcpy(bss->wmm_ie, elems->wmm_info - 2,
2817 elems->wmm_info_len + 2);
2818 bss->wmm_ie_len = elems->wmm_info_len + 2;
2819 } else
2820 bss->wmm_ie_len = 0;
2821 } else if (!elems->wmm_param && !elems->wmm_info && bss->wmm_ie) {
2822 kfree(bss->wmm_ie);
2823 bss->wmm_ie = NULL;
2824 bss->wmm_ie_len = 0;
2825 }
2826
2827 /* check if we need to merge IBSS */ 1544 /* check if we need to merge IBSS */
2828 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && beacon && 1545 if (sdata->vif.type == NL80211_IFTYPE_ADHOC && beacon &&
2829 !local->sta_sw_scanning && !local->sta_hw_scanning &&
2830 bss->capability & WLAN_CAPABILITY_IBSS && 1546 bss->capability & WLAN_CAPABILITY_IBSS &&
2831 bss->freq == local->oper_channel->center_freq && 1547 bss->freq == local->oper_channel->center_freq &&
2832 elems->ssid_len == sdata->u.sta.ssid_len && 1548 elems->ssid_len == sdata->u.sta.ssid_len &&
@@ -2848,7 +1564,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2848 * e.g: at 1 MBit that means mactime is 192 usec earlier 1564 * e.g: at 1 MBit that means mactime is 192 usec earlier
2849 * (=24 bytes * 8 usecs/byte) than the beacon timestamp. 1565 * (=24 bytes * 8 usecs/byte) than the beacon timestamp.
2850 */ 1566 */
2851 int rate = local->hw.wiphy->bands[rx_status->band]-> 1567 int rate = local->hw.wiphy->bands[band]->
2852 bitrates[rx_status->rate_idx].bitrate; 1568 bitrates[rx_status->rate_idx].bitrate;
2853 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate); 1569 rx_timestamp = rx_status->mactime + (24 * 8 * 10 / rate);
2854 } else if (local && local->ops && local->ops->get_tsf) 1570 } else if (local && local->ops && local->ops->get_tsf)
@@ -2871,12 +1587,12 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2871#ifdef CONFIG_MAC80211_IBSS_DEBUG 1587#ifdef CONFIG_MAC80211_IBSS_DEBUG
2872 printk(KERN_DEBUG "%s: beacon TSF higher than " 1588 printk(KERN_DEBUG "%s: beacon TSF higher than "
2873 "local TSF - IBSS merge with BSSID %s\n", 1589 "local TSF - IBSS merge with BSSID %s\n",
2874 dev->name, print_mac(mac, mgmt->bssid)); 1590 sdata->dev->name, print_mac(mac, mgmt->bssid));
2875#endif 1591#endif
2876 ieee80211_sta_join_ibss(dev, &sdata->u.sta, bss); 1592 ieee80211_sta_join_ibss(sdata, &sdata->u.sta, bss);
2877 ieee80211_ibss_add_sta(dev, NULL, 1593 ieee80211_ibss_add_sta(sdata, NULL,
2878 mgmt->bssid, mgmt->sa, 1594 mgmt->bssid, mgmt->sa,
2879 BIT(rx_status->rate_idx)); 1595 supp_rates);
2880 } 1596 }
2881 } 1597 }
2882 1598
@@ -2884,13 +1600,17 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2884} 1600}
2885 1601
2886 1602
2887static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev, 1603static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
2888 struct ieee80211_mgmt *mgmt, 1604 struct ieee80211_mgmt *mgmt,
2889 size_t len, 1605 size_t len,
2890 struct ieee80211_rx_status *rx_status) 1606 struct ieee80211_rx_status *rx_status)
2891{ 1607{
2892 size_t baselen; 1608 size_t baselen;
2893 struct ieee802_11_elems elems; 1609 struct ieee802_11_elems elems;
1610 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
1611
1612 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
1613 return; /* ignore ProbeResp to foreign address */
2894 1614
2895 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt; 1615 baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
2896 if (baselen > len) 1616 if (baselen > len)
@@ -2899,20 +1619,27 @@ static void ieee80211_rx_mgmt_probe_resp(struct net_device *dev,
2899 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 1619 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
2900 &elems); 1620 &elems);
2901 1621
2902 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 0); 1622 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
1623
1624 /* direct probe may be part of the association flow */
1625 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE,
1626 &ifsta->request)) {
1627 printk(KERN_DEBUG "%s direct probe responded\n",
1628 sdata->dev->name);
1629 ieee80211_authenticate(sdata, ifsta);
1630 }
2903} 1631}
2904 1632
2905 1633
2906static void ieee80211_rx_mgmt_beacon(struct net_device *dev, 1634static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2907 struct ieee80211_mgmt *mgmt, 1635 struct ieee80211_mgmt *mgmt,
2908 size_t len, 1636 size_t len,
2909 struct ieee80211_rx_status *rx_status) 1637 struct ieee80211_rx_status *rx_status)
2910{ 1638{
2911 struct ieee80211_sub_if_data *sdata;
2912 struct ieee80211_if_sta *ifsta; 1639 struct ieee80211_if_sta *ifsta;
2913 size_t baselen; 1640 size_t baselen;
2914 struct ieee802_11_elems elems; 1641 struct ieee802_11_elems elems;
2915 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1642 struct ieee80211_local *local = sdata->local;
2916 struct ieee80211_conf *conf = &local->hw.conf; 1643 struct ieee80211_conf *conf = &local->hw.conf;
2917 u32 changed = 0; 1644 u32 changed = 0;
2918 1645
@@ -2923,10 +1650,9 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2923 1650
2924 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems); 1651 ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
2925 1652
2926 ieee80211_rx_bss_info(dev, mgmt, len, rx_status, &elems, 1); 1653 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
2927 1654
2928 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1655 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2929 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
2930 return; 1656 return;
2931 ifsta = &sdata->u.sta; 1657 ifsta = &sdata->u.sta;
2932 1658
@@ -2934,15 +1660,9 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2934 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0) 1660 memcmp(ifsta->bssid, mgmt->bssid, ETH_ALEN) != 0)
2935 return; 1661 return;
2936 1662
2937 ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param, 1663 ieee80211_sta_wmm_params(local, ifsta, elems.wmm_param,
2938 elems.wmm_param_len); 1664 elems.wmm_param_len);
2939 1665
2940 /* Do not send changes to driver if we are scanning. This removes
2941 * requirement that driver's bss_info_changed function needs to be
2942 * atomic. */
2943 if (local->sta_sw_scanning || local->sta_hw_scanning)
2944 return;
2945
2946 if (elems.erp_info && elems.erp_info_len >= 1) 1666 if (elems.erp_info && elems.erp_info_len >= 1)
2947 changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]); 1667 changed |= ieee80211_handle_erp_ie(sdata, elems.erp_info[0]);
2948 else { 1668 else {
@@ -2966,14 +1686,13 @@ static void ieee80211_rx_mgmt_beacon(struct net_device *dev,
2966} 1686}
2967 1687
2968 1688
2969static void ieee80211_rx_mgmt_probe_req(struct net_device *dev, 1689static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
2970 struct ieee80211_if_sta *ifsta, 1690 struct ieee80211_if_sta *ifsta,
2971 struct ieee80211_mgmt *mgmt, 1691 struct ieee80211_mgmt *mgmt,
2972 size_t len, 1692 size_t len,
2973 struct ieee80211_rx_status *rx_status) 1693 struct ieee80211_rx_status *rx_status)
2974{ 1694{
2975 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1695 struct ieee80211_local *local = sdata->local;
2976 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2977 int tx_last_beacon; 1696 int tx_last_beacon;
2978 struct sk_buff *skb; 1697 struct sk_buff *skb;
2979 struct ieee80211_mgmt *resp; 1698 struct ieee80211_mgmt *resp;
@@ -2984,8 +1703,8 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2984 DECLARE_MAC_BUF(mac3); 1703 DECLARE_MAC_BUF(mac3);
2985#endif 1704#endif
2986 1705
2987 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS || 1706 if (sdata->vif.type != NL80211_IFTYPE_ADHOC ||
2988 ifsta->state != IEEE80211_IBSS_JOINED || 1707 ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED ||
2989 len < 24 + 2 || !ifsta->probe_resp) 1708 len < 24 + 2 || !ifsta->probe_resp)
2990 return; 1709 return;
2991 1710
@@ -2997,7 +1716,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
2997#ifdef CONFIG_MAC80211_IBSS_DEBUG 1716#ifdef CONFIG_MAC80211_IBSS_DEBUG
2998 printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID=" 1717 printk(KERN_DEBUG "%s: RX ProbeReq SA=%s DA=%s BSSID="
2999 "%s (tx_last_beacon=%d)\n", 1718 "%s (tx_last_beacon=%d)\n",
3000 dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da), 1719 sdata->dev->name, print_mac(mac, mgmt->sa), print_mac(mac2, mgmt->da),
3001 print_mac(mac3, mgmt->bssid), tx_last_beacon); 1720 print_mac(mac3, mgmt->bssid), tx_last_beacon);
3002#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 1721#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3003 1722
@@ -3015,7 +1734,7 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
3015#ifdef CONFIG_MAC80211_IBSS_DEBUG 1734#ifdef CONFIG_MAC80211_IBSS_DEBUG
3016 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq " 1735 printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
3017 "from %s\n", 1736 "from %s\n",
3018 dev->name, print_mac(mac, mgmt->sa)); 1737 sdata->dev->name, print_mac(mac, mgmt->sa));
3019#endif 1738#endif
3020 return; 1739 return;
3021 } 1740 }
@@ -3035,74 +1754,15 @@ static void ieee80211_rx_mgmt_probe_req(struct net_device *dev,
3035 memcpy(resp->da, mgmt->sa, ETH_ALEN); 1754 memcpy(resp->da, mgmt->sa, ETH_ALEN);
3036#ifdef CONFIG_MAC80211_IBSS_DEBUG 1755#ifdef CONFIG_MAC80211_IBSS_DEBUG
3037 printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n", 1756 printk(KERN_DEBUG "%s: Sending ProbeResp to %s\n",
3038 dev->name, print_mac(mac, resp->da)); 1757 sdata->dev->name, print_mac(mac, resp->da));
3039#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 1758#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3040 ieee80211_sta_tx(dev, skb, 0); 1759 ieee80211_tx_skb(sdata, skb, 0);
3041}
3042
3043static void ieee80211_rx_mgmt_action(struct net_device *dev,
3044 struct ieee80211_if_sta *ifsta,
3045 struct ieee80211_mgmt *mgmt,
3046 size_t len,
3047 struct ieee80211_rx_status *rx_status)
3048{
3049 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3050 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3051
3052 if (len < IEEE80211_MIN_ACTION_SIZE)
3053 return;
3054
3055 switch (mgmt->u.action.category) {
3056 case WLAN_CATEGORY_SPECTRUM_MGMT:
3057 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
3058 break;
3059 switch (mgmt->u.action.u.chan_switch.action_code) {
3060 case WLAN_ACTION_SPCT_MSR_REQ:
3061 if (len < (IEEE80211_MIN_ACTION_SIZE +
3062 sizeof(mgmt->u.action.u.measurement)))
3063 break;
3064 ieee80211_sta_process_measurement_req(dev, mgmt, len);
3065 break;
3066 }
3067 break;
3068 case WLAN_CATEGORY_BACK:
3069 switch (mgmt->u.action.u.addba_req.action_code) {
3070 case WLAN_ACTION_ADDBA_REQ:
3071 if (len < (IEEE80211_MIN_ACTION_SIZE +
3072 sizeof(mgmt->u.action.u.addba_req)))
3073 break;
3074 ieee80211_sta_process_addba_request(dev, mgmt, len);
3075 break;
3076 case WLAN_ACTION_ADDBA_RESP:
3077 if (len < (IEEE80211_MIN_ACTION_SIZE +
3078 sizeof(mgmt->u.action.u.addba_resp)))
3079 break;
3080 ieee80211_sta_process_addba_resp(dev, mgmt, len);
3081 break;
3082 case WLAN_ACTION_DELBA:
3083 if (len < (IEEE80211_MIN_ACTION_SIZE +
3084 sizeof(mgmt->u.action.u.delba)))
3085 break;
3086 ieee80211_sta_process_delba(dev, mgmt, len);
3087 break;
3088 }
3089 break;
3090 case PLINK_CATEGORY:
3091 if (ieee80211_vif_is_mesh(&sdata->vif))
3092 mesh_rx_plink_frame(dev, mgmt, len, rx_status);
3093 break;
3094 case MESH_PATH_SEL_CATEGORY:
3095 if (ieee80211_vif_is_mesh(&sdata->vif))
3096 mesh_rx_path_sel_frame(dev, mgmt, len);
3097 break;
3098 }
3099} 1760}
3100 1761
3101void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb, 1762void ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
3102 struct ieee80211_rx_status *rx_status) 1763 struct ieee80211_rx_status *rx_status)
3103{ 1764{
3104 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1765 struct ieee80211_local *local = sdata->local;
3105 struct ieee80211_sub_if_data *sdata;
3106 struct ieee80211_if_sta *ifsta; 1766 struct ieee80211_if_sta *ifsta;
3107 struct ieee80211_mgmt *mgmt; 1767 struct ieee80211_mgmt *mgmt;
3108 u16 fc; 1768 u16 fc;
@@ -3110,7 +1770,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3110 if (skb->len < 24) 1770 if (skb->len < 24)
3111 goto fail; 1771 goto fail;
3112 1772
3113 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3114 ifsta = &sdata->u.sta; 1773 ifsta = &sdata->u.sta;
3115 1774
3116 mgmt = (struct ieee80211_mgmt *) skb->data; 1775 mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -3120,7 +1779,6 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3120 case IEEE80211_STYPE_PROBE_REQ: 1779 case IEEE80211_STYPE_PROBE_REQ:
3121 case IEEE80211_STYPE_PROBE_RESP: 1780 case IEEE80211_STYPE_PROBE_RESP:
3122 case IEEE80211_STYPE_BEACON: 1781 case IEEE80211_STYPE_BEACON:
3123 case IEEE80211_STYPE_ACTION:
3124 memcpy(skb->cb, rx_status, sizeof(*rx_status)); 1782 memcpy(skb->cb, rx_status, sizeof(*rx_status));
3125 case IEEE80211_STYPE_AUTH: 1783 case IEEE80211_STYPE_AUTH:
3126 case IEEE80211_STYPE_ASSOC_RESP: 1784 case IEEE80211_STYPE_ASSOC_RESP:
@@ -3136,17 +1794,14 @@ void ieee80211_sta_rx_mgmt(struct net_device *dev, struct sk_buff *skb,
3136 kfree_skb(skb); 1794 kfree_skb(skb);
3137} 1795}
3138 1796
3139 1797static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
3140static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3141 struct sk_buff *skb) 1798 struct sk_buff *skb)
3142{ 1799{
3143 struct ieee80211_rx_status *rx_status; 1800 struct ieee80211_rx_status *rx_status;
3144 struct ieee80211_sub_if_data *sdata;
3145 struct ieee80211_if_sta *ifsta; 1801 struct ieee80211_if_sta *ifsta;
3146 struct ieee80211_mgmt *mgmt; 1802 struct ieee80211_mgmt *mgmt;
3147 u16 fc; 1803 u16 fc;
3148 1804
3149 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3150 ifsta = &sdata->u.sta; 1805 ifsta = &sdata->u.sta;
3151 1806
3152 rx_status = (struct ieee80211_rx_status *) skb->cb; 1807 rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -3155,17 +1810,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3155 1810
3156 switch (fc & IEEE80211_FCTL_STYPE) { 1811 switch (fc & IEEE80211_FCTL_STYPE) {
3157 case IEEE80211_STYPE_PROBE_REQ: 1812 case IEEE80211_STYPE_PROBE_REQ:
3158 ieee80211_rx_mgmt_probe_req(dev, ifsta, mgmt, skb->len, 1813 ieee80211_rx_mgmt_probe_req(sdata, ifsta, mgmt, skb->len,
3159 rx_status); 1814 rx_status);
3160 break; 1815 break;
3161 case IEEE80211_STYPE_PROBE_RESP: 1816 case IEEE80211_STYPE_PROBE_RESP:
3162 ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status); 1817 ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len, rx_status);
3163 break; 1818 break;
3164 case IEEE80211_STYPE_BEACON: 1819 case IEEE80211_STYPE_BEACON:
3165 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status); 1820 ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
3166 break; 1821 break;
3167 case IEEE80211_STYPE_AUTH: 1822 case IEEE80211_STYPE_AUTH:
3168 ieee80211_rx_mgmt_auth(dev, ifsta, mgmt, skb->len); 1823 ieee80211_rx_mgmt_auth(sdata, ifsta, mgmt, skb->len);
3169 break; 1824 break;
3170 case IEEE80211_STYPE_ASSOC_RESP: 1825 case IEEE80211_STYPE_ASSOC_RESP:
3171 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0); 1826 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 0);
@@ -3174,13 +1829,10 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3174 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1); 1829 ieee80211_rx_mgmt_assoc_resp(sdata, ifsta, mgmt, skb->len, 1);
3175 break; 1830 break;
3176 case IEEE80211_STYPE_DEAUTH: 1831 case IEEE80211_STYPE_DEAUTH:
3177 ieee80211_rx_mgmt_deauth(dev, ifsta, mgmt, skb->len); 1832 ieee80211_rx_mgmt_deauth(sdata, ifsta, mgmt, skb->len);
3178 break; 1833 break;
3179 case IEEE80211_STYPE_DISASSOC: 1834 case IEEE80211_STYPE_DISASSOC:
3180 ieee80211_rx_mgmt_disassoc(dev, ifsta, mgmt, skb->len); 1835 ieee80211_rx_mgmt_disassoc(sdata, ifsta, mgmt, skb->len);
3181 break;
3182 case IEEE80211_STYPE_ACTION:
3183 ieee80211_rx_mgmt_action(dev, ifsta, mgmt, skb->len, rx_status);
3184 break; 1836 break;
3185 } 1837 }
3186 1838
@@ -3188,47 +1840,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct net_device *dev,
3188} 1840}
3189 1841
3190 1842
3191ieee80211_rx_result 1843static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
3192ieee80211_sta_rx_scan(struct net_device *dev, struct sk_buff *skb,
3193 struct ieee80211_rx_status *rx_status)
3194{
3195 struct ieee80211_mgmt *mgmt;
3196 __le16 fc;
3197
3198 if (skb->len < 2)
3199 return RX_DROP_UNUSABLE;
3200
3201 mgmt = (struct ieee80211_mgmt *) skb->data;
3202 fc = mgmt->frame_control;
3203
3204 if (ieee80211_is_ctl(fc))
3205 return RX_CONTINUE;
3206
3207 if (skb->len < 24)
3208 return RX_DROP_MONITOR;
3209
3210 if (ieee80211_is_probe_resp(fc)) {
3211 ieee80211_rx_mgmt_probe_resp(dev, mgmt, skb->len, rx_status);
3212 dev_kfree_skb(skb);
3213 return RX_QUEUED;
3214 }
3215
3216 if (ieee80211_is_beacon(fc)) {
3217 ieee80211_rx_mgmt_beacon(dev, mgmt, skb->len, rx_status);
3218 dev_kfree_skb(skb);
3219 return RX_QUEUED;
3220 }
3221
3222 return RX_CONTINUE;
3223}
3224
3225
3226static int ieee80211_sta_active_ibss(struct net_device *dev)
3227{ 1844{
3228 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1845 struct ieee80211_local *local = sdata->local;
3229 int active = 0; 1846 int active = 0;
3230 struct sta_info *sta; 1847 struct sta_info *sta;
3231 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3232 1848
3233 rcu_read_lock(); 1849 rcu_read_lock();
3234 1850
@@ -3247,179 +1863,36 @@ static int ieee80211_sta_active_ibss(struct net_device *dev)
3247} 1863}
3248 1864
3249 1865
3250static void ieee80211_sta_expire(struct net_device *dev, unsigned long exp_time) 1866static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata,
3251{
3252 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3253 struct sta_info *sta, *tmp;
3254 LIST_HEAD(tmp_list);
3255 DECLARE_MAC_BUF(mac);
3256 unsigned long flags;
3257
3258 spin_lock_irqsave(&local->sta_lock, flags);
3259 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
3260 if (time_after(jiffies, sta->last_rx + exp_time)) {
3261#ifdef CONFIG_MAC80211_IBSS_DEBUG
3262 printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
3263 dev->name, print_mac(mac, sta->addr));
3264#endif
3265 __sta_info_unlink(&sta);
3266 if (sta)
3267 list_add(&sta->list, &tmp_list);
3268 }
3269 spin_unlock_irqrestore(&local->sta_lock, flags);
3270
3271 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
3272 sta_info_destroy(sta);
3273}
3274
3275
3276static void ieee80211_sta_merge_ibss(struct net_device *dev,
3277 struct ieee80211_if_sta *ifsta) 1867 struct ieee80211_if_sta *ifsta)
3278{ 1868{
3279 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); 1869 mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL);
3280 1870
3281 ieee80211_sta_expire(dev, IEEE80211_IBSS_INACTIVITY_LIMIT); 1871 ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
3282 if (ieee80211_sta_active_ibss(dev)) 1872 if (ieee80211_sta_active_ibss(sdata))
3283 return; 1873 return;
3284 1874
3285 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other " 1875 printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
3286 "IBSS networks with same SSID (merge)\n", dev->name); 1876 "IBSS networks with same SSID (merge)\n", sdata->dev->name);
3287 ieee80211_sta_req_scan(dev, ifsta->ssid, ifsta->ssid_len); 1877 ieee80211_request_scan(sdata, ifsta->ssid, ifsta->ssid_len);
3288} 1878}
3289 1879
3290 1880
3291#ifdef CONFIG_MAC80211_MESH 1881static void ieee80211_sta_timer(unsigned long data)
3292static void ieee80211_mesh_housekeeping(struct net_device *dev,
3293 struct ieee80211_if_sta *ifsta)
3294{
3295 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3296 bool free_plinks;
3297
3298 ieee80211_sta_expire(dev, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
3299 mesh_path_expire(dev);
3300
3301 free_plinks = mesh_plink_availables(sdata);
3302 if (free_plinks != sdata->u.sta.accepting_plinks)
3303 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
3304
3305 mod_timer(&ifsta->timer, jiffies +
3306 IEEE80211_MESH_HOUSEKEEPING_INTERVAL);
3307}
3308
3309
3310void ieee80211_start_mesh(struct net_device *dev)
3311{
3312 struct ieee80211_if_sta *ifsta;
3313 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3314 ifsta = &sdata->u.sta;
3315 ifsta->state = IEEE80211_MESH_UP;
3316 ieee80211_sta_timer((unsigned long)sdata);
3317 ieee80211_if_config(sdata, IEEE80211_IFCC_BEACON);
3318}
3319#endif
3320
3321
3322void ieee80211_sta_timer(unsigned long data)
3323{ 1882{
3324 struct ieee80211_sub_if_data *sdata = 1883 struct ieee80211_sub_if_data *sdata =
3325 (struct ieee80211_sub_if_data *) data; 1884 (struct ieee80211_sub_if_data *) data;
3326 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 1885 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
3327 struct ieee80211_local *local = wdev_priv(&sdata->wdev); 1886 struct ieee80211_local *local = sdata->local;
3328 1887
3329 set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request); 1888 set_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
3330 queue_work(local->hw.workqueue, &ifsta->work); 1889 queue_work(local->hw.workqueue, &ifsta->work);
3331} 1890}
3332 1891
3333void ieee80211_sta_work(struct work_struct *work) 1892static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata,
3334{
3335 struct ieee80211_sub_if_data *sdata =
3336 container_of(work, struct ieee80211_sub_if_data, u.sta.work);
3337 struct net_device *dev = sdata->dev;
3338 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3339 struct ieee80211_if_sta *ifsta;
3340 struct sk_buff *skb;
3341
3342 if (!netif_running(dev))
3343 return;
3344
3345 if (local->sta_sw_scanning || local->sta_hw_scanning)
3346 return;
3347
3348 if (WARN_ON(sdata->vif.type != IEEE80211_IF_TYPE_STA &&
3349 sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
3350 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
3351 return;
3352 ifsta = &sdata->u.sta;
3353
3354 while ((skb = skb_dequeue(&ifsta->skb_queue)))
3355 ieee80211_sta_rx_queued_mgmt(dev, skb);
3356
3357#ifdef CONFIG_MAC80211_MESH
3358 if (ifsta->preq_queue_len &&
3359 time_after(jiffies,
3360 ifsta->last_preq + msecs_to_jiffies(ifsta->mshcfg.dot11MeshHWMPpreqMinInterval)))
3361 mesh_path_start_discovery(dev);
3362#endif
3363
3364 if (ifsta->state != IEEE80211_AUTHENTICATE &&
3365 ifsta->state != IEEE80211_ASSOCIATE &&
3366 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
3367 if (ifsta->scan_ssid_len)
3368 ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len);
3369 else
3370 ieee80211_sta_start_scan(dev, NULL, 0);
3371 return;
3372 }
3373
3374 if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) {
3375 if (ieee80211_sta_config_auth(dev, ifsta))
3376 return;
3377 clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
3378 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request))
3379 return;
3380
3381 switch (ifsta->state) {
3382 case IEEE80211_DISABLED:
3383 break;
3384 case IEEE80211_AUTHENTICATE:
3385 ieee80211_authenticate(dev, ifsta);
3386 break;
3387 case IEEE80211_ASSOCIATE:
3388 ieee80211_associate(dev, ifsta);
3389 break;
3390 case IEEE80211_ASSOCIATED:
3391 ieee80211_associated(dev, ifsta);
3392 break;
3393 case IEEE80211_IBSS_SEARCH:
3394 ieee80211_sta_find_ibss(dev, ifsta);
3395 break;
3396 case IEEE80211_IBSS_JOINED:
3397 ieee80211_sta_merge_ibss(dev, ifsta);
3398 break;
3399#ifdef CONFIG_MAC80211_MESH
3400 case IEEE80211_MESH_UP:
3401 ieee80211_mesh_housekeeping(dev, ifsta);
3402 break;
3403#endif
3404 default:
3405 WARN_ON(1);
3406 break;
3407 }
3408
3409 if (ieee80211_privacy_mismatch(dev, ifsta)) {
3410 printk(KERN_DEBUG "%s: privacy configuration mismatch and "
3411 "mixed-cell disabled - disassociate\n", dev->name);
3412
3413 ieee80211_send_disassoc(dev, ifsta, WLAN_REASON_UNSPECIFIED);
3414 ieee80211_set_disassoc(dev, ifsta, 0);
3415 }
3416}
3417
3418
3419static void ieee80211_sta_reset_auth(struct net_device *dev,
3420 struct ieee80211_if_sta *ifsta) 1893 struct ieee80211_if_sta *ifsta)
3421{ 1894{
3422 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1895 struct ieee80211_local *local = sdata->local;
3423 1896
3424 if (local->ops->reset_tsf) { 1897 if (local->ops->reset_tsf) {
3425 /* Reset own TSF to allow time synchronization work. */ 1898 /* Reset own TSF to allow time synchronization work. */
@@ -3439,29 +1912,15 @@ static void ieee80211_sta_reset_auth(struct net_device *dev,
3439 ifsta->auth_alg = WLAN_AUTH_OPEN; 1912 ifsta->auth_alg = WLAN_AUTH_OPEN;
3440 ifsta->auth_transaction = -1; 1913 ifsta->auth_transaction = -1;
3441 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED; 1914 ifsta->flags &= ~IEEE80211_STA_ASSOCIATED;
3442 ifsta->auth_tries = ifsta->assoc_tries = 0; 1915 ifsta->assoc_scan_tries = 0;
3443 netif_carrier_off(dev); 1916 ifsta->direct_probe_tries = 0;
1917 ifsta->auth_tries = 0;
1918 ifsta->assoc_tries = 0;
1919 netif_tx_stop_all_queues(sdata->dev);
1920 netif_carrier_off(sdata->dev);
3444} 1921}
3445 1922
3446 1923
3447void ieee80211_sta_req_auth(struct net_device *dev,
3448 struct ieee80211_if_sta *ifsta)
3449{
3450 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3451 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3452
3453 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
3454 return;
3455
3456 if ((ifsta->flags & (IEEE80211_STA_BSSID_SET |
3457 IEEE80211_STA_AUTO_BSSID_SEL)) &&
3458 (ifsta->flags & (IEEE80211_STA_SSID_SET |
3459 IEEE80211_STA_AUTO_SSID_SEL))) {
3460 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
3461 queue_work(local->hw.workqueue, &ifsta->work);
3462 }
3463}
3464
3465static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta, 1924static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
3466 const char *ssid, int ssid_len) 1925 const char *ssid, int ssid_len)
3467{ 1926{
@@ -3492,81 +1951,11 @@ static int ieee80211_sta_match_ssid(struct ieee80211_if_sta *ifsta,
3492 return 0; 1951 return 0;
3493} 1952}
3494 1953
3495static int ieee80211_sta_config_auth(struct net_device *dev, 1954static int ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata,
3496 struct ieee80211_if_sta *ifsta) 1955 struct ieee80211_if_sta *ifsta)
3497{ 1956{
3498 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1957 struct ieee80211_local *local = sdata->local;
3499 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1958 struct ieee80211_bss *bss;
3500 struct ieee80211_sta_bss *bss, *selected = NULL;
3501 int top_rssi = 0, freq;
3502
3503 spin_lock_bh(&local->sta_bss_lock);
3504 freq = local->oper_channel->center_freq;
3505 list_for_each_entry(bss, &local->sta_bss_list, list) {
3506 if (!(bss->capability & WLAN_CAPABILITY_ESS))
3507 continue;
3508
3509 if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
3510 IEEE80211_STA_AUTO_BSSID_SEL |
3511 IEEE80211_STA_AUTO_CHANNEL_SEL)) &&
3512 (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
3513 !!sdata->default_key))
3514 continue;
3515
3516 if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&
3517 bss->freq != freq)
3518 continue;
3519
3520 if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) &&
3521 memcmp(bss->bssid, ifsta->bssid, ETH_ALEN))
3522 continue;
3523
3524 if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) &&
3525 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
3526 continue;
3527
3528 if (!selected || top_rssi < bss->signal) {
3529 selected = bss;
3530 top_rssi = bss->signal;
3531 }
3532 }
3533 if (selected)
3534 atomic_inc(&selected->users);
3535 spin_unlock_bh(&local->sta_bss_lock);
3536
3537 if (selected) {
3538 ieee80211_set_freq(dev, selected->freq);
3539 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
3540 ieee80211_sta_set_ssid(dev, selected->ssid,
3541 selected->ssid_len);
3542 ieee80211_sta_set_bssid(dev, selected->bssid);
3543 ieee80211_sta_def_wmm_params(dev, selected, 0);
3544 ieee80211_rx_bss_put(local, selected);
3545 ifsta->state = IEEE80211_AUTHENTICATE;
3546 ieee80211_sta_reset_auth(dev, ifsta);
3547 return 0;
3548 } else {
3549 if (ifsta->state != IEEE80211_AUTHENTICATE) {
3550 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL)
3551 ieee80211_sta_start_scan(dev, NULL, 0);
3552 else
3553 ieee80211_sta_start_scan(dev, ifsta->ssid,
3554 ifsta->ssid_len);
3555 ifsta->state = IEEE80211_AUTHENTICATE;
3556 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
3557 } else
3558 ifsta->state = IEEE80211_DISABLED;
3559 }
3560 return -1;
3561}
3562
3563
3564static int ieee80211_sta_create_ibss(struct net_device *dev,
3565 struct ieee80211_if_sta *ifsta)
3566{
3567 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
3568 struct ieee80211_sta_bss *bss;
3569 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3570 struct ieee80211_supported_band *sband; 1959 struct ieee80211_supported_band *sband;
3571 u8 bssid[ETH_ALEN], *pos; 1960 u8 bssid[ETH_ALEN], *pos;
3572 int i; 1961 int i;
@@ -3582,15 +1971,15 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3582 * random number generator get different BSSID. */ 1971 * random number generator get different BSSID. */
3583 get_random_bytes(bssid, ETH_ALEN); 1972 get_random_bytes(bssid, ETH_ALEN);
3584 for (i = 0; i < ETH_ALEN; i++) 1973 for (i = 0; i < ETH_ALEN; i++)
3585 bssid[i] ^= dev->dev_addr[i]; 1974 bssid[i] ^= sdata->dev->dev_addr[i];
3586 bssid[0] &= ~0x01; 1975 bssid[0] &= ~0x01;
3587 bssid[0] |= 0x02; 1976 bssid[0] |= 0x02;
3588#endif 1977#endif
3589 1978
3590 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n", 1979 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %s\n",
3591 dev->name, print_mac(mac, bssid)); 1980 sdata->dev->name, print_mac(mac, bssid));
3592 1981
3593 bss = ieee80211_rx_bss_add(dev, bssid, 1982 bss = ieee80211_rx_bss_add(local, bssid,
3594 local->hw.conf.channel->center_freq, 1983 local->hw.conf.channel->center_freq,
3595 sdata->u.sta.ssid, sdata->u.sta.ssid_len); 1984 sdata->u.sta.ssid, sdata->u.sta.ssid_len);
3596 if (!bss) 1985 if (!bss)
@@ -3617,17 +2006,17 @@ static int ieee80211_sta_create_ibss(struct net_device *dev,
3617 *pos++ = (u8) (rate / 5); 2006 *pos++ = (u8) (rate / 5);
3618 } 2007 }
3619 2008
3620 ret = ieee80211_sta_join_ibss(dev, ifsta, bss); 2009 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
3621 ieee80211_rx_bss_put(local, bss); 2010 ieee80211_rx_bss_put(local, bss);
3622 return ret; 2011 return ret;
3623} 2012}
3624 2013
3625 2014
3626static int ieee80211_sta_find_ibss(struct net_device *dev, 2015static int ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata,
3627 struct ieee80211_if_sta *ifsta) 2016 struct ieee80211_if_sta *ifsta)
3628{ 2017{
3629 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2018 struct ieee80211_local *local = sdata->local;
3630 struct ieee80211_sta_bss *bss; 2019 struct ieee80211_bss *bss;
3631 int found = 0; 2020 int found = 0;
3632 u8 bssid[ETH_ALEN]; 2021 u8 bssid[ETH_ALEN];
3633 int active_ibss; 2022 int active_ibss;
@@ -3637,13 +2026,13 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3637 if (ifsta->ssid_len == 0) 2026 if (ifsta->ssid_len == 0)
3638 return -EINVAL; 2027 return -EINVAL;
3639 2028
3640 active_ibss = ieee80211_sta_active_ibss(dev); 2029 active_ibss = ieee80211_sta_active_ibss(sdata);
3641#ifdef CONFIG_MAC80211_IBSS_DEBUG 2030#ifdef CONFIG_MAC80211_IBSS_DEBUG
3642 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n", 2031 printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
3643 dev->name, active_ibss); 2032 sdata->dev->name, active_ibss);
3644#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2033#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3645 spin_lock_bh(&local->sta_bss_lock); 2034 spin_lock_bh(&local->bss_lock);
3646 list_for_each_entry(bss, &local->sta_bss_list, list) { 2035 list_for_each_entry(bss, &local->bss_list, list) {
3647 if (ifsta->ssid_len != bss->ssid_len || 2036 if (ifsta->ssid_len != bss->ssid_len ||
3648 memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0 2037 memcmp(ifsta->ssid, bss->ssid, bss->ssid_len) != 0
3649 || !(bss->capability & WLAN_CAPABILITY_IBSS)) 2038 || !(bss->capability & WLAN_CAPABILITY_IBSS))
@@ -3657,7 +2046,7 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3657 if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0) 2046 if (active_ibss || memcmp(bssid, ifsta->bssid, ETH_ALEN) != 0)
3658 break; 2047 break;
3659 } 2048 }
3660 spin_unlock_bh(&local->sta_bss_lock); 2049 spin_unlock_bh(&local->bss_lock);
3661 2050
3662#ifdef CONFIG_MAC80211_IBSS_DEBUG 2051#ifdef CONFIG_MAC80211_IBSS_DEBUG
3663 if (found) 2052 if (found)
@@ -3675,15 +2064,15 @@ static int ieee80211_sta_find_ibss(struct net_device *dev,
3675 else 2064 else
3676 search_freq = local->hw.conf.channel->center_freq; 2065 search_freq = local->hw.conf.channel->center_freq;
3677 2066
3678 bss = ieee80211_rx_bss_get(dev, bssid, search_freq, 2067 bss = ieee80211_rx_bss_get(local, bssid, search_freq,
3679 ifsta->ssid, ifsta->ssid_len); 2068 ifsta->ssid, ifsta->ssid_len);
3680 if (!bss) 2069 if (!bss)
3681 goto dont_join; 2070 goto dont_join;
3682 2071
3683 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s" 2072 printk(KERN_DEBUG "%s: Selected IBSS BSSID %s"
3684 " based on configured SSID\n", 2073 " based on configured SSID\n",
3685 dev->name, print_mac(mac, bssid)); 2074 sdata->dev->name, print_mac(mac, bssid));
3686 ret = ieee80211_sta_join_ibss(dev, ifsta, bss); 2075 ret = ieee80211_sta_join_ibss(sdata, ifsta, bss);
3687 ieee80211_rx_bss_put(local, bss); 2076 ieee80211_rx_bss_put(local, bss);
3688 return ret; 2077 return ret;
3689 } 2078 }
@@ -3694,17 +2083,17 @@ dont_join:
3694#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2083#endif /* CONFIG_MAC80211_IBSS_DEBUG */
3695 2084
3696 /* Selected IBSS not found in current scan results - try to scan */ 2085 /* Selected IBSS not found in current scan results - try to scan */
3697 if (ifsta->state == IEEE80211_IBSS_JOINED && 2086 if (ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED &&
3698 !ieee80211_sta_active_ibss(dev)) { 2087 !ieee80211_sta_active_ibss(sdata)) {
3699 mod_timer(&ifsta->timer, jiffies + 2088 mod_timer(&ifsta->timer, jiffies +
3700 IEEE80211_IBSS_MERGE_INTERVAL); 2089 IEEE80211_IBSS_MERGE_INTERVAL);
3701 } else if (time_after(jiffies, local->last_scan_completed + 2090 } else if (time_after(jiffies, local->last_scan_completed +
3702 IEEE80211_SCAN_INTERVAL)) { 2091 IEEE80211_SCAN_INTERVAL)) {
3703 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to " 2092 printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
3704 "join\n", dev->name); 2093 "join\n", sdata->dev->name);
3705 return ieee80211_sta_req_scan(dev, ifsta->ssid, 2094 return ieee80211_request_scan(sdata, ifsta->ssid,
3706 ifsta->ssid_len); 2095 ifsta->ssid_len);
3707 } else if (ifsta->state != IEEE80211_IBSS_JOINED) { 2096 } else if (ifsta->state != IEEE80211_STA_MLME_IBSS_JOINED) {
3708 int interval = IEEE80211_SCAN_INTERVAL; 2097 int interval = IEEE80211_SCAN_INTERVAL;
3709 2098
3710 if (time_after(jiffies, ifsta->ibss_join_req + 2099 if (time_after(jiffies, ifsta->ibss_join_req +
@@ -3712,10 +2101,10 @@ dont_join:
3712 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) && 2101 if ((ifsta->flags & IEEE80211_STA_CREATE_IBSS) &&
3713 (!(local->oper_channel->flags & 2102 (!(local->oper_channel->flags &
3714 IEEE80211_CHAN_NO_IBSS))) 2103 IEEE80211_CHAN_NO_IBSS)))
3715 return ieee80211_sta_create_ibss(dev, ifsta); 2104 return ieee80211_sta_create_ibss(sdata, ifsta);
3716 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) { 2105 if (ifsta->flags & IEEE80211_STA_CREATE_IBSS) {
3717 printk(KERN_DEBUG "%s: IBSS not allowed on" 2106 printk(KERN_DEBUG "%s: IBSS not allowed on"
3718 " %d MHz\n", dev->name, 2107 " %d MHz\n", sdata->dev->name,
3719 local->hw.conf.channel->center_freq); 2108 local->hw.conf.channel->center_freq);
3720 } 2109 }
3721 2110
@@ -3724,7 +2113,7 @@ dont_join:
3724 interval = IEEE80211_SCAN_INTERVAL_SLOW; 2113 interval = IEEE80211_SCAN_INTERVAL_SLOW;
3725 } 2114 }
3726 2115
3727 ifsta->state = IEEE80211_IBSS_SEARCH; 2116 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
3728 mod_timer(&ifsta->timer, jiffies + interval); 2117 mod_timer(&ifsta->timer, jiffies + interval);
3729 return 0; 2118 return 0;
3730 } 2119 }
@@ -3733,620 +2122,344 @@ dont_join:
3733} 2122}
3734 2123
3735 2124
3736int ieee80211_sta_set_ssid(struct net_device *dev, char *ssid, size_t len) 2125static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata,
2126 struct ieee80211_if_sta *ifsta)
3737{ 2127{
3738 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2128 struct ieee80211_local *local = sdata->local;
3739 struct ieee80211_if_sta *ifsta; 2129 struct ieee80211_bss *bss, *selected = NULL;
3740 int res; 2130 int top_rssi = 0, freq;
3741 2131
3742 if (len > IEEE80211_MAX_SSID_LEN) 2132 spin_lock_bh(&local->bss_lock);
3743 return -EINVAL; 2133 freq = local->oper_channel->center_freq;
2134 list_for_each_entry(bss, &local->bss_list, list) {
2135 if (!(bss->capability & WLAN_CAPABILITY_ESS))
2136 continue;
3744 2137
3745 ifsta = &sdata->u.sta; 2138 if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
2139 IEEE80211_STA_AUTO_BSSID_SEL |
2140 IEEE80211_STA_AUTO_CHANNEL_SEL)) &&
2141 (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
2142 !!sdata->default_key))
2143 continue;
3746 2144
3747 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) { 2145 if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&
3748 memset(ifsta->ssid, 0, sizeof(ifsta->ssid)); 2146 bss->freq != freq)
3749 memcpy(ifsta->ssid, ssid, len); 2147 continue;
3750 ifsta->ssid_len = len;
3751 ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
3752 2148
3753 res = 0; 2149 if (!(ifsta->flags & IEEE80211_STA_AUTO_BSSID_SEL) &&
3754 /* 2150 memcmp(bss->bssid, ifsta->bssid, ETH_ALEN))
3755 * Hack! MLME code needs to be cleaned up to have different 2151 continue;
3756 * entry points for configuration and internal selection change
3757 */
3758 if (netif_running(sdata->dev))
3759 res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
3760 if (res) {
3761 printk(KERN_DEBUG "%s: Failed to config new SSID to "
3762 "the low-level driver\n", dev->name);
3763 return res;
3764 }
3765 }
3766 2152
3767 if (len) 2153 if (!(ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL) &&
3768 ifsta->flags |= IEEE80211_STA_SSID_SET; 2154 !ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
3769 else 2155 continue;
3770 ifsta->flags &= ~IEEE80211_STA_SSID_SET;
3771 2156
3772 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && 2157 if (!selected || top_rssi < bss->signal) {
3773 !(ifsta->flags & IEEE80211_STA_BSSID_SET)) { 2158 selected = bss;
3774 ifsta->ibss_join_req = jiffies; 2159 top_rssi = bss->signal;
3775 ifsta->state = IEEE80211_IBSS_SEARCH; 2160 }
3776 return ieee80211_sta_find_ibss(dev, ifsta);
3777 } 2161 }
2162 if (selected)
2163 atomic_inc(&selected->users);
2164 spin_unlock_bh(&local->bss_lock);
3778 2165
3779 return 0; 2166 if (selected) {
3780} 2167 ieee80211_set_freq(sdata, selected->freq);
2168 if (!(ifsta->flags & IEEE80211_STA_SSID_SET))
2169 ieee80211_sta_set_ssid(sdata, selected->ssid,
2170 selected->ssid_len);
2171 ieee80211_sta_set_bssid(sdata, selected->bssid);
2172 ieee80211_sta_def_wmm_params(sdata, selected);
3781 2173
2174 /* Send out direct probe if no probe resp was received or
2175 * the one we have is outdated
2176 */
2177 if (!selected->last_probe_resp ||
2178 time_after(jiffies, selected->last_probe_resp
2179 + IEEE80211_SCAN_RESULT_EXPIRE))
2180 ifsta->state = IEEE80211_STA_MLME_DIRECT_PROBE;
2181 else
2182 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
3782 2183
3783int ieee80211_sta_get_ssid(struct net_device *dev, char *ssid, size_t *len) 2184 ieee80211_rx_bss_put(local, selected);
3784{ 2185 ieee80211_sta_reset_auth(sdata, ifsta);
3785 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 2186 return 0;
3786 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2187 } else {
3787 memcpy(ssid, ifsta->ssid, ifsta->ssid_len); 2188 if (ifsta->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
3788 *len = ifsta->ssid_len; 2189 ifsta->assoc_scan_tries++;
3789 return 0; 2190 if (ifsta->flags & IEEE80211_STA_AUTO_SSID_SEL)
2191 ieee80211_start_scan(sdata, NULL, 0);
2192 else
2193 ieee80211_start_scan(sdata, ifsta->ssid,
2194 ifsta->ssid_len);
2195 ifsta->state = IEEE80211_STA_MLME_AUTHENTICATE;
2196 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
2197 } else
2198 ifsta->state = IEEE80211_STA_MLME_DISABLED;
2199 }
2200 return -1;
3790} 2201}
3791 2202
3792 2203
3793int ieee80211_sta_set_bssid(struct net_device *dev, u8 *bssid) 2204static void ieee80211_sta_work(struct work_struct *work)
3794{ 2205{
3795 struct ieee80211_sub_if_data *sdata; 2206 struct ieee80211_sub_if_data *sdata =
2207 container_of(work, struct ieee80211_sub_if_data, u.sta.work);
2208 struct ieee80211_local *local = sdata->local;
3796 struct ieee80211_if_sta *ifsta; 2209 struct ieee80211_if_sta *ifsta;
3797 int res; 2210 struct sk_buff *skb;
3798
3799 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3800 ifsta = &sdata->u.sta;
3801 2211
3802 if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) { 2212 if (!netif_running(sdata->dev))
3803 memcpy(ifsta->bssid, bssid, ETH_ALEN); 2213 return;
3804 res = 0;
3805 /*
3806 * Hack! See also ieee80211_sta_set_ssid.
3807 */
3808 if (netif_running(sdata->dev))
3809 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
3810 if (res) {
3811 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
3812 "the low-level driver\n", dev->name);
3813 return res;
3814 }
3815 }
3816 2214
3817 if (is_valid_ether_addr(bssid)) 2215 if (local->sw_scanning || local->hw_scanning)
3818 ifsta->flags |= IEEE80211_STA_BSSID_SET; 2216 return;
3819 else
3820 ifsta->flags &= ~IEEE80211_STA_BSSID_SET;
3821 2217
3822 return 0; 2218 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION &&
3823} 2219 sdata->vif.type != NL80211_IFTYPE_ADHOC))
2220 return;
2221 ifsta = &sdata->u.sta;
3824 2222
2223 while ((skb = skb_dequeue(&ifsta->skb_queue)))
2224 ieee80211_sta_rx_queued_mgmt(sdata, skb);
3825 2225
3826static void ieee80211_send_nullfunc(struct ieee80211_local *local, 2226 if (ifsta->state != IEEE80211_STA_MLME_DIRECT_PROBE &&
3827 struct ieee80211_sub_if_data *sdata, 2227 ifsta->state != IEEE80211_STA_MLME_AUTHENTICATE &&
3828 int powersave) 2228 ifsta->state != IEEE80211_STA_MLME_ASSOCIATE &&
3829{ 2229 test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
3830 struct sk_buff *skb; 2230 ieee80211_start_scan(sdata, ifsta->scan_ssid,
3831 struct ieee80211_hdr *nullfunc; 2231 ifsta->scan_ssid_len);
3832 __le16 fc; 2232 return;
2233 }
3833 2234
3834 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24); 2235 if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request)) {
3835 if (!skb) { 2236 if (ieee80211_sta_config_auth(sdata, ifsta))
3836 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc " 2237 return;
3837 "frame\n", sdata->dev->name); 2238 clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request);
2239 } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifsta->request))
3838 return; 2240 return;
2241
2242 switch (ifsta->state) {
2243 case IEEE80211_STA_MLME_DISABLED:
2244 break;
2245 case IEEE80211_STA_MLME_DIRECT_PROBE:
2246 ieee80211_direct_probe(sdata, ifsta);
2247 break;
2248 case IEEE80211_STA_MLME_AUTHENTICATE:
2249 ieee80211_authenticate(sdata, ifsta);
2250 break;
2251 case IEEE80211_STA_MLME_ASSOCIATE:
2252 ieee80211_associate(sdata, ifsta);
2253 break;
2254 case IEEE80211_STA_MLME_ASSOCIATED:
2255 ieee80211_associated(sdata, ifsta);
2256 break;
2257 case IEEE80211_STA_MLME_IBSS_SEARCH:
2258 ieee80211_sta_find_ibss(sdata, ifsta);
2259 break;
2260 case IEEE80211_STA_MLME_IBSS_JOINED:
2261 ieee80211_sta_merge_ibss(sdata, ifsta);
2262 break;
2263 default:
2264 WARN_ON(1);
2265 break;
3839 } 2266 }
3840 skb_reserve(skb, local->hw.extra_tx_headroom);
3841 2267
3842 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); 2268 if (ieee80211_privacy_mismatch(sdata, ifsta)) {
3843 memset(nullfunc, 0, 24); 2269 printk(KERN_DEBUG "%s: privacy configuration mismatch and "
3844 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | 2270 "mixed-cell disabled - disassociate\n", sdata->dev->name);
3845 IEEE80211_FCTL_TODS);
3846 if (powersave)
3847 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
3848 nullfunc->frame_control = fc;
3849 memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
3850 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
3851 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
3852
3853 ieee80211_sta_tx(sdata->dev, skb, 0);
3854}
3855 2271
2272 ieee80211_set_disassoc(sdata, ifsta, false, true,
2273 WLAN_REASON_UNSPECIFIED);
2274 }
2275}
3856 2276
3857static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) 2277static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
3858{ 2278{
3859 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 2279 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3860 ieee80211_vif_is_mesh(&sdata->vif)) 2280 queue_work(sdata->local->hw.workqueue,
3861 ieee80211_sta_timer((unsigned long)sdata); 2281 &sdata->u.sta.work);
3862} 2282}
3863 2283
3864void ieee80211_scan_completed(struct ieee80211_hw *hw) 2284/* interface setup */
2285void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
3865{ 2286{
3866 struct ieee80211_local *local = hw_to_local(hw); 2287 struct ieee80211_if_sta *ifsta;
3867 struct net_device *dev = local->scan_dev;
3868 struct ieee80211_sub_if_data *sdata;
3869 union iwreq_data wrqu;
3870 2288
3871 local->last_scan_completed = jiffies; 2289 ifsta = &sdata->u.sta;
3872 memset(&wrqu, 0, sizeof(wrqu)); 2290 INIT_WORK(&ifsta->work, ieee80211_sta_work);
3873 wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL); 2291 setup_timer(&ifsta->timer, ieee80211_sta_timer,
3874 2292 (unsigned long) sdata);
3875 if (local->sta_hw_scanning) { 2293 skb_queue_head_init(&ifsta->skb_queue);
3876 local->sta_hw_scanning = 0; 2294
3877 if (ieee80211_hw_config(local)) 2295 ifsta->capab = WLAN_CAPABILITY_ESS;
3878 printk(KERN_DEBUG "%s: failed to restore operational " 2296 ifsta->auth_algs = IEEE80211_AUTH_ALG_OPEN |
3879 "channel after scan\n", dev->name); 2297 IEEE80211_AUTH_ALG_SHARED_KEY;
3880 /* Restart STA timer for HW scan case */ 2298 ifsta->flags |= IEEE80211_STA_CREATE_IBSS |
3881 rcu_read_lock(); 2299 IEEE80211_STA_AUTO_BSSID_SEL |
3882 list_for_each_entry_rcu(sdata, &local->interfaces, list) 2300 IEEE80211_STA_AUTO_CHANNEL_SEL;
3883 ieee80211_restart_sta_timer(sdata); 2301 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4)
3884 rcu_read_unlock(); 2302 ifsta->flags |= IEEE80211_STA_WMM_ENABLED;
2303}
2304
2305/*
2306 * Add a new IBSS station, will also be called by the RX code when,
2307 * in IBSS mode, receiving a frame from a yet-unknown station, hence
2308 * must be callable in atomic context.
2309 */
2310struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
2311 struct sk_buff *skb, u8 *bssid,
2312 u8 *addr, u64 supp_rates)
2313{
2314 struct ieee80211_local *local = sdata->local;
2315 struct sta_info *sta;
2316 DECLARE_MAC_BUF(mac);
2317 int band = local->hw.conf.channel->band;
3885 2318
3886 goto done; 2319 /* TODO: Could consider removing the least recently used entry and
2320 * allow new one to be added. */
2321 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
2322 if (net_ratelimit()) {
2323 printk(KERN_DEBUG "%s: No room for a new IBSS STA "
2324 "entry %s\n", sdata->dev->name, print_mac(mac, addr));
2325 }
2326 return NULL;
3887 } 2327 }
3888 2328
3889 local->sta_sw_scanning = 0; 2329 if (compare_ether_addr(bssid, sdata->u.sta.bssid))
3890 if (ieee80211_hw_config(local)) 2330 return NULL;
3891 printk(KERN_DEBUG "%s: failed to restore operational "
3892 "channel after scan\n", dev->name);
3893 2331
2332#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
2333 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
2334 wiphy_name(local->hw.wiphy), print_mac(mac, addr), sdata->dev->name);
2335#endif
3894 2336
3895 netif_tx_lock_bh(local->mdev); 2337 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
3896 netif_addr_lock(local->mdev); 2338 if (!sta)
3897 local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC; 2339 return NULL;
3898 local->ops->configure_filter(local_to_hw(local),
3899 FIF_BCN_PRBRESP_PROMISC,
3900 &local->filter_flags,
3901 local->mdev->mc_count,
3902 local->mdev->mc_list);
3903 2340
3904 netif_addr_unlock(local->mdev); 2341 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
3905 netif_tx_unlock_bh(local->mdev);
3906 2342
3907 rcu_read_lock(); 2343 /* make sure mandatory rates are always added */
3908 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2344 sta->sta.supp_rates[band] = supp_rates |
3909 /* Tell AP we're back */ 2345 ieee80211_mandatory_rates(local, band);
3910 if (sdata->vif.type == IEEE80211_IF_TYPE_STA &&
3911 sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)
3912 ieee80211_send_nullfunc(local, sdata, 0);
3913 2346
3914 ieee80211_restart_sta_timer(sdata); 2347 rate_control_rate_init(sta);
3915 2348
3916 netif_wake_queue(sdata->dev); 2349 if (sta_info_insert(sta))
3917 } 2350 return NULL;
3918 rcu_read_unlock();
3919 2351
3920done: 2352 return sta;
3921 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3922 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
3923 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
3924 if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
3925 (!(ifsta->state == IEEE80211_IBSS_JOINED) &&
3926 !ieee80211_sta_active_ibss(dev)))
3927 ieee80211_sta_find_ibss(dev, ifsta);
3928 }
3929} 2353}
3930EXPORT_SYMBOL(ieee80211_scan_completed);
3931 2354
3932void ieee80211_sta_scan_work(struct work_struct *work) 2355/* configuration hooks */
2356void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata,
2357 struct ieee80211_if_sta *ifsta)
3933{ 2358{
3934 struct ieee80211_local *local = 2359 struct ieee80211_local *local = sdata->local;
3935 container_of(work, struct ieee80211_local, scan_work.work);
3936 struct net_device *dev = local->scan_dev;
3937 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
3938 struct ieee80211_supported_band *sband;
3939 struct ieee80211_channel *chan;
3940 int skip;
3941 unsigned long next_delay = 0;
3942 2360
3943 if (!local->sta_sw_scanning) 2361 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3944 return; 2362 return;
3945 2363
3946 switch (local->scan_state) { 2364 if ((ifsta->flags & (IEEE80211_STA_BSSID_SET |
3947 case SCAN_SET_CHANNEL: 2365 IEEE80211_STA_AUTO_BSSID_SEL)) &&
3948 /* 2366 (ifsta->flags & (IEEE80211_STA_SSID_SET |
3949 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS 2367 IEEE80211_STA_AUTO_SSID_SEL))) {
3950 * after we successfully scanned the last channel of the last
3951 * band (and the last band is supported by the hw)
3952 */
3953 if (local->scan_band < IEEE80211_NUM_BANDS)
3954 sband = local->hw.wiphy->bands[local->scan_band];
3955 else
3956 sband = NULL;
3957
3958 /*
3959 * If we are at an unsupported band and have more bands
3960 * left to scan, advance to the next supported one.
3961 */
3962 while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
3963 local->scan_band++;
3964 sband = local->hw.wiphy->bands[local->scan_band];
3965 local->scan_channel_idx = 0;
3966 }
3967
3968 /* if no more bands/channels left, complete scan */
3969 if (!sband || local->scan_channel_idx >= sband->n_channels) {
3970 ieee80211_scan_completed(local_to_hw(local));
3971 return;
3972 }
3973 skip = 0;
3974 chan = &sband->channels[local->scan_channel_idx];
3975
3976 if (chan->flags & IEEE80211_CHAN_DISABLED ||
3977 (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
3978 chan->flags & IEEE80211_CHAN_NO_IBSS))
3979 skip = 1;
3980
3981 if (!skip) {
3982 local->scan_channel = chan;
3983 if (ieee80211_hw_config(local)) {
3984 printk(KERN_DEBUG "%s: failed to set freq to "
3985 "%d MHz for scan\n", dev->name,
3986 chan->center_freq);
3987 skip = 1;
3988 }
3989 }
3990
3991 /* advance state machine to next channel/band */
3992 local->scan_channel_idx++;
3993 if (local->scan_channel_idx >= sband->n_channels) {
3994 /*
3995 * scan_band may end up == IEEE80211_NUM_BANDS, but
3996 * we'll catch that case above and complete the scan
3997 * if that is the case.
3998 */
3999 local->scan_band++;
4000 local->scan_channel_idx = 0;
4001 }
4002
4003 if (skip)
4004 break;
4005 2368
4006 next_delay = IEEE80211_PROBE_DELAY + 2369 if (ifsta->state == IEEE80211_STA_MLME_ASSOCIATED)
4007 usecs_to_jiffies(local->hw.channel_change_time); 2370 ieee80211_set_disassoc(sdata, ifsta, true, true,
4008 local->scan_state = SCAN_SEND_PROBE; 2371 WLAN_REASON_DEAUTH_LEAVING);
4009 break;
4010 case SCAN_SEND_PROBE:
4011 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
4012 local->scan_state = SCAN_SET_CHANNEL;
4013 2372
4014 if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN) 2373 set_bit(IEEE80211_STA_REQ_AUTH, &ifsta->request);
4015 break; 2374 queue_work(local->hw.workqueue, &ifsta->work);
4016 ieee80211_send_probe_req(dev, NULL, local->scan_ssid,
4017 local->scan_ssid_len);
4018 next_delay = IEEE80211_CHANNEL_TIME;
4019 break;
4020 } 2375 }
4021
4022 if (local->sta_sw_scanning)
4023 queue_delayed_work(local->hw.workqueue, &local->scan_work,
4024 next_delay);
4025} 2376}
4026 2377
4027 2378int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
4028static int ieee80211_sta_start_scan(struct net_device *dev,
4029 u8 *ssid, size_t ssid_len)
4030{ 2379{
4031 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2380 struct ieee80211_if_sta *ifsta;
4032 struct ieee80211_sub_if_data *sdata; 2381 int res;
4033 2382
4034 if (ssid_len > IEEE80211_MAX_SSID_LEN) 2383 if (len > IEEE80211_MAX_SSID_LEN)
4035 return -EINVAL; 2384 return -EINVAL;
4036 2385
4037 /* MLME-SCAN.request (page 118) page 144 (11.1.3.1) 2386 ifsta = &sdata->u.sta;
4038 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
4039 * BSSID: MACAddress
4040 * SSID
4041 * ScanType: ACTIVE, PASSIVE
4042 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
4043 * a Probe frame during active scanning
4044 * ChannelList
4045 * MinChannelTime (>= ProbeDelay), in TU
4046 * MaxChannelTime: (>= MinChannelTime), in TU
4047 */
4048
4049 /* MLME-SCAN.confirm
4050 * BSSDescriptionSet
4051 * ResultCode: SUCCESS, INVALID_PARAMETERS
4052 */
4053 2387
4054 if (local->sta_sw_scanning || local->sta_hw_scanning) { 2388 if (ifsta->ssid_len != len || memcmp(ifsta->ssid, ssid, len) != 0) {
4055 if (local->scan_dev == dev) 2389 memset(ifsta->ssid, 0, sizeof(ifsta->ssid));
4056 return 0; 2390 memcpy(ifsta->ssid, ssid, len);
4057 return -EBUSY; 2391 ifsta->ssid_len = len;
4058 } 2392 ifsta->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
4059 2393
4060 if (local->ops->hw_scan) { 2394 res = 0;
4061 int rc = local->ops->hw_scan(local_to_hw(local), 2395 /*
4062 ssid, ssid_len); 2396 * Hack! MLME code needs to be cleaned up to have different
4063 if (!rc) { 2397 * entry points for configuration and internal selection change
4064 local->sta_hw_scanning = 1; 2398 */
4065 local->scan_dev = dev; 2399 if (netif_running(sdata->dev))
2400 res = ieee80211_if_config(sdata, IEEE80211_IFCC_SSID);
2401 if (res) {
2402 printk(KERN_DEBUG "%s: Failed to config new SSID to "
2403 "the low-level driver\n", sdata->dev->name);
2404 return res;
4066 } 2405 }
4067 return rc;
4068 } 2406 }
4069 2407
4070 local->sta_sw_scanning = 1; 2408 if (len)
2409 ifsta->flags |= IEEE80211_STA_SSID_SET;
2410 else
2411 ifsta->flags &= ~IEEE80211_STA_SSID_SET;
4071 2412
4072 rcu_read_lock(); 2413 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
4073 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2414 !(ifsta->flags & IEEE80211_STA_BSSID_SET)) {
4074 netif_stop_queue(sdata->dev); 2415 ifsta->ibss_join_req = jiffies;
4075 if (sdata->vif.type == IEEE80211_IF_TYPE_STA && 2416 ifsta->state = IEEE80211_STA_MLME_IBSS_SEARCH;
4076 (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED)) 2417 return ieee80211_sta_find_ibss(sdata, ifsta);
4077 ieee80211_send_nullfunc(local, sdata, 1);
4078 } 2418 }
4079 rcu_read_unlock();
4080
4081 if (ssid) {
4082 local->scan_ssid_len = ssid_len;
4083 memcpy(local->scan_ssid, ssid, ssid_len);
4084 } else
4085 local->scan_ssid_len = 0;
4086 local->scan_state = SCAN_SET_CHANNEL;
4087 local->scan_channel_idx = 0;
4088 local->scan_band = IEEE80211_BAND_2GHZ;
4089 local->scan_dev = dev;
4090
4091 netif_addr_lock_bh(local->mdev);
4092 local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
4093 local->ops->configure_filter(local_to_hw(local),
4094 FIF_BCN_PRBRESP_PROMISC,
4095 &local->filter_flags,
4096 local->mdev->mc_count,
4097 local->mdev->mc_list);
4098 netif_addr_unlock_bh(local->mdev);
4099
4100 /* TODO: start scan as soon as all nullfunc frames are ACKed */
4101 queue_delayed_work(local->hw.workqueue, &local->scan_work,
4102 IEEE80211_CHANNEL_TIME);
4103 2419
4104 return 0; 2420 return 0;
4105} 2421}
4106 2422
4107 2423int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len)
4108int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
4109{ 2424{
4110 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4111 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2425 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4112 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2426 memcpy(ssid, ifsta->ssid, ifsta->ssid_len);
4113 2427 *len = ifsta->ssid_len;
4114 if (sdata->vif.type != IEEE80211_IF_TYPE_STA)
4115 return ieee80211_sta_start_scan(dev, ssid, ssid_len);
4116
4117 if (local->sta_sw_scanning || local->sta_hw_scanning) {
4118 if (local->scan_dev == dev)
4119 return 0;
4120 return -EBUSY;
4121 }
4122
4123 ifsta->scan_ssid_len = ssid_len;
4124 if (ssid_len)
4125 memcpy(ifsta->scan_ssid, ssid, ssid_len);
4126 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
4127 queue_work(local->hw.workqueue, &ifsta->work);
4128 return 0; 2428 return 0;
4129} 2429}
4130 2430
4131static char * 2431int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
4132ieee80211_sta_scan_result(struct net_device *dev,
4133 struct iw_request_info *info,
4134 struct ieee80211_sta_bss *bss,
4135 char *current_ev, char *end_buf)
4136{ 2432{
4137 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2433 struct ieee80211_if_sta *ifsta;
4138 struct iw_event iwe; 2434 int res;
4139
4140 if (time_after(jiffies,
4141 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
4142 return current_ev;
4143
4144 memset(&iwe, 0, sizeof(iwe));
4145 iwe.cmd = SIOCGIWAP;
4146 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
4147 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
4148 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4149 IW_EV_ADDR_LEN);
4150
4151 memset(&iwe, 0, sizeof(iwe));
4152 iwe.cmd = SIOCGIWESSID;
4153 if (bss_mesh_cfg(bss)) {
4154 iwe.u.data.length = bss_mesh_id_len(bss);
4155 iwe.u.data.flags = 1;
4156 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4157 &iwe, bss_mesh_id(bss));
4158 } else {
4159 iwe.u.data.length = bss->ssid_len;
4160 iwe.u.data.flags = 1;
4161 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4162 &iwe, bss->ssid);
4163 }
4164
4165 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
4166 || bss_mesh_cfg(bss)) {
4167 memset(&iwe, 0, sizeof(iwe));
4168 iwe.cmd = SIOCGIWMODE;
4169 if (bss_mesh_cfg(bss))
4170 iwe.u.mode = IW_MODE_MESH;
4171 else if (bss->capability & WLAN_CAPABILITY_ESS)
4172 iwe.u.mode = IW_MODE_MASTER;
4173 else
4174 iwe.u.mode = IW_MODE_ADHOC;
4175 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
4176 &iwe, IW_EV_UINT_LEN);
4177 }
4178
4179 memset(&iwe, 0, sizeof(iwe));
4180 iwe.cmd = SIOCGIWFREQ;
4181 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
4182 iwe.u.freq.e = 0;
4183 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4184 IW_EV_FREQ_LEN);
4185
4186 memset(&iwe, 0, sizeof(iwe));
4187 iwe.cmd = SIOCGIWFREQ;
4188 iwe.u.freq.m = bss->freq;
4189 iwe.u.freq.e = 6;
4190 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4191 IW_EV_FREQ_LEN);
4192 memset(&iwe, 0, sizeof(iwe));
4193 iwe.cmd = IWEVQUAL;
4194 iwe.u.qual.qual = bss->qual;
4195 iwe.u.qual.level = bss->signal;
4196 iwe.u.qual.noise = bss->noise;
4197 iwe.u.qual.updated = local->wstats_flags;
4198 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
4199 IW_EV_QUAL_LEN);
4200
4201 memset(&iwe, 0, sizeof(iwe));
4202 iwe.cmd = SIOCGIWENCODE;
4203 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
4204 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
4205 else
4206 iwe.u.data.flags = IW_ENCODE_DISABLED;
4207 iwe.u.data.length = 0;
4208 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4209 &iwe, "");
4210
4211 if (bss && bss->wpa_ie) {
4212 memset(&iwe, 0, sizeof(iwe));
4213 iwe.cmd = IWEVGENIE;
4214 iwe.u.data.length = bss->wpa_ie_len;
4215 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4216 &iwe, bss->wpa_ie);
4217 }
4218
4219 if (bss && bss->rsn_ie) {
4220 memset(&iwe, 0, sizeof(iwe));
4221 iwe.cmd = IWEVGENIE;
4222 iwe.u.data.length = bss->rsn_ie_len;
4223 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4224 &iwe, bss->rsn_ie);
4225 }
4226
4227 if (bss && bss->ht_ie) {
4228 memset(&iwe, 0, sizeof(iwe));
4229 iwe.cmd = IWEVGENIE;
4230 iwe.u.data.length = bss->ht_ie_len;
4231 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
4232 &iwe, bss->ht_ie);
4233 }
4234
4235 if (bss && bss->supp_rates_len > 0) {
4236 /* display all supported rates in readable format */
4237 char *p = current_ev + iwe_stream_lcp_len(info);
4238 int i;
4239
4240 memset(&iwe, 0, sizeof(iwe));
4241 iwe.cmd = SIOCGIWRATE;
4242 /* Those two flags are ignored... */
4243 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
4244
4245 for (i = 0; i < bss->supp_rates_len; i++) {
4246 iwe.u.bitrate.value = ((bss->supp_rates[i] &
4247 0x7f) * 500000);
4248 p = iwe_stream_add_value(info, current_ev, p,
4249 end_buf, &iwe, IW_EV_PARAM_LEN);
4250 }
4251 current_ev = p;
4252 }
4253 2435
4254 if (bss) { 2436 ifsta = &sdata->u.sta;
4255 char *buf;
4256 buf = kmalloc(30, GFP_ATOMIC);
4257 if (buf) {
4258 memset(&iwe, 0, sizeof(iwe));
4259 iwe.cmd = IWEVCUSTOM;
4260 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
4261 iwe.u.data.length = strlen(buf);
4262 current_ev = iwe_stream_add_point(info, current_ev,
4263 end_buf,
4264 &iwe, buf);
4265 memset(&iwe, 0, sizeof(iwe));
4266 iwe.cmd = IWEVCUSTOM;
4267 sprintf(buf, " Last beacon: %dms ago",
4268 jiffies_to_msecs(jiffies - bss->last_update));
4269 iwe.u.data.length = strlen(buf);
4270 current_ev = iwe_stream_add_point(info, current_ev,
4271 end_buf, &iwe, buf);
4272 kfree(buf);
4273 }
4274 }
4275 2437
4276 if (bss_mesh_cfg(bss)) { 2438 if (memcmp(ifsta->bssid, bssid, ETH_ALEN) != 0) {
4277 char *buf; 2439 memcpy(ifsta->bssid, bssid, ETH_ALEN);
4278 u8 *cfg = bss_mesh_cfg(bss); 2440 res = 0;
4279 buf = kmalloc(50, GFP_ATOMIC); 2441 /*
4280 if (buf) { 2442 * Hack! See also ieee80211_sta_set_ssid.
4281 memset(&iwe, 0, sizeof(iwe)); 2443 */
4282 iwe.cmd = IWEVCUSTOM; 2444 if (netif_running(sdata->dev))
4283 sprintf(buf, "Mesh network (version %d)", cfg[0]); 2445 res = ieee80211_if_config(sdata, IEEE80211_IFCC_BSSID);
4284 iwe.u.data.length = strlen(buf); 2446 if (res) {
4285 current_ev = iwe_stream_add_point(info, current_ev, 2447 printk(KERN_DEBUG "%s: Failed to config new BSSID to "
4286 end_buf, 2448 "the low-level driver\n", sdata->dev->name);
4287 &iwe, buf); 2449 return res;
4288 sprintf(buf, "Path Selection Protocol ID: "
4289 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
4290 cfg[4]);
4291 iwe.u.data.length = strlen(buf);
4292 current_ev = iwe_stream_add_point(info, current_ev,
4293 end_buf,
4294 &iwe, buf);
4295 sprintf(buf, "Path Selection Metric ID: "
4296 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
4297 cfg[8]);
4298 iwe.u.data.length = strlen(buf);
4299 current_ev = iwe_stream_add_point(info, current_ev,
4300 end_buf,
4301 &iwe, buf);
4302 sprintf(buf, "Congestion Control Mode ID: "
4303 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
4304 cfg[11], cfg[12]);
4305 iwe.u.data.length = strlen(buf);
4306 current_ev = iwe_stream_add_point(info, current_ev,
4307 end_buf,
4308 &iwe, buf);
4309 sprintf(buf, "Channel Precedence: "
4310 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
4311 cfg[15], cfg[16]);
4312 iwe.u.data.length = strlen(buf);
4313 current_ev = iwe_stream_add_point(info, current_ev,
4314 end_buf,
4315 &iwe, buf);
4316 kfree(buf);
4317 } 2450 }
4318 } 2451 }
4319 2452
4320 return current_ev; 2453 if (is_valid_ether_addr(bssid))
4321} 2454 ifsta->flags |= IEEE80211_STA_BSSID_SET;
4322 2455 else
2456 ifsta->flags &= ~IEEE80211_STA_BSSID_SET;
4323 2457
4324int ieee80211_sta_scan_results(struct net_device *dev, 2458 return 0;
4325 struct iw_request_info *info,
4326 char *buf, size_t len)
4327{
4328 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4329 char *current_ev = buf;
4330 char *end_buf = buf + len;
4331 struct ieee80211_sta_bss *bss;
4332
4333 spin_lock_bh(&local->sta_bss_lock);
4334 list_for_each_entry(bss, &local->sta_bss_list, list) {
4335 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
4336 spin_unlock_bh(&local->sta_bss_lock);
4337 return -E2BIG;
4338 }
4339 current_ev = ieee80211_sta_scan_result(dev, info, bss,
4340 current_ev, end_buf);
4341 }
4342 spin_unlock_bh(&local->sta_bss_lock);
4343 return current_ev - buf;
4344} 2459}
4345 2460
4346 2461int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len)
4347int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4348{ 2462{
4349 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4350 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2463 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4351 2464
4352 kfree(ifsta->extra_ie); 2465 kfree(ifsta->extra_ie);
@@ -4365,92 +2478,60 @@ int ieee80211_sta_set_extra_ie(struct net_device *dev, char *ie, size_t len)
4365 return 0; 2478 return 0;
4366} 2479}
4367 2480
4368 2481int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason)
4369struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
4370 struct sk_buff *skb, u8 *bssid,
4371 u8 *addr, u64 supp_rates)
4372{
4373 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
4374 struct sta_info *sta;
4375 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4376 DECLARE_MAC_BUF(mac);
4377 int band = local->hw.conf.channel->band;
4378
4379 /* TODO: Could consider removing the least recently used entry and
4380 * allow new one to be added. */
4381 if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
4382 if (net_ratelimit()) {
4383 printk(KERN_DEBUG "%s: No room for a new IBSS STA "
4384 "entry %s\n", dev->name, print_mac(mac, addr));
4385 }
4386 return NULL;
4387 }
4388
4389 if (compare_ether_addr(bssid, sdata->u.sta.bssid))
4390 return NULL;
4391
4392#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
4393 printk(KERN_DEBUG "%s: Adding new IBSS station %s (dev=%s)\n",
4394 wiphy_name(local->hw.wiphy), print_mac(mac, addr), dev->name);
4395#endif
4396
4397 sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
4398 if (!sta)
4399 return NULL;
4400
4401 set_sta_flags(sta, WLAN_STA_AUTHORIZED);
4402
4403 if (supp_rates)
4404 sta->supp_rates[band] = supp_rates;
4405 else
4406 sta->supp_rates[band] = sdata->u.sta.supp_rates_bits[band];
4407
4408 rate_control_rate_init(sta, local);
4409
4410 if (sta_info_insert(sta))
4411 return NULL;
4412
4413 return sta;
4414}
4415
4416
4417int ieee80211_sta_deauthenticate(struct net_device *dev, u16 reason)
4418{ 2482{
4419 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4420 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2483 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4421 2484
4422 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n", 2485 printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
4423 dev->name, reason); 2486 sdata->dev->name, reason);
4424 2487
4425 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 2488 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
4426 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 2489 sdata->vif.type != NL80211_IFTYPE_ADHOC)
4427 return -EINVAL; 2490 return -EINVAL;
4428 2491
4429 ieee80211_send_deauth(dev, ifsta, reason); 2492 ieee80211_set_disassoc(sdata, ifsta, true, true, reason);
4430 ieee80211_set_disassoc(dev, ifsta, 1);
4431 return 0; 2493 return 0;
4432} 2494}
4433 2495
4434 2496int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason)
4435int ieee80211_sta_disassociate(struct net_device *dev, u16 reason)
4436{ 2497{
4437 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
4438 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 2498 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
4439 2499
4440 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n", 2500 printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
4441 dev->name, reason); 2501 sdata->dev->name, reason);
4442 2502
4443 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 2503 if (sdata->vif.type != NL80211_IFTYPE_STATION)
4444 return -EINVAL; 2504 return -EINVAL;
4445 2505
4446 if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED)) 2506 if (!(ifsta->flags & IEEE80211_STA_ASSOCIATED))
4447 return -1; 2507 return -1;
4448 2508
4449 ieee80211_send_disassoc(dev, ifsta, reason); 2509 ieee80211_set_disassoc(sdata, ifsta, false, true, reason);
4450 ieee80211_set_disassoc(dev, ifsta, 0);
4451 return 0; 2510 return 0;
4452} 2511}
4453 2512
2513/* scan finished notification */
2514void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
2515{
2516 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
2517 struct ieee80211_if_sta *ifsta;
2518
2519 if (sdata && sdata->vif.type == NL80211_IFTYPE_ADHOC) {
2520 ifsta = &sdata->u.sta;
2521 if (!(ifsta->flags & IEEE80211_STA_BSSID_SET) ||
2522 (!(ifsta->state == IEEE80211_STA_MLME_IBSS_JOINED) &&
2523 !ieee80211_sta_active_ibss(sdata)))
2524 ieee80211_sta_find_ibss(sdata, ifsta);
2525 }
2526
2527 /* Restart STA timers */
2528 rcu_read_lock();
2529 list_for_each_entry_rcu(sdata, &local->interfaces, list)
2530 ieee80211_restart_sta_timer(sdata);
2531 rcu_read_unlock();
2532}
2533
2534/* driver notification call */
4454void ieee80211_notify_mac(struct ieee80211_hw *hw, 2535void ieee80211_notify_mac(struct ieee80211_hw *hw,
4455 enum ieee80211_notification_types notif_type) 2536 enum ieee80211_notification_types notif_type)
4456{ 2537{
@@ -4461,10 +2542,10 @@ void ieee80211_notify_mac(struct ieee80211_hw *hw,
4461 case IEEE80211_NOTIFY_RE_ASSOC: 2542 case IEEE80211_NOTIFY_RE_ASSOC:
4462 rcu_read_lock(); 2543 rcu_read_lock();
4463 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2544 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
4464 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 2545 if (sdata->vif.type != NL80211_IFTYPE_STATION)
4465 continue; 2546 continue;
4466 2547
4467 ieee80211_sta_req_auth(sdata->dev, &sdata->u.sta); 2548 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
4468 } 2549 }
4469 rcu_read_unlock(); 2550 rcu_read_unlock();
4470 break; 2551 break;
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 0388c090dfe9..5d786720d935 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -12,6 +12,7 @@
12#include <linux/rtnetlink.h> 12#include <linux/rtnetlink.h>
13#include "rate.h" 13#include "rate.h"
14#include "ieee80211_i.h" 14#include "ieee80211_i.h"
15#include "debugfs.h"
15 16
16struct rate_control_alg { 17struct rate_control_alg {
17 struct list_head list; 18 struct list_head list;
@@ -127,19 +128,46 @@ static void ieee80211_rate_control_ops_put(struct rate_control_ops *ops)
127 module_put(ops->module); 128 module_put(ops->module);
128} 129}
129 130
131#ifdef CONFIG_MAC80211_DEBUGFS
132static ssize_t rcname_read(struct file *file, char __user *userbuf,
133 size_t count, loff_t *ppos)
134{
135 struct rate_control_ref *ref = file->private_data;
136 int len = strlen(ref->ops->name);
137
138 return simple_read_from_buffer(userbuf, count, ppos,
139 ref->ops->name, len);
140}
141
142static const struct file_operations rcname_ops = {
143 .read = rcname_read,
144 .open = mac80211_open_file_generic,
145};
146#endif
147
130struct rate_control_ref *rate_control_alloc(const char *name, 148struct rate_control_ref *rate_control_alloc(const char *name,
131 struct ieee80211_local *local) 149 struct ieee80211_local *local)
132{ 150{
151 struct dentry *debugfsdir = NULL;
133 struct rate_control_ref *ref; 152 struct rate_control_ref *ref;
134 153
135 ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL); 154 ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
136 if (!ref) 155 if (!ref)
137 goto fail_ref; 156 goto fail_ref;
138 kref_init(&ref->kref); 157 kref_init(&ref->kref);
158 ref->local = local;
139 ref->ops = ieee80211_rate_control_ops_get(name); 159 ref->ops = ieee80211_rate_control_ops_get(name);
140 if (!ref->ops) 160 if (!ref->ops)
141 goto fail_ops; 161 goto fail_ops;
142 ref->priv = ref->ops->alloc(local); 162
163#ifdef CONFIG_MAC80211_DEBUGFS
164 debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
165 local->debugfs.rcdir = debugfsdir;
166 local->debugfs.rcname = debugfs_create_file("name", 0400, debugfsdir,
167 ref, &rcname_ops);
168#endif
169
170 ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
143 if (!ref->priv) 171 if (!ref->priv)
144 goto fail_priv; 172 goto fail_priv;
145 return ref; 173 return ref;
@@ -158,29 +186,46 @@ static void rate_control_release(struct kref *kref)
158 186
159 ctrl_ref = container_of(kref, struct rate_control_ref, kref); 187 ctrl_ref = container_of(kref, struct rate_control_ref, kref);
160 ctrl_ref->ops->free(ctrl_ref->priv); 188 ctrl_ref->ops->free(ctrl_ref->priv);
189
190#ifdef CONFIG_MAC80211_DEBUGFS
191 debugfs_remove(ctrl_ref->local->debugfs.rcname);
192 ctrl_ref->local->debugfs.rcname = NULL;
193 debugfs_remove(ctrl_ref->local->debugfs.rcdir);
194 ctrl_ref->local->debugfs.rcdir = NULL;
195#endif
196
161 ieee80211_rate_control_ops_put(ctrl_ref->ops); 197 ieee80211_rate_control_ops_put(ctrl_ref->ops);
162 kfree(ctrl_ref); 198 kfree(ctrl_ref);
163} 199}
164 200
165void rate_control_get_rate(struct net_device *dev, 201void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
166 struct ieee80211_supported_band *sband, 202 struct ieee80211_supported_band *sband,
167 struct sk_buff *skb, 203 struct sta_info *sta, struct sk_buff *skb,
168 struct rate_selection *sel) 204 struct rate_selection *sel)
169{ 205{
170 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 206 struct rate_control_ref *ref = sdata->local->rate_ctrl;
171 struct rate_control_ref *ref = local->rate_ctrl; 207 void *priv_sta = NULL;
172 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 208 struct ieee80211_sta *ista = NULL;
173 struct sta_info *sta;
174 int i; 209 int i;
175 210
176 rcu_read_lock();
177 sta = sta_info_get(local, hdr->addr1);
178
179 sel->rate_idx = -1; 211 sel->rate_idx = -1;
180 sel->nonerp_idx = -1; 212 sel->nonerp_idx = -1;
181 sel->probe_idx = -1; 213 sel->probe_idx = -1;
214 sel->max_rate_idx = sdata->max_ratectrl_rateidx;
215
216 if (sta) {
217 ista = &sta->sta;
218 priv_sta = sta->rate_ctrl_priv;
219 }
220
221 if (sta && sdata->force_unicast_rateidx > -1)
222 sel->rate_idx = sdata->force_unicast_rateidx;
223 else
224 ref->ops->get_rate(ref->priv, sband, ista, priv_sta, skb, sel);
182 225
183 ref->ops->get_rate(ref->priv, dev, sband, skb, sel); 226 if (sdata->max_ratectrl_rateidx > -1 &&
227 sel->rate_idx > sdata->max_ratectrl_rateidx)
228 sel->rate_idx = sdata->max_ratectrl_rateidx;
184 229
185 BUG_ON(sel->rate_idx < 0); 230 BUG_ON(sel->rate_idx < 0);
186 231
@@ -191,13 +236,11 @@ void rate_control_get_rate(struct net_device *dev,
191 if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate) 236 if (sband->bitrates[sel->rate_idx].bitrate < rate->bitrate)
192 break; 237 break;
193 238
194 if (rate_supported(sta, sband->band, i) && 239 if (rate_supported(ista, sband->band, i) &&
195 !(rate->flags & IEEE80211_RATE_ERP_G)) 240 !(rate->flags & IEEE80211_RATE_ERP_G))
196 sel->nonerp_idx = i; 241 sel->nonerp_idx = i;
197 } 242 }
198 } 243 }
199
200 rcu_read_unlock();
201} 244}
202 245
203struct rate_control_ref *rate_control_get(struct rate_control_ref *ref) 246struct rate_control_ref *rate_control_get(struct rate_control_ref *ref)
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index ede7ab56f65b..eb94e584d24e 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -19,77 +19,48 @@
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "sta_info.h" 20#include "sta_info.h"
21 21
22/**
23 * struct rate_selection - rate selection for rate control algos
24 * @rate: selected transmission rate index
25 * @nonerp: Non-ERP rate to use instead if ERP cannot be used
26 * @probe: rate for probing (or -1)
27 *
28 */
29struct rate_selection {
30 s8 rate_idx, nonerp_idx, probe_idx;
31};
32
33struct rate_control_ops {
34 struct module *module;
35 const char *name;
36 void (*tx_status)(void *priv, struct net_device *dev,
37 struct sk_buff *skb);
38 void (*get_rate)(void *priv, struct net_device *dev,
39 struct ieee80211_supported_band *band,
40 struct sk_buff *skb,
41 struct rate_selection *sel);
42 void (*rate_init)(void *priv, void *priv_sta,
43 struct ieee80211_local *local, struct sta_info *sta);
44 void (*clear)(void *priv);
45
46 void *(*alloc)(struct ieee80211_local *local);
47 void (*free)(void *priv);
48 void *(*alloc_sta)(void *priv, gfp_t gfp);
49 void (*free_sta)(void *priv, void *priv_sta);
50
51 int (*add_attrs)(void *priv, struct kobject *kobj);
52 void (*remove_attrs)(void *priv, struct kobject *kobj);
53 void (*add_sta_debugfs)(void *priv, void *priv_sta,
54 struct dentry *dir);
55 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
56};
57
58struct rate_control_ref { 22struct rate_control_ref {
23 struct ieee80211_local *local;
59 struct rate_control_ops *ops; 24 struct rate_control_ops *ops;
60 void *priv; 25 void *priv;
61 struct kref kref; 26 struct kref kref;
62}; 27};
63 28
64int ieee80211_rate_control_register(struct rate_control_ops *ops);
65void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
66
67/* Get a reference to the rate control algorithm. If `name' is NULL, get the 29/* Get a reference to the rate control algorithm. If `name' is NULL, get the
68 * first available algorithm. */ 30 * first available algorithm. */
69struct rate_control_ref *rate_control_alloc(const char *name, 31struct rate_control_ref *rate_control_alloc(const char *name,
70 struct ieee80211_local *local); 32 struct ieee80211_local *local);
71void rate_control_get_rate(struct net_device *dev, 33void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
72 struct ieee80211_supported_band *sband, 34 struct ieee80211_supported_band *sband,
73 struct sk_buff *skb, 35 struct sta_info *sta, struct sk_buff *skb,
74 struct rate_selection *sel); 36 struct rate_selection *sel);
75struct rate_control_ref *rate_control_get(struct rate_control_ref *ref); 37struct rate_control_ref *rate_control_get(struct rate_control_ref *ref);
76void rate_control_put(struct rate_control_ref *ref); 38void rate_control_put(struct rate_control_ref *ref);
77 39
78static inline void rate_control_tx_status(struct net_device *dev, 40static inline void rate_control_tx_status(struct ieee80211_local *local,
41 struct ieee80211_supported_band *sband,
42 struct sta_info *sta,
79 struct sk_buff *skb) 43 struct sk_buff *skb)
80{ 44{
81 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
82 struct rate_control_ref *ref = local->rate_ctrl; 45 struct rate_control_ref *ref = local->rate_ctrl;
46 struct ieee80211_sta *ista = &sta->sta;
47 void *priv_sta = sta->rate_ctrl_priv;
83 48
84 ref->ops->tx_status(ref->priv, dev, skb); 49 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
85} 50}
86 51
87 52
88static inline void rate_control_rate_init(struct sta_info *sta, 53static inline void rate_control_rate_init(struct sta_info *sta)
89 struct ieee80211_local *local)
90{ 54{
55 struct ieee80211_local *local = sta->sdata->local;
91 struct rate_control_ref *ref = sta->rate_ctrl; 56 struct rate_control_ref *ref = sta->rate_ctrl;
92 ref->ops->rate_init(ref->priv, sta->rate_ctrl_priv, local, sta); 57 struct ieee80211_sta *ista = &sta->sta;
58 void *priv_sta = sta->rate_ctrl_priv;
59 struct ieee80211_supported_band *sband;
60
61 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
62
63 ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
93} 64}
94 65
95 66
@@ -100,15 +71,19 @@ static inline void rate_control_clear(struct ieee80211_local *local)
100} 71}
101 72
102static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, 73static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
74 struct ieee80211_sta *sta,
103 gfp_t gfp) 75 gfp_t gfp)
104{ 76{
105 return ref->ops->alloc_sta(ref->priv, gfp); 77 return ref->ops->alloc_sta(ref->priv, sta, gfp);
106} 78}
107 79
108static inline void rate_control_free_sta(struct rate_control_ref *ref, 80static inline void rate_control_free_sta(struct sta_info *sta)
109 void *priv)
110{ 81{
111 ref->ops->free_sta(ref->priv, priv); 82 struct rate_control_ref *ref = sta->rate_ctrl;
83 struct ieee80211_sta *ista = &sta->sta;
84 void *priv_sta = sta->rate_ctrl_priv;
85
86 ref->ops->free_sta(ref->priv, ista, priv_sta);
112} 87}
113 88
114static inline void rate_control_add_sta_debugfs(struct sta_info *sta) 89static inline void rate_control_add_sta_debugfs(struct sta_info *sta)
@@ -130,31 +105,6 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
130#endif 105#endif
131} 106}
132 107
133static inline int rate_supported(struct sta_info *sta,
134 enum ieee80211_band band,
135 int index)
136{
137 return (sta == NULL || sta->supp_rates[band] & BIT(index));
138}
139
140static inline s8
141rate_lowest_index(struct ieee80211_local *local,
142 struct ieee80211_supported_band *sband,
143 struct sta_info *sta)
144{
145 int i;
146
147 for (i = 0; i < sband->n_bitrates; i++)
148 if (rate_supported(sta, sband->band, i))
149 return i;
150
151 /* warn when we cannot find a rate. */
152 WARN_ON(1);
153
154 return 0;
155}
156
157
158/* functions for rate control related to a device */ 108/* functions for rate control related to a device */
159int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local, 109int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
160 const char *name); 110 const char *name);
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 0a9135b974b5..01d64d53f3b9 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -124,7 +124,6 @@ struct rc_pid_events_file_info {
124 * struct rc_pid_debugfs_entries - tunable parameters 124 * struct rc_pid_debugfs_entries - tunable parameters
125 * 125 *
126 * Algorithm parameters, tunable via debugfs. 126 * Algorithm parameters, tunable via debugfs.
127 * @dir: the debugfs directory for a specific phy
128 * @target: target percentage for failed frames 127 * @target: target percentage for failed frames
129 * @sampling_period: error sampling interval in milliseconds 128 * @sampling_period: error sampling interval in milliseconds
130 * @coeff_p: absolute value of the proportional coefficient 129 * @coeff_p: absolute value of the proportional coefficient
@@ -143,7 +142,6 @@ struct rc_pid_events_file_info {
143 * ordering of rates) 142 * ordering of rates)
144 */ 143 */
145struct rc_pid_debugfs_entries { 144struct rc_pid_debugfs_entries {
146 struct dentry *dir;
147 struct dentry *target; 145 struct dentry *target;
148 struct dentry *sampling_period; 146 struct dentry *sampling_period;
149 struct dentry *coeff_p; 147 struct dentry *coeff_p;
@@ -180,6 +178,8 @@ struct rc_pid_sta_info {
180 u32 tx_num_failed; 178 u32 tx_num_failed;
181 u32 tx_num_xmit; 179 u32 tx_num_xmit;
182 180
181 int txrate_idx;
182
183 /* Average failed frames percentage error (i.e. actual vs. target 183 /* Average failed frames percentage error (i.e. actual vs. target
184 * percentage), scaled by RC_PID_SMOOTHING. This value is computed 184 * percentage), scaled by RC_PID_SMOOTHING. This value is computed
185 * using using an exponential weighted average technique: 185 * using using an exponential weighted average technique:
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index a914ba73ccf5..86eb374e3b87 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -68,17 +68,14 @@
68 * exhibited a worse failed frames behaviour and we'll choose the highest rate 68 * exhibited a worse failed frames behaviour and we'll choose the highest rate
69 * whose failed frames behaviour is not worse than the one of the original rate 69 * whose failed frames behaviour is not worse than the one of the original rate
70 * target. While at it, check that the new rate is valid. */ 70 * target. While at it, check that the new rate is valid. */
71static void rate_control_pid_adjust_rate(struct ieee80211_local *local, 71static void rate_control_pid_adjust_rate(struct ieee80211_supported_band *sband,
72 struct sta_info *sta, int adj, 72 struct ieee80211_sta *sta,
73 struct rc_pid_sta_info *spinfo, int adj,
73 struct rc_pid_rateinfo *rinfo) 74 struct rc_pid_rateinfo *rinfo)
74{ 75{
75 struct ieee80211_sub_if_data *sdata;
76 struct ieee80211_supported_band *sband;
77 int cur_sorted, new_sorted, probe, tmp, n_bitrates, band; 76 int cur_sorted, new_sorted, probe, tmp, n_bitrates, band;
78 int cur = sta->txrate_idx; 77 int cur = spinfo->txrate_idx;
79 78
80 sdata = sta->sdata;
81 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
82 band = sband->band; 79 band = sband->band;
83 n_bitrates = sband->n_bitrates; 80 n_bitrates = sband->n_bitrates;
84 81
@@ -111,7 +108,7 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
111 /* Fit the rate found to the nearest supported rate. */ 108 /* Fit the rate found to the nearest supported rate. */
112 do { 109 do {
113 if (rate_supported(sta, band, rinfo[tmp].index)) { 110 if (rate_supported(sta, band, rinfo[tmp].index)) {
114 sta->txrate_idx = rinfo[tmp].index; 111 spinfo->txrate_idx = rinfo[tmp].index;
115 break; 112 break;
116 } 113 }
117 if (adj < 0) 114 if (adj < 0)
@@ -121,9 +118,9 @@ static void rate_control_pid_adjust_rate(struct ieee80211_local *local,
121 } while (tmp < n_bitrates && tmp >= 0); 118 } while (tmp < n_bitrates && tmp >= 0);
122 119
123#ifdef CONFIG_MAC80211_DEBUGFS 120#ifdef CONFIG_MAC80211_DEBUGFS
124 rate_control_pid_event_rate_change( 121 rate_control_pid_event_rate_change(&spinfo->events,
125 &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, 122 spinfo->txrate_idx,
126 sta->txrate_idx, sband->bitrates[sta->txrate_idx].bitrate); 123 sband->bitrates[spinfo->txrate_idx].bitrate);
127#endif 124#endif
128} 125}
129 126
@@ -145,15 +142,11 @@ static void rate_control_pid_normalize(struct rc_pid_info *pinfo, int l)
145} 142}
146 143
147static void rate_control_pid_sample(struct rc_pid_info *pinfo, 144static void rate_control_pid_sample(struct rc_pid_info *pinfo,
148 struct ieee80211_local *local, 145 struct ieee80211_supported_band *sband,
149 struct sta_info *sta) 146 struct ieee80211_sta *sta,
147 struct rc_pid_sta_info *spinfo)
150{ 148{
151#ifdef CONFIG_MAC80211_MESH
152 struct ieee80211_sub_if_data *sdata = sta->sdata;
153#endif
154 struct rc_pid_sta_info *spinfo = sta->rate_ctrl_priv;
155 struct rc_pid_rateinfo *rinfo = pinfo->rinfo; 149 struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
156 struct ieee80211_supported_band *sband;
157 u32 pf; 150 u32 pf;
158 s32 err_avg; 151 s32 err_avg;
159 u32 err_prop; 152 u32 err_prop;
@@ -162,9 +155,6 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
162 int adj, i, j, tmp; 155 int adj, i, j, tmp;
163 unsigned long period; 156 unsigned long period;
164 157
165 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
166 spinfo = sta->rate_ctrl_priv;
167
168 /* In case nothing happened during the previous control interval, turn 158 /* In case nothing happened during the previous control interval, turn
169 * the sharpening factor on. */ 159 * the sharpening factor on. */
170 period = (HZ * pinfo->sampling_period + 500) / 1000; 160 period = (HZ * pinfo->sampling_period + 500) / 1000;
@@ -180,14 +170,15 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
180 if (unlikely(spinfo->tx_num_xmit == 0)) 170 if (unlikely(spinfo->tx_num_xmit == 0))
181 pf = spinfo->last_pf; 171 pf = spinfo->last_pf;
182 else { 172 else {
173 /* XXX: BAD HACK!!! */
174 struct sta_info *si = container_of(sta, struct sta_info, sta);
175
183 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit; 176 pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
184#ifdef CONFIG_MAC80211_MESH 177
185 if (pf == 100 && 178 if (ieee80211_vif_is_mesh(&si->sdata->vif) && pf == 100)
186 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) 179 mesh_plink_broken(si);
187 mesh_plink_broken(sta);
188#endif
189 pf <<= RC_PID_ARITH_SHIFT; 180 pf <<= RC_PID_ARITH_SHIFT;
190 sta->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9) 181 si->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9)
191 >> RC_PID_ARITH_SHIFT; 182 >> RC_PID_ARITH_SHIFT;
192 } 183 }
193 184
@@ -195,16 +186,16 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
195 spinfo->tx_num_failed = 0; 186 spinfo->tx_num_failed = 0;
196 187
197 /* If we just switched rate, update the rate behaviour info. */ 188 /* If we just switched rate, update the rate behaviour info. */
198 if (pinfo->oldrate != sta->txrate_idx) { 189 if (pinfo->oldrate != spinfo->txrate_idx) {
199 190
200 i = rinfo[pinfo->oldrate].rev_index; 191 i = rinfo[pinfo->oldrate].rev_index;
201 j = rinfo[sta->txrate_idx].rev_index; 192 j = rinfo[spinfo->txrate_idx].rev_index;
202 193
203 tmp = (pf - spinfo->last_pf); 194 tmp = (pf - spinfo->last_pf);
204 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT); 195 tmp = RC_PID_DO_ARITH_RIGHT_SHIFT(tmp, RC_PID_ARITH_SHIFT);
205 196
206 rinfo[j].diff = rinfo[i].diff + tmp; 197 rinfo[j].diff = rinfo[i].diff + tmp;
207 pinfo->oldrate = sta->txrate_idx; 198 pinfo->oldrate = spinfo->txrate_idx;
208 } 199 }
209 rate_control_pid_normalize(pinfo, sband->n_bitrates); 200 rate_control_pid_normalize(pinfo, sband->n_bitrates);
210 201
@@ -233,43 +224,26 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
233 224
234 /* Change rate. */ 225 /* Change rate. */
235 if (adj) 226 if (adj)
236 rate_control_pid_adjust_rate(local, sta, adj, rinfo); 227 rate_control_pid_adjust_rate(sband, sta, spinfo, adj, rinfo);
237} 228}
238 229
239static void rate_control_pid_tx_status(void *priv, struct net_device *dev, 230static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_band *sband,
231 struct ieee80211_sta *sta, void *priv_sta,
240 struct sk_buff *skb) 232 struct sk_buff *skb)
241{ 233{
242 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
243 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
244 struct ieee80211_sub_if_data *sdata;
245 struct rc_pid_info *pinfo = priv; 234 struct rc_pid_info *pinfo = priv;
246 struct sta_info *sta; 235 struct rc_pid_sta_info *spinfo = priv_sta;
247 struct rc_pid_sta_info *spinfo;
248 unsigned long period; 236 unsigned long period;
249 struct ieee80211_supported_band *sband;
250 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 237 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
251 238
252 rcu_read_lock(); 239 if (!spinfo)
253 240 return;
254 sta = sta_info_get(local, hdr->addr1);
255 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
256
257 if (!sta)
258 goto unlock;
259
260 /* Don't update the state if we're not controlling the rate. */
261 sdata = sta->sdata;
262 if (sdata->force_unicast_rateidx > -1) {
263 sta->txrate_idx = sdata->max_ratectrl_rateidx;
264 goto unlock;
265 }
266 241
267 /* Ignore all frames that were sent with a different rate than the rate 242 /* Ignore all frames that were sent with a different rate than the rate
268 * we currently advise mac80211 to use. */ 243 * we currently advise mac80211 to use. */
269 if (info->tx_rate_idx != sta->txrate_idx) 244 if (info->tx_rate_idx != spinfo->txrate_idx)
270 goto unlock; 245 return;
271 246
272 spinfo = sta->rate_ctrl_priv;
273 spinfo->tx_num_xmit++; 247 spinfo->tx_num_xmit++;
274 248
275#ifdef CONFIG_MAC80211_DEBUGFS 249#ifdef CONFIG_MAC80211_DEBUGFS
@@ -287,93 +261,68 @@ static void rate_control_pid_tx_status(void *priv, struct net_device *dev,
287 spinfo->tx_num_xmit++; 261 spinfo->tx_num_xmit++;
288 } 262 }
289 263
290 if (info->status.excessive_retries) {
291 sta->tx_retry_failed++;
292 sta->tx_num_consecutive_failures++;
293 sta->tx_num_mpdu_fail++;
294 } else {
295 sta->tx_num_consecutive_failures = 0;
296 sta->tx_num_mpdu_ok++;
297 }
298 sta->tx_retry_count += info->status.retry_count;
299 sta->tx_num_mpdu_fail += info->status.retry_count;
300
301 /* Update PID controller state. */ 264 /* Update PID controller state. */
302 period = (HZ * pinfo->sampling_period + 500) / 1000; 265 period = (HZ * pinfo->sampling_period + 500) / 1000;
303 if (!period) 266 if (!period)
304 period = 1; 267 period = 1;
305 if (time_after(jiffies, spinfo->last_sample + period)) 268 if (time_after(jiffies, spinfo->last_sample + period))
306 rate_control_pid_sample(pinfo, local, sta); 269 rate_control_pid_sample(pinfo, sband, sta, spinfo);
307
308 unlock:
309 rcu_read_unlock();
310} 270}
311 271
312static void rate_control_pid_get_rate(void *priv, struct net_device *dev, 272static void
313 struct ieee80211_supported_band *sband, 273rate_control_pid_get_rate(void *priv, struct ieee80211_supported_band *sband,
314 struct sk_buff *skb, 274 struct ieee80211_sta *sta, void *priv_sta,
315 struct rate_selection *sel) 275 struct sk_buff *skb,
276 struct rate_selection *sel)
316{ 277{
317 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
318 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 278 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
319 struct ieee80211_sub_if_data *sdata; 279 struct rc_pid_sta_info *spinfo = priv_sta;
320 struct sta_info *sta;
321 int rateidx; 280 int rateidx;
322 u16 fc; 281 u16 fc;
323 282
324 rcu_read_lock();
325
326 sta = sta_info_get(local, hdr->addr1);
327
328 /* Send management frames and broadcast/multicast data using lowest 283 /* Send management frames and broadcast/multicast data using lowest
329 * rate. */ 284 * rate. */
330 fc = le16_to_cpu(hdr->frame_control); 285 fc = le16_to_cpu(hdr->frame_control);
331 if ((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA || 286 if (!sta || !spinfo ||
332 is_multicast_ether_addr(hdr->addr1) || !sta) { 287 (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
333 sel->rate_idx = rate_lowest_index(local, sband, sta); 288 is_multicast_ether_addr(hdr->addr1)) {
334 rcu_read_unlock(); 289 sel->rate_idx = rate_lowest_index(sband, sta);
335 return; 290 return;
336 } 291 }
337 292
338 /* If a forced rate is in effect, select it. */ 293 rateidx = spinfo->txrate_idx;
339 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
340 if (sdata->force_unicast_rateidx > -1)
341 sta->txrate_idx = sdata->force_unicast_rateidx;
342
343 rateidx = sta->txrate_idx;
344 294
345 if (rateidx >= sband->n_bitrates) 295 if (rateidx >= sband->n_bitrates)
346 rateidx = sband->n_bitrates - 1; 296 rateidx = sband->n_bitrates - 1;
347 297
348 sta->last_txrate_idx = rateidx;
349
350 rcu_read_unlock();
351
352 sel->rate_idx = rateidx; 298 sel->rate_idx = rateidx;
353 299
354#ifdef CONFIG_MAC80211_DEBUGFS 300#ifdef CONFIG_MAC80211_DEBUGFS
355 rate_control_pid_event_tx_rate( 301 rate_control_pid_event_tx_rate(&spinfo->events,
356 &((struct rc_pid_sta_info *) sta->rate_ctrl_priv)->events,
357 rateidx, sband->bitrates[rateidx].bitrate); 302 rateidx, sband->bitrates[rateidx].bitrate);
358#endif 303#endif
359} 304}
360 305
361static void rate_control_pid_rate_init(void *priv, void *priv_sta, 306static void
362 struct ieee80211_local *local, 307rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
363 struct sta_info *sta) 308 struct ieee80211_sta *sta, void *priv_sta)
364{ 309{
310 struct rc_pid_sta_info *spinfo = priv_sta;
311 struct sta_info *si;
312
365 /* TODO: This routine should consider using RSSI from previous packets 313 /* TODO: This routine should consider using RSSI from previous packets
366 * as we need to have IEEE 802.1X auth succeed immediately after assoc.. 314 * as we need to have IEEE 802.1X auth succeed immediately after assoc..
367 * Until that method is implemented, we will use the lowest supported 315 * Until that method is implemented, we will use the lowest supported
368 * rate as a workaround. */ 316 * rate as a workaround. */
369 struct ieee80211_supported_band *sband;
370 317
371 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 318 spinfo->txrate_idx = rate_lowest_index(sband, sta);
372 sta->txrate_idx = rate_lowest_index(local, sband, sta); 319 /* HACK */
373 sta->fail_avg = 0; 320 si = container_of(sta, struct sta_info, sta);
321 si->fail_avg = 0;
374} 322}
375 323
376static void *rate_control_pid_alloc(struct ieee80211_local *local) 324static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
325 struct dentry *debugfsdir)
377{ 326{
378 struct rc_pid_info *pinfo; 327 struct rc_pid_info *pinfo;
379 struct rc_pid_rateinfo *rinfo; 328 struct rc_pid_rateinfo *rinfo;
@@ -384,7 +333,7 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
384 struct rc_pid_debugfs_entries *de; 333 struct rc_pid_debugfs_entries *de;
385#endif 334#endif
386 335
387 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 336 sband = hw->wiphy->bands[hw->conf.channel->band];
388 337
389 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC); 338 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
390 if (!pinfo) 339 if (!pinfo)
@@ -439,30 +388,28 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
439 388
440#ifdef CONFIG_MAC80211_DEBUGFS 389#ifdef CONFIG_MAC80211_DEBUGFS
441 de = &pinfo->dentries; 390 de = &pinfo->dentries;
442 de->dir = debugfs_create_dir("rc80211_pid",
443 local->hw.wiphy->debugfsdir);
444 de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR, 391 de->target = debugfs_create_u32("target_pf", S_IRUSR | S_IWUSR,
445 de->dir, &pinfo->target); 392 debugfsdir, &pinfo->target);
446 de->sampling_period = debugfs_create_u32("sampling_period", 393 de->sampling_period = debugfs_create_u32("sampling_period",
447 S_IRUSR | S_IWUSR, de->dir, 394 S_IRUSR | S_IWUSR, debugfsdir,
448 &pinfo->sampling_period); 395 &pinfo->sampling_period);
449 de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR, 396 de->coeff_p = debugfs_create_u32("coeff_p", S_IRUSR | S_IWUSR,
450 de->dir, &pinfo->coeff_p); 397 debugfsdir, &pinfo->coeff_p);
451 de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR, 398 de->coeff_i = debugfs_create_u32("coeff_i", S_IRUSR | S_IWUSR,
452 de->dir, &pinfo->coeff_i); 399 debugfsdir, &pinfo->coeff_i);
453 de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR, 400 de->coeff_d = debugfs_create_u32("coeff_d", S_IRUSR | S_IWUSR,
454 de->dir, &pinfo->coeff_d); 401 debugfsdir, &pinfo->coeff_d);
455 de->smoothing_shift = debugfs_create_u32("smoothing_shift", 402 de->smoothing_shift = debugfs_create_u32("smoothing_shift",
456 S_IRUSR | S_IWUSR, de->dir, 403 S_IRUSR | S_IWUSR, debugfsdir,
457 &pinfo->smoothing_shift); 404 &pinfo->smoothing_shift);
458 de->sharpen_factor = debugfs_create_u32("sharpen_factor", 405 de->sharpen_factor = debugfs_create_u32("sharpen_factor",
459 S_IRUSR | S_IWUSR, de->dir, 406 S_IRUSR | S_IWUSR, debugfsdir,
460 &pinfo->sharpen_factor); 407 &pinfo->sharpen_factor);
461 de->sharpen_duration = debugfs_create_u32("sharpen_duration", 408 de->sharpen_duration = debugfs_create_u32("sharpen_duration",
462 S_IRUSR | S_IWUSR, de->dir, 409 S_IRUSR | S_IWUSR, debugfsdir,
463 &pinfo->sharpen_duration); 410 &pinfo->sharpen_duration);
464 de->norm_offset = debugfs_create_u32("norm_offset", 411 de->norm_offset = debugfs_create_u32("norm_offset",
465 S_IRUSR | S_IWUSR, de->dir, 412 S_IRUSR | S_IWUSR, debugfsdir,
466 &pinfo->norm_offset); 413 &pinfo->norm_offset);
467#endif 414#endif
468 415
@@ -484,7 +431,6 @@ static void rate_control_pid_free(void *priv)
484 debugfs_remove(de->coeff_p); 431 debugfs_remove(de->coeff_p);
485 debugfs_remove(de->sampling_period); 432 debugfs_remove(de->sampling_period);
486 debugfs_remove(de->target); 433 debugfs_remove(de->target);
487 debugfs_remove(de->dir);
488#endif 434#endif
489 435
490 kfree(pinfo->rinfo); 436 kfree(pinfo->rinfo);
@@ -495,7 +441,8 @@ static void rate_control_pid_clear(void *priv)
495{ 441{
496} 442}
497 443
498static void *rate_control_pid_alloc_sta(void *priv, gfp_t gfp) 444static void *rate_control_pid_alloc_sta(void *priv, struct ieee80211_sta *sta,
445 gfp_t gfp)
499{ 446{
500 struct rc_pid_sta_info *spinfo; 447 struct rc_pid_sta_info *spinfo;
501 448
@@ -513,10 +460,10 @@ static void *rate_control_pid_alloc_sta(void *priv, gfp_t gfp)
513 return spinfo; 460 return spinfo;
514} 461}
515 462
516static void rate_control_pid_free_sta(void *priv, void *priv_sta) 463static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
464 void *priv_sta)
517{ 465{
518 struct rc_pid_sta_info *spinfo = priv_sta; 466 kfree(priv_sta);
519 kfree(spinfo);
520} 467}
521 468
522static struct rate_control_ops mac80211_rcpid = { 469static struct rate_control_ops mac80211_rcpid = {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6db854505193..77e7b014872b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,6 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
143 /* IEEE80211_RADIOTAP_FLAGS */ 143 /* IEEE80211_RADIOTAP_FLAGS */
144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 144 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
145 *pos |= IEEE80211_RADIOTAP_F_FCS; 145 *pos |= IEEE80211_RADIOTAP_F_FCS;
146 if (status->flag & RX_FLAG_SHORTPRE)
147 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
146 pos++; 148 pos++;
147 149
148 /* IEEE80211_RADIOTAP_RATE */ 150 /* IEEE80211_RADIOTAP_RATE */
@@ -155,8 +157,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
155 if (status->band == IEEE80211_BAND_5GHZ) 157 if (status->band == IEEE80211_BAND_5GHZ)
156 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM | 158 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
157 IEEE80211_CHAN_5GHZ); 159 IEEE80211_CHAN_5GHZ);
160 else if (rate->flags & IEEE80211_RATE_ERP_G)
161 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_OFDM |
162 IEEE80211_CHAN_2GHZ);
158 else 163 else
159 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_DYN | 164 *(__le16 *)pos = cpu_to_le16(IEEE80211_CHAN_CCK |
160 IEEE80211_CHAN_2GHZ); 165 IEEE80211_CHAN_2GHZ);
161 pos += 2; 166 pos += 2;
162 167
@@ -290,7 +295,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
290 if (!netif_running(sdata->dev)) 295 if (!netif_running(sdata->dev))
291 continue; 296 continue;
292 297
293 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) 298 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
294 continue; 299 continue;
295 300
296 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 301 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
@@ -398,12 +403,12 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
398 struct ieee80211_local *local = rx->local; 403 struct ieee80211_local *local = rx->local;
399 struct sk_buff *skb = rx->skb; 404 struct sk_buff *skb = rx->skb;
400 405
401 if (unlikely(local->sta_hw_scanning)) 406 if (unlikely(local->hw_scanning))
402 return ieee80211_sta_rx_scan(rx->dev, skb, rx->status); 407 return ieee80211_scan_rx(rx->sdata, skb, rx->status);
403 408
404 if (unlikely(local->sta_sw_scanning)) { 409 if (unlikely(local->sw_scanning)) {
405 /* drop all the other packets during a software scan anyway */ 410 /* drop all the other packets during a software scan anyway */
406 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->status) 411 if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
407 != RX_QUEUED) 412 != RX_QUEUED)
408 dev_kfree_skb(skb); 413 dev_kfree_skb(skb);
409 return RX_QUEUED; 414 return RX_QUEUED;
@@ -461,7 +466,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
461 466
462 if (ieee80211_is_data(hdr->frame_control) && 467 if (ieee80211_is_data(hdr->frame_control) &&
463 is_multicast_ether_addr(hdr->addr1) && 468 is_multicast_ether_addr(hdr->addr1) &&
464 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->dev)) 469 mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
465 return RX_DROP_MONITOR; 470 return RX_DROP_MONITOR;
466#undef msh_h_get 471#undef msh_h_get
467 472
@@ -496,8 +501,8 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
496 /* Drop disallowed frame classes based on STA auth/assoc state; 501 /* Drop disallowed frame classes based on STA auth/assoc state;
497 * IEEE 802.11, Chap 5.5. 502 * IEEE 802.11, Chap 5.5.
498 * 503 *
499 * 80211.o does filtering only based on association state, i.e., it 504 * mac80211 filters only based on association state, i.e. it drops
500 * drops Class 3 frames from not associated stations. hostapd sends 505 * Class 3 frames from not associated stations. hostapd sends
501 * deauth/disassoc frames when needed. In addition, hostapd is 506 * deauth/disassoc frames when needed. In addition, hostapd is
502 * responsible for filtering on both auth and assoc states. 507 * responsible for filtering on both auth and assoc states.
503 */ 508 */
@@ -507,7 +512,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
507 512
508 if (unlikely((ieee80211_is_data(hdr->frame_control) || 513 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
509 ieee80211_is_pspoll(hdr->frame_control)) && 514 ieee80211_is_pspoll(hdr->frame_control)) &&
510 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 515 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
511 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) { 516 (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC)))) {
512 if ((!ieee80211_has_fromds(hdr->frame_control) && 517 if ((!ieee80211_has_fromds(hdr->frame_control) &&
513 !ieee80211_has_tods(hdr->frame_control) && 518 !ieee80211_has_tods(hdr->frame_control) &&
@@ -645,32 +650,28 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
645 return result; 650 return result;
646} 651}
647 652
648static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) 653static void ap_sta_ps_start(struct sta_info *sta)
649{ 654{
650 struct ieee80211_sub_if_data *sdata; 655 struct ieee80211_sub_if_data *sdata = sta->sdata;
651 DECLARE_MAC_BUF(mac); 656 DECLARE_MAC_BUF(mac);
652 657
653 sdata = sta->sdata;
654
655 atomic_inc(&sdata->bss->num_sta_ps); 658 atomic_inc(&sdata->bss->num_sta_ps);
656 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL); 659 set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
657#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 660#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
658 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 661 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
659 dev->name, print_mac(mac, sta->addr), sta->aid); 662 sdata->dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid);
660#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 663#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
661} 664}
662 665
663static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) 666static int ap_sta_ps_end(struct sta_info *sta)
664{ 667{
665 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 668 struct ieee80211_sub_if_data *sdata = sta->sdata;
669 struct ieee80211_local *local = sdata->local;
666 struct sk_buff *skb; 670 struct sk_buff *skb;
667 int sent = 0; 671 int sent = 0;
668 struct ieee80211_sub_if_data *sdata;
669 struct ieee80211_tx_info *info; 672 struct ieee80211_tx_info *info;
670 DECLARE_MAC_BUF(mac); 673 DECLARE_MAC_BUF(mac);
671 674
672 sdata = sta->sdata;
673
674 atomic_dec(&sdata->bss->num_sta_ps); 675 atomic_dec(&sdata->bss->num_sta_ps);
675 676
676 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL); 677 clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
@@ -680,7 +681,7 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
680 681
681#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 682#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
682 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", 683 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
683 dev->name, print_mac(mac, sta->addr), sta->aid); 684 sdata->dev->name, print_mac(mac, sta->sta.addr), sta->sta.aid);
684#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 685#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
685 686
686 /* Send all buffered frames to the station */ 687 /* Send all buffered frames to the station */
@@ -696,8 +697,8 @@ static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
696 sent++; 697 sent++;
697#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 698#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
698 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame " 699 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame "
699 "since STA not sleeping anymore\n", dev->name, 700 "since STA not sleeping anymore\n", sdata->dev->name,
700 print_mac(mac, sta->addr), sta->aid); 701 print_mac(mac, sta->sta.addr), sta->sta.aid);
701#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 702#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
702 info->flags |= IEEE80211_TX_CTL_REQUEUE; 703 info->flags |= IEEE80211_TX_CTL_REQUEUE;
703 dev_queue_xmit(skb); 704 dev_queue_xmit(skb);
@@ -710,7 +711,6 @@ static ieee80211_rx_result debug_noinline
710ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 711ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
711{ 712{
712 struct sta_info *sta = rx->sta; 713 struct sta_info *sta = rx->sta;
713 struct net_device *dev = rx->dev;
714 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 714 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
715 715
716 if (!sta) 716 if (!sta)
@@ -719,14 +719,14 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
719 /* Update last_rx only for IBSS packets which are for the current 719 /* Update last_rx only for IBSS packets which are for the current
720 * BSSID to avoid keeping the current IBSS network alive in cases where 720 * BSSID to avoid keeping the current IBSS network alive in cases where
721 * other STAs are using different BSSID. */ 721 * other STAs are using different BSSID. */
722 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 722 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
723 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 723 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
724 IEEE80211_IF_TYPE_IBSS); 724 NL80211_IFTYPE_ADHOC);
725 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0) 725 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
726 sta->last_rx = jiffies; 726 sta->last_rx = jiffies;
727 } else 727 } else
728 if (!is_multicast_ether_addr(hdr->addr1) || 728 if (!is_multicast_ether_addr(hdr->addr1) ||
729 rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) { 729 rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
730 /* Update last_rx only for unicast frames in order to prevent 730 /* Update last_rx only for unicast frames in order to prevent
731 * the Probe Request frames (the only broadcast frames from a 731 * the Probe Request frames (the only broadcast frames from a
732 * STA in infrastructure mode) from keeping a connection alive. 732 * STA in infrastructure mode) from keeping a connection alive.
@@ -746,16 +746,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
746 sta->last_noise = rx->status->noise; 746 sta->last_noise = rx->status->noise;
747 747
748 if (!ieee80211_has_morefrags(hdr->frame_control) && 748 if (!ieee80211_has_morefrags(hdr->frame_control) &&
749 (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP || 749 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
750 rx->sdata->vif.type == IEEE80211_IF_TYPE_VLAN)) { 750 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
751 /* Change STA power saving mode only in the end of a frame 751 /* Change STA power saving mode only in the end of a frame
752 * exchange sequence */ 752 * exchange sequence */
753 if (test_sta_flags(sta, WLAN_STA_PS) && 753 if (test_sta_flags(sta, WLAN_STA_PS) &&
754 !ieee80211_has_pm(hdr->frame_control)) 754 !ieee80211_has_pm(hdr->frame_control))
755 rx->sent_ps_buffered += ap_sta_ps_end(dev, sta); 755 rx->sent_ps_buffered += ap_sta_ps_end(sta);
756 else if (!test_sta_flags(sta, WLAN_STA_PS) && 756 else if (!test_sta_flags(sta, WLAN_STA_PS) &&
757 ieee80211_has_pm(hdr->frame_control)) 757 ieee80211_has_pm(hdr->frame_control))
758 ap_sta_ps_start(dev, sta); 758 ap_sta_ps_start(sta);
759 } 759 }
760 760
761 /* Drop data::nullfunc frames silently, since they are used only to 761 /* Drop data::nullfunc frames silently, since they are used only to
@@ -816,7 +816,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
816 816
817static inline struct ieee80211_fragment_entry * 817static inline struct ieee80211_fragment_entry *
818ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 818ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
819 u16 fc, unsigned int frag, unsigned int seq, 819 unsigned int frag, unsigned int seq,
820 int rx_queue, struct ieee80211_hdr *hdr) 820 int rx_queue, struct ieee80211_hdr *hdr)
821{ 821{
822 struct ieee80211_fragment_entry *entry; 822 struct ieee80211_fragment_entry *entry;
@@ -825,7 +825,6 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
825 idx = sdata->fragment_next; 825 idx = sdata->fragment_next;
826 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 826 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
827 struct ieee80211_hdr *f_hdr; 827 struct ieee80211_hdr *f_hdr;
828 u16 f_fc;
829 828
830 idx--; 829 idx--;
831 if (idx < 0) 830 if (idx < 0)
@@ -837,10 +836,13 @@ ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
837 entry->last_frag + 1 != frag) 836 entry->last_frag + 1 != frag)
838 continue; 837 continue;
839 838
840 f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data; 839 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
841 f_fc = le16_to_cpu(f_hdr->frame_control);
842 840
843 if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || 841 /*
842 * Check ftype and addresses are equal, else check next fragment
843 */
844 if (((hdr->frame_control ^ f_hdr->frame_control) &
845 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
844 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 846 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
845 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 847 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
846 continue; 848 continue;
@@ -860,16 +862,18 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
860{ 862{
861 struct ieee80211_hdr *hdr; 863 struct ieee80211_hdr *hdr;
862 u16 sc; 864 u16 sc;
865 __le16 fc;
863 unsigned int frag, seq; 866 unsigned int frag, seq;
864 struct ieee80211_fragment_entry *entry; 867 struct ieee80211_fragment_entry *entry;
865 struct sk_buff *skb; 868 struct sk_buff *skb;
866 DECLARE_MAC_BUF(mac); 869 DECLARE_MAC_BUF(mac);
867 870
868 hdr = (struct ieee80211_hdr *) rx->skb->data; 871 hdr = (struct ieee80211_hdr *)rx->skb->data;
872 fc = hdr->frame_control;
869 sc = le16_to_cpu(hdr->seq_ctrl); 873 sc = le16_to_cpu(hdr->seq_ctrl);
870 frag = sc & IEEE80211_SCTL_FRAG; 874 frag = sc & IEEE80211_SCTL_FRAG;
871 875
872 if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || 876 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
873 (rx->skb)->len < 24 || 877 (rx->skb)->len < 24 ||
874 is_multicast_ether_addr(hdr->addr1))) { 878 is_multicast_ether_addr(hdr->addr1))) {
875 /* not fragmented */ 879 /* not fragmented */
@@ -884,7 +888,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
884 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 888 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
885 rx->queue, &(rx->skb)); 889 rx->queue, &(rx->skb));
886 if (rx->key && rx->key->conf.alg == ALG_CCMP && 890 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
887 (rx->fc & IEEE80211_FCTL_PROTECTED)) { 891 ieee80211_has_protected(fc)) {
888 /* Store CCMP PN so that we can verify that the next 892 /* Store CCMP PN so that we can verify that the next
889 * fragment has a sequential PN value. */ 893 * fragment has a sequential PN value. */
890 entry->ccmp = 1; 894 entry->ccmp = 1;
@@ -898,8 +902,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
898 /* This is a fragment for a frame that should already be pending in 902 /* This is a fragment for a frame that should already be pending in
899 * fragment cache. Add this fragment to the end of the pending entry. 903 * fragment cache. Add this fragment to the end of the pending entry.
900 */ 904 */
901 entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, 905 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr);
902 rx->queue, hdr);
903 if (!entry) { 906 if (!entry) {
904 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 907 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
905 return RX_DROP_MONITOR; 908 return RX_DROP_MONITOR;
@@ -924,11 +927,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
924 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 927 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
925 } 928 }
926 929
927 skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); 930 skb_pull(rx->skb, ieee80211_hdrlen(fc));
928 __skb_queue_tail(&entry->skb_list, rx->skb); 931 __skb_queue_tail(&entry->skb_list, rx->skb);
929 entry->last_frag = frag; 932 entry->last_frag = frag;
930 entry->extra_len += rx->skb->len; 933 entry->extra_len += rx->skb->len;
931 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { 934 if (ieee80211_has_morefrags(fc)) {
932 rx->skb = NULL; 935 rx->skb = NULL;
933 return RX_QUEUED; 936 return RX_QUEUED;
934 } 937 }
@@ -968,15 +971,14 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
968 struct sk_buff *skb; 971 struct sk_buff *skb;
969 int no_pending_pkts; 972 int no_pending_pkts;
970 DECLARE_MAC_BUF(mac); 973 DECLARE_MAC_BUF(mac);
974 __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
971 975
972 if (likely(!rx->sta || 976 if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
973 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
974 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
975 !(rx->flags & IEEE80211_RX_RA_MATCH))) 977 !(rx->flags & IEEE80211_RX_RA_MATCH)))
976 return RX_CONTINUE; 978 return RX_CONTINUE;
977 979
978 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && 980 if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
979 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) 981 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
980 return RX_DROP_UNUSABLE; 982 return RX_DROP_UNUSABLE;
981 983
982 skb = skb_dequeue(&rx->sta->tx_filtered); 984 skb = skb_dequeue(&rx->sta->tx_filtered);
@@ -1000,7 +1002,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1000 1002
1001#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1003#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
1002 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 1004 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
1003 print_mac(mac, rx->sta->addr), rx->sta->aid, 1005 print_mac(mac, rx->sta->sta.addr), rx->sta->sta.aid,
1004 skb_queue_len(&rx->sta->ps_tx_buf)); 1006 skb_queue_len(&rx->sta->ps_tx_buf));
1005#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1007#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1006 1008
@@ -1025,7 +1027,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
1025 */ 1027 */
1026 printk(KERN_DEBUG "%s: STA %s sent PS Poll even " 1028 printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
1027 "though there are no buffered frames for it\n", 1029 "though there are no buffered frames for it\n",
1028 rx->dev->name, print_mac(mac, rx->sta->addr)); 1030 rx->dev->name, print_mac(mac, rx->sta->sta.addr));
1029#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1031#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
1030 } 1032 }
1031 1033
@@ -1050,7 +1052,6 @@ ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
1050 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN); 1052 ieee80211_hdrlen(hdr->frame_control) - IEEE80211_QOS_CTL_LEN);
1051 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN); 1053 hdr = (struct ieee80211_hdr *)skb_pull(rx->skb, IEEE80211_QOS_CTL_LEN);
1052 /* change frame type to non QOS */ 1054 /* change frame type to non QOS */
1053 rx->fc &= ~IEEE80211_STYPE_QOS_DATA;
1054 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1055 hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1055 1056
1056 return RX_CONTINUE; 1057 return RX_CONTINUE;
@@ -1067,7 +1068,7 @@ ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
1067} 1068}
1068 1069
1069static int 1070static int
1070ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx) 1071ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
1071{ 1072{
1072 /* 1073 /*
1073 * Pass through unencrypted frames if the hardware has 1074 * Pass through unencrypted frames if the hardware has
@@ -1077,9 +1078,8 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx)
1077 return 0; 1078 return 0;
1078 1079
1079 /* Drop unencrypted frames if key is set. */ 1080 /* Drop unencrypted frames if key is set. */
1080 if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && 1081 if (unlikely(!ieee80211_has_protected(fc) &&
1081 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 1082 !ieee80211_is_nullfunc(fc) &&
1082 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
1083 (rx->key || rx->sdata->drop_unencrypted))) 1083 (rx->key || rx->sdata->drop_unencrypted)))
1084 return -EACCES; 1084 return -EACCES;
1085 1085
@@ -1091,7 +1091,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1091{ 1091{
1092 struct net_device *dev = rx->dev; 1092 struct net_device *dev = rx->dev;
1093 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 1093 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
1094 u16 fc, hdrlen, ethertype; 1094 u16 hdrlen, ethertype;
1095 u8 *payload; 1095 u8 *payload;
1096 u8 dst[ETH_ALEN]; 1096 u8 dst[ETH_ALEN];
1097 u8 src[ETH_ALEN] __aligned(2); 1097 u8 src[ETH_ALEN] __aligned(2);
@@ -1102,16 +1102,10 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1102 DECLARE_MAC_BUF(mac3); 1102 DECLARE_MAC_BUF(mac3);
1103 DECLARE_MAC_BUF(mac4); 1103 DECLARE_MAC_BUF(mac4);
1104 1104
1105 fc = rx->fc; 1105 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1106
1107 if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
1108 return -1; 1106 return -1;
1109 1107
1110 hdrlen = ieee80211_get_hdrlen(fc); 1108 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1111
1112 if (ieee80211_vif_is_mesh(&sdata->vif))
1113 hdrlen += ieee80211_get_mesh_hdrlen(
1114 (struct ieee80211s_hdr *) (skb->data + hdrlen));
1115 1109
1116 /* convert IEEE 802.11 header + possible LLC headers into Ethernet 1110 /* convert IEEE 802.11 header + possible LLC headers into Ethernet
1117 * header 1111 * header
@@ -1122,42 +1116,38 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1122 * 1 0 BSSID SA DA n/a 1116 * 1 0 BSSID SA DA n/a
1123 * 1 1 RA TA DA SA 1117 * 1 1 RA TA DA SA
1124 */ 1118 */
1125 1119 memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
1126 switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 1120 memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
1127 case IEEE80211_FCTL_TODS: 1121
1128 /* BSSID SA DA */ 1122 switch (hdr->frame_control &
1129 memcpy(dst, hdr->addr3, ETH_ALEN); 1123 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
1130 memcpy(src, hdr->addr2, ETH_ALEN); 1124 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS):
1131 1125 if (unlikely(sdata->vif.type != NL80211_IFTYPE_AP &&
1132 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP && 1126 sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
1133 sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
1134 return -1; 1127 return -1;
1135 break; 1128 break;
1136 case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS): 1129 case __constant_cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
1137 /* RA TA DA SA */ 1130 if (unlikely(sdata->vif.type != NL80211_IFTYPE_WDS &&
1138 memcpy(dst, hdr->addr3, ETH_ALEN); 1131 sdata->vif.type != NL80211_IFTYPE_MESH_POINT))
1139 memcpy(src, hdr->addr4, ETH_ALEN);
1140
1141 if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS &&
1142 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT))
1143 return -1; 1132 return -1;
1133 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1134 struct ieee80211s_hdr *meshdr = (struct ieee80211s_hdr *)
1135 (skb->data + hdrlen);
1136 hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
1137 if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
1138 memcpy(dst, meshdr->eaddr1, ETH_ALEN);
1139 memcpy(src, meshdr->eaddr2, ETH_ALEN);
1140 }
1141 }
1144 break; 1142 break;
1145 case IEEE80211_FCTL_FROMDS: 1143 case __constant_cpu_to_le16(IEEE80211_FCTL_FROMDS):
1146 /* DA BSSID SA */ 1144 if (sdata->vif.type != NL80211_IFTYPE_STATION ||
1147 memcpy(dst, hdr->addr1, ETH_ALEN);
1148 memcpy(src, hdr->addr3, ETH_ALEN);
1149
1150 if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
1151 (is_multicast_ether_addr(dst) && 1145 (is_multicast_ether_addr(dst) &&
1152 !compare_ether_addr(src, dev->dev_addr))) 1146 !compare_ether_addr(src, dev->dev_addr)))
1153 return -1; 1147 return -1;
1154 break; 1148 break;
1155 case 0: 1149 case __constant_cpu_to_le16(0):
1156 /* DA SA BSSID */ 1150 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
1157 memcpy(dst, hdr->addr1, ETH_ALEN);
1158 memcpy(src, hdr->addr2, ETH_ALEN);
1159
1160 if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS)
1161 return -1; 1151 return -1;
1162 break; 1152 break;
1163 } 1153 }
@@ -1193,7 +1183,7 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1193/* 1183/*
1194 * requires that rx->skb is a frame with ethernet header 1184 * requires that rx->skb is a frame with ethernet header
1195 */ 1185 */
1196static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx) 1186static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
1197{ 1187{
1198 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 1188 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
1199 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 1189 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
@@ -1209,7 +1199,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx)
1209 return true; 1199 return true;
1210 1200
1211 if (ieee80211_802_1x_port_control(rx) || 1201 if (ieee80211_802_1x_port_control(rx) ||
1212 ieee80211_drop_unencrypted(rx)) 1202 ieee80211_drop_unencrypted(rx, fc))
1213 return false; 1203 return false;
1214 1204
1215 return true; 1205 return true;
@@ -1231,8 +1221,9 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1231 skb = rx->skb; 1221 skb = rx->skb;
1232 xmit_skb = NULL; 1222 xmit_skb = NULL;
1233 1223
1234 if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP || 1224 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
1235 sdata->vif.type == IEEE80211_IF_TYPE_VLAN) && 1225 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
1226 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
1236 (rx->flags & IEEE80211_RX_RA_MATCH)) { 1227 (rx->flags & IEEE80211_RX_RA_MATCH)) {
1237 if (is_multicast_ether_addr(ehdr->h_dest)) { 1228 if (is_multicast_ether_addr(ehdr->h_dest)) {
1238 /* 1229 /*
@@ -1279,20 +1270,21 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1279{ 1270{
1280 struct net_device *dev = rx->dev; 1271 struct net_device *dev = rx->dev;
1281 struct ieee80211_local *local = rx->local; 1272 struct ieee80211_local *local = rx->local;
1282 u16 fc, ethertype; 1273 u16 ethertype;
1283 u8 *payload; 1274 u8 *payload;
1284 struct sk_buff *skb = rx->skb, *frame = NULL; 1275 struct sk_buff *skb = rx->skb, *frame = NULL;
1276 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1277 __le16 fc = hdr->frame_control;
1285 const struct ethhdr *eth; 1278 const struct ethhdr *eth;
1286 int remaining, err; 1279 int remaining, err;
1287 u8 dst[ETH_ALEN]; 1280 u8 dst[ETH_ALEN];
1288 u8 src[ETH_ALEN]; 1281 u8 src[ETH_ALEN];
1289 DECLARE_MAC_BUF(mac); 1282 DECLARE_MAC_BUF(mac);
1290 1283
1291 fc = rx->fc; 1284 if (unlikely(!ieee80211_is_data(fc)))
1292 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1293 return RX_CONTINUE; 1285 return RX_CONTINUE;
1294 1286
1295 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1287 if (unlikely(!ieee80211_is_data_present(fc)))
1296 return RX_DROP_MONITOR; 1288 return RX_DROP_MONITOR;
1297 1289
1298 if (!(rx->flags & IEEE80211_RX_AMSDU)) 1290 if (!(rx->flags & IEEE80211_RX_AMSDU))
@@ -1374,7 +1366,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1374 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); 1366 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
1375 } 1367 }
1376 1368
1377 if (!ieee80211_frame_allowed(rx)) { 1369 if (!ieee80211_frame_allowed(rx, fc)) {
1378 if (skb == frame) /* last frame */ 1370 if (skb == frame) /* last frame */
1379 return RX_DROP_UNUSABLE; 1371 return RX_DROP_UNUSABLE;
1380 dev_kfree_skb(frame); 1372 dev_kfree_skb(frame);
@@ -1387,7 +1379,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1387 return RX_QUEUED; 1379 return RX_QUEUED;
1388} 1380}
1389 1381
1390static ieee80211_rx_result debug_noinline 1382static ieee80211_rx_result
1391ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 1383ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1392{ 1384{
1393 struct ieee80211_hdr *hdr; 1385 struct ieee80211_hdr *hdr;
@@ -1406,6 +1398,25 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1406 /* illegal frame */ 1398 /* illegal frame */
1407 return RX_DROP_MONITOR; 1399 return RX_DROP_MONITOR;
1408 1400
1401 if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
1402 struct ieee80211_sub_if_data *sdata;
1403 struct mesh_path *mppath;
1404
1405 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1406 rcu_read_lock();
1407 mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
1408 if (!mppath) {
1409 mpp_path_add(mesh_hdr->eaddr2, hdr->addr4, sdata);
1410 } else {
1411 spin_lock_bh(&mppath->state_lock);
1412 mppath->exp_time = jiffies;
1413 if (compare_ether_addr(mppath->mpp, hdr->addr4) != 0)
1414 memcpy(mppath->mpp, hdr->addr4, ETH_ALEN);
1415 spin_unlock_bh(&mppath->state_lock);
1416 }
1417 rcu_read_unlock();
1418 }
1419
1409 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) 1420 if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
1410 return RX_CONTINUE; 1421 return RX_CONTINUE;
1411 1422
@@ -1413,7 +1424,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
1413 1424
1414 if (rx->flags & IEEE80211_RX_RA_MATCH) { 1425 if (rx->flags & IEEE80211_RX_RA_MATCH) {
1415 if (!mesh_hdr->ttl) 1426 if (!mesh_hdr->ttl)
1416 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.sta, 1427 IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.mesh,
1417 dropped_frames_ttl); 1428 dropped_frames_ttl);
1418 else { 1429 else {
1419 struct ieee80211_hdr *fwd_hdr; 1430 struct ieee80211_hdr *fwd_hdr;
@@ -1448,21 +1459,21 @@ static ieee80211_rx_result debug_noinline
1448ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1459ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1449{ 1460{
1450 struct net_device *dev = rx->dev; 1461 struct net_device *dev = rx->dev;
1451 u16 fc; 1462 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1463 __le16 fc = hdr->frame_control;
1452 int err; 1464 int err;
1453 1465
1454 fc = rx->fc; 1466 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
1455 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
1456 return RX_CONTINUE; 1467 return RX_CONTINUE;
1457 1468
1458 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1469 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
1459 return RX_DROP_MONITOR; 1470 return RX_DROP_MONITOR;
1460 1471
1461 err = ieee80211_data_to_8023(rx); 1472 err = ieee80211_data_to_8023(rx);
1462 if (unlikely(err)) 1473 if (unlikely(err))
1463 return RX_DROP_UNUSABLE; 1474 return RX_DROP_UNUSABLE;
1464 1475
1465 if (!ieee80211_frame_allowed(rx)) 1476 if (!ieee80211_frame_allowed(rx, fc))
1466 return RX_DROP_MONITOR; 1477 return RX_DROP_MONITOR;
1467 1478
1468 rx->skb->dev = dev; 1479 rx->skb->dev = dev;
@@ -1520,22 +1531,97 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
1520} 1531}
1521 1532
1522static ieee80211_rx_result debug_noinline 1533static ieee80211_rx_result debug_noinline
1534ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1535{
1536 struct ieee80211_local *local = rx->local;
1537 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1538 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
1539 int len = rx->skb->len;
1540
1541 if (!ieee80211_is_action(mgmt->frame_control))
1542 return RX_CONTINUE;
1543
1544 if (!rx->sta)
1545 return RX_DROP_MONITOR;
1546
1547 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1548 return RX_DROP_MONITOR;
1549
1550 /* all categories we currently handle have action_code */
1551 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1552 return RX_DROP_MONITOR;
1553
1554 /*
1555 * FIXME: revisit this, I'm sure we should handle most
1556 * of these frames in other modes as well!
1557 */
1558 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1559 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1560 return RX_CONTINUE;
1561
1562 switch (mgmt->u.action.category) {
1563 case WLAN_CATEGORY_BACK:
1564 switch (mgmt->u.action.u.addba_req.action_code) {
1565 case WLAN_ACTION_ADDBA_REQ:
1566 if (len < (IEEE80211_MIN_ACTION_SIZE +
1567 sizeof(mgmt->u.action.u.addba_req)))
1568 return RX_DROP_MONITOR;
1569 ieee80211_process_addba_request(local, rx->sta, mgmt, len);
1570 break;
1571 case WLAN_ACTION_ADDBA_RESP:
1572 if (len < (IEEE80211_MIN_ACTION_SIZE +
1573 sizeof(mgmt->u.action.u.addba_resp)))
1574 return RX_DROP_MONITOR;
1575 ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
1576 break;
1577 case WLAN_ACTION_DELBA:
1578 if (len < (IEEE80211_MIN_ACTION_SIZE +
1579 sizeof(mgmt->u.action.u.delba)))
1580 return RX_DROP_MONITOR;
1581 ieee80211_process_delba(sdata, rx->sta, mgmt, len);
1582 break;
1583 }
1584 break;
1585 case WLAN_CATEGORY_SPECTRUM_MGMT:
1586 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1587 return RX_DROP_MONITOR;
1588 switch (mgmt->u.action.u.measurement.action_code) {
1589 case WLAN_ACTION_SPCT_MSR_REQ:
1590 if (len < (IEEE80211_MIN_ACTION_SIZE +
1591 sizeof(mgmt->u.action.u.measurement)))
1592 return RX_DROP_MONITOR;
1593 ieee80211_process_measurement_req(sdata, mgmt, len);
1594 break;
1595 }
1596 break;
1597 default:
1598 return RX_CONTINUE;
1599 }
1600
1601 rx->sta->rx_packets++;
1602 dev_kfree_skb(rx->skb);
1603 return RX_QUEUED;
1604}
1605
1606static ieee80211_rx_result debug_noinline
1523ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 1607ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1524{ 1608{
1525 struct ieee80211_sub_if_data *sdata; 1609 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
1526 1610
1527 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 1611 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
1528 return RX_DROP_MONITOR; 1612 return RX_DROP_MONITOR;
1529 1613
1530 sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 1614 if (ieee80211_vif_is_mesh(&sdata->vif))
1531 if ((sdata->vif.type == IEEE80211_IF_TYPE_STA || 1615 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1532 sdata->vif.type == IEEE80211_IF_TYPE_IBSS || 1616
1533 sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) && 1617 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
1534 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)) 1618 sdata->vif.type != NL80211_IFTYPE_ADHOC)
1535 ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->status); 1619 return RX_DROP_MONITOR;
1536 else 1620
1621 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
1537 return RX_DROP_MONITOR; 1622 return RX_DROP_MONITOR;
1538 1623
1624 ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1539 return RX_QUEUED; 1625 return RX_QUEUED;
1540} 1626}
1541 1627
@@ -1565,7 +1651,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1565 if (!ieee80211_has_protected(hdr->frame_control)) 1651 if (!ieee80211_has_protected(hdr->frame_control))
1566 goto ignore; 1652 goto ignore;
1567 1653
1568 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) { 1654 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
1569 /* 1655 /*
1570 * APs with pairwise keys should never receive Michael MIC 1656 * APs with pairwise keys should never receive Michael MIC
1571 * errors for non-zero keyidx because these are reserved for 1657 * errors for non-zero keyidx because these are reserved for
@@ -1579,7 +1665,7 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
1579 !ieee80211_is_auth(hdr->frame_control)) 1665 !ieee80211_is_auth(hdr->frame_control))
1580 goto ignore; 1666 goto ignore;
1581 1667
1582 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); 1668 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr);
1583 ignore: 1669 ignore:
1584 dev_kfree_skb(rx->skb); 1670 dev_kfree_skb(rx->skb);
1585 rx->skb = NULL; 1671 rx->skb = NULL;
@@ -1635,7 +1721,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx)
1635 if (!netif_running(sdata->dev)) 1721 if (!netif_running(sdata->dev))
1636 continue; 1722 continue;
1637 1723
1638 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR || 1724 if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
1639 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 1725 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
1640 continue; 1726 continue;
1641 1727
@@ -1698,6 +1784,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
1698 CALL_RXH(ieee80211_rx_h_mesh_fwding); 1784 CALL_RXH(ieee80211_rx_h_mesh_fwding);
1699 CALL_RXH(ieee80211_rx_h_data) 1785 CALL_RXH(ieee80211_rx_h_data)
1700 CALL_RXH(ieee80211_rx_h_ctrl) 1786 CALL_RXH(ieee80211_rx_h_ctrl)
1787 CALL_RXH(ieee80211_rx_h_action)
1701 CALL_RXH(ieee80211_rx_h_mgmt) 1788 CALL_RXH(ieee80211_rx_h_mgmt)
1702 1789
1703#undef CALL_RXH 1790#undef CALL_RXH
@@ -1733,7 +1820,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1733 int multicast = is_multicast_ether_addr(hdr->addr1); 1820 int multicast = is_multicast_ether_addr(hdr->addr1);
1734 1821
1735 switch (sdata->vif.type) { 1822 switch (sdata->vif.type) {
1736 case IEEE80211_IF_TYPE_STA: 1823 case NL80211_IFTYPE_STATION:
1737 if (!bssid) 1824 if (!bssid)
1738 return 0; 1825 return 0;
1739 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1826 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
@@ -1748,14 +1835,10 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1748 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1835 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1749 } 1836 }
1750 break; 1837 break;
1751 case IEEE80211_IF_TYPE_IBSS: 1838 case NL80211_IFTYPE_ADHOC:
1752 if (!bssid) 1839 if (!bssid)
1753 return 0; 1840 return 0;
1754 if (ieee80211_is_beacon(hdr->frame_control)) { 1841 if (ieee80211_is_beacon(hdr->frame_control)) {
1755 if (!rx->sta)
1756 rx->sta = ieee80211_ibss_add_sta(sdata->dev,
1757 rx->skb, bssid, hdr->addr2,
1758 BIT(rx->status->rate_idx));
1759 return 1; 1842 return 1;
1760 } 1843 }
1761 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1844 else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
@@ -1769,11 +1852,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1769 return 0; 1852 return 0;
1770 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1853 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1771 } else if (!rx->sta) 1854 } else if (!rx->sta)
1772 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, 1855 rx->sta = ieee80211_ibss_add_sta(sdata, rx->skb,
1773 bssid, hdr->addr2, 1856 bssid, hdr->addr2,
1774 BIT(rx->status->rate_idx)); 1857 BIT(rx->status->rate_idx));
1775 break; 1858 break;
1776 case IEEE80211_IF_TYPE_MESH_POINT: 1859 case NL80211_IFTYPE_MESH_POINT:
1777 if (!multicast && 1860 if (!multicast &&
1778 compare_ether_addr(sdata->dev->dev_addr, 1861 compare_ether_addr(sdata->dev->dev_addr,
1779 hdr->addr1) != 0) { 1862 hdr->addr1) != 0) {
@@ -1783,8 +1866,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1783 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1866 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1784 } 1867 }
1785 break; 1868 break;
1786 case IEEE80211_IF_TYPE_VLAN: 1869 case NL80211_IFTYPE_AP_VLAN:
1787 case IEEE80211_IF_TYPE_AP: 1870 case NL80211_IFTYPE_AP:
1788 if (!bssid) { 1871 if (!bssid) {
1789 if (compare_ether_addr(sdata->dev->dev_addr, 1872 if (compare_ether_addr(sdata->dev->dev_addr,
1790 hdr->addr1)) 1873 hdr->addr1))
@@ -1796,16 +1879,17 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
1796 rx->flags &= ~IEEE80211_RX_RA_MATCH; 1879 rx->flags &= ~IEEE80211_RX_RA_MATCH;
1797 } 1880 }
1798 break; 1881 break;
1799 case IEEE80211_IF_TYPE_WDS: 1882 case NL80211_IFTYPE_WDS:
1800 if (bssid || !ieee80211_is_data(hdr->frame_control)) 1883 if (bssid || !ieee80211_is_data(hdr->frame_control))
1801 return 0; 1884 return 0;
1802 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) 1885 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
1803 return 0; 1886 return 0;
1804 break; 1887 break;
1805 case IEEE80211_IF_TYPE_MNTR: 1888 case NL80211_IFTYPE_MONITOR:
1806 /* take everything */ 1889 /* take everything */
1807 break; 1890 break;
1808 case IEEE80211_IF_TYPE_INVALID: 1891 case NL80211_IFTYPE_UNSPECIFIED:
1892 case __NL80211_IFTYPE_AFTER_LAST:
1809 /* should never get here */ 1893 /* should never get here */
1810 WARN_ON(1); 1894 WARN_ON(1);
1811 break; 1895 break;
@@ -1827,23 +1911,20 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1827 struct ieee80211_sub_if_data *sdata; 1911 struct ieee80211_sub_if_data *sdata;
1828 struct ieee80211_hdr *hdr; 1912 struct ieee80211_hdr *hdr;
1829 struct ieee80211_rx_data rx; 1913 struct ieee80211_rx_data rx;
1830 u16 type;
1831 int prepares; 1914 int prepares;
1832 struct ieee80211_sub_if_data *prev = NULL; 1915 struct ieee80211_sub_if_data *prev = NULL;
1833 struct sk_buff *skb_new; 1916 struct sk_buff *skb_new;
1834 u8 *bssid; 1917 u8 *bssid;
1835 1918
1836 hdr = (struct ieee80211_hdr *) skb->data; 1919 hdr = (struct ieee80211_hdr *)skb->data;
1837 memset(&rx, 0, sizeof(rx)); 1920 memset(&rx, 0, sizeof(rx));
1838 rx.skb = skb; 1921 rx.skb = skb;
1839 rx.local = local; 1922 rx.local = local;
1840 1923
1841 rx.status = status; 1924 rx.status = status;
1842 rx.rate = rate; 1925 rx.rate = rate;
1843 rx.fc = le16_to_cpu(hdr->frame_control);
1844 type = rx.fc & IEEE80211_FCTL_FTYPE;
1845 1926
1846 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) 1927 if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
1847 local->dot11ReceivedFragmentCount++; 1928 local->dot11ReceivedFragmentCount++;
1848 1929
1849 rx.sta = sta_info_get(local, hdr->addr2); 1930 rx.sta = sta_info_get(local, hdr->addr2);
@@ -1857,7 +1938,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1857 return; 1938 return;
1858 } 1939 }
1859 1940
1860 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) 1941 if (unlikely(local->sw_scanning || local->hw_scanning))
1861 rx.flags |= IEEE80211_RX_IN_SCAN; 1942 rx.flags |= IEEE80211_RX_IN_SCAN;
1862 1943
1863 ieee80211_parse_qos(&rx); 1944 ieee80211_parse_qos(&rx);
@@ -1869,7 +1950,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1869 if (!netif_running(sdata->dev)) 1950 if (!netif_running(sdata->dev))
1870 continue; 1951 continue;
1871 1952
1872 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) 1953 if (sdata->vif.type == NL80211_IFTYPE_MONITOR)
1873 continue; 1954 continue;
1874 1955
1875 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 1956 bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
@@ -1904,14 +1985,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
1904 prev->dev->name); 1985 prev->dev->name);
1905 continue; 1986 continue;
1906 } 1987 }
1907 rx.fc = le16_to_cpu(hdr->frame_control);
1908 ieee80211_invoke_rx_handlers(prev, &rx, skb_new); 1988 ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
1909 prev = sdata; 1989 prev = sdata;
1910 } 1990 }
1911 if (prev) { 1991 if (prev)
1912 rx.fc = le16_to_cpu(hdr->frame_control);
1913 ieee80211_invoke_rx_handlers(prev, &rx, skb); 1992 ieee80211_invoke_rx_handlers(prev, &rx, skb);
1914 } else 1993 else
1915 dev_kfree_skb(skb); 1994 dev_kfree_skb(skb);
1916} 1995}
1917 1996
@@ -2080,7 +2159,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
2080 /* if this mpdu is fragmented - terminate rx aggregation session */ 2159 /* if this mpdu is fragmented - terminate rx aggregation session */
2081 sc = le16_to_cpu(hdr->seq_ctrl); 2160 sc = le16_to_cpu(hdr->seq_ctrl);
2082 if (sc & IEEE80211_SCTL_FRAG) { 2161 if (sc & IEEE80211_SCTL_FRAG) {
2083 ieee80211_sta_stop_rx_ba_session(sta->sdata->dev, sta->addr, 2162 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
2084 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); 2163 tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
2085 ret = 1; 2164 ret = 1;
2086 goto end_reorder; 2165 goto end_reorder;
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
new file mode 100644
index 000000000000..8e6685e7ae85
--- /dev/null
+++ b/net/mac80211/scan.c
@@ -0,0 +1,937 @@
1/*
2 * Scanning implementation
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2004, Instant802 Networks, Inc.
6 * Copyright 2005, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15/* TODO:
16 * order BSS list by RSSI(?) ("quality of AP")
17 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
18 * SSID)
19 */
20
21#include <linux/wireless.h>
22#include <linux/if_arp.h>
23#include <net/mac80211.h>
24#include <net/iw_handler.h>
25
26#include "ieee80211_i.h"
27#include "mesh.h"
28
29#define IEEE80211_PROBE_DELAY (HZ / 33)
30#define IEEE80211_CHANNEL_TIME (HZ / 33)
31#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
32
33void ieee80211_rx_bss_list_init(struct ieee80211_local *local)
34{
35 spin_lock_init(&local->bss_lock);
36 INIT_LIST_HEAD(&local->bss_list);
37}
38
39void ieee80211_rx_bss_list_deinit(struct ieee80211_local *local)
40{
41 struct ieee80211_bss *bss, *tmp;
42
43 list_for_each_entry_safe(bss, tmp, &local->bss_list, list)
44 ieee80211_rx_bss_put(local, bss);
45}
46
47struct ieee80211_bss *
48ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
49 u8 *ssid, u8 ssid_len)
50{
51 struct ieee80211_bss *bss;
52
53 spin_lock_bh(&local->bss_lock);
54 bss = local->bss_hash[STA_HASH(bssid)];
55 while (bss) {
56 if (!bss_mesh_cfg(bss) &&
57 !memcmp(bss->bssid, bssid, ETH_ALEN) &&
58 bss->freq == freq &&
59 bss->ssid_len == ssid_len &&
60 (ssid_len == 0 || !memcmp(bss->ssid, ssid, ssid_len))) {
61 atomic_inc(&bss->users);
62 break;
63 }
64 bss = bss->hnext;
65 }
66 spin_unlock_bh(&local->bss_lock);
67 return bss;
68}
69
70/* Caller must hold local->bss_lock */
71static void __ieee80211_rx_bss_hash_add(struct ieee80211_local *local,
72 struct ieee80211_bss *bss)
73{
74 u8 hash_idx;
75
76 if (bss_mesh_cfg(bss))
77 hash_idx = mesh_id_hash(bss_mesh_id(bss),
78 bss_mesh_id_len(bss));
79 else
80 hash_idx = STA_HASH(bss->bssid);
81
82 bss->hnext = local->bss_hash[hash_idx];
83 local->bss_hash[hash_idx] = bss;
84}
85
86/* Caller must hold local->bss_lock */
87static void __ieee80211_rx_bss_hash_del(struct ieee80211_local *local,
88 struct ieee80211_bss *bss)
89{
90 struct ieee80211_bss *b, *prev = NULL;
91 b = local->bss_hash[STA_HASH(bss->bssid)];
92 while (b) {
93 if (b == bss) {
94 if (!prev)
95 local->bss_hash[STA_HASH(bss->bssid)] =
96 bss->hnext;
97 else
98 prev->hnext = bss->hnext;
99 break;
100 }
101 prev = b;
102 b = b->hnext;
103 }
104}
105
/*
 * Allocate a new (non-mesh) BSS entry for @bssid on @freq and insert
 * it into the local BSS list and hash table.
 *
 * The entry starts with a user count of 2: one reference owned by the
 * list itself and one handed to the caller, to be dropped via
 * ieee80211_rx_bss_put().  Returns NULL on allocation failure.
 */
struct ieee80211_bss *
ieee80211_rx_bss_add(struct ieee80211_local *local, u8 *bssid, int freq,
		     u8 *ssid, u8 ssid_len)
{
	struct ieee80211_bss *bss;

	bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
	if (!bss)
		return NULL;
	/* one ref for the list, one for the caller */
	atomic_set(&bss->users, 2);
	memcpy(bss->bssid, bssid, ETH_ALEN);
	bss->freq = freq;
	/* an over-long SSID is silently dropped; entry keeps ssid_len == 0 */
	if (ssid && ssid_len <= IEEE80211_MAX_SSID_LEN) {
		memcpy(bss->ssid, ssid, ssid_len);
		bss->ssid_len = ssid_len;
	}

	spin_lock_bh(&local->bss_lock);
	/* TODO: order by RSSI? */
	list_add_tail(&bss->list, &local->bss_list);
	__ieee80211_rx_bss_hash_add(local, bss);
	spin_unlock_bh(&local->bss_lock);
	return bss;
}
130
131#ifdef CONFIG_MAC80211_MESH
/*
 * Find a mesh BSS entry matching @mesh_id/@mesh_cfg on @freq.
 *
 * Only the first MESH_CFG_CMP_LEN bytes of the configuration IE are
 * compared.  On a hit, a reference is taken (release with
 * ieee80211_rx_bss_put()); returns NULL when no entry matches.
 */
static struct ieee80211_bss *
ieee80211_rx_mesh_bss_get(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
			  u8 *mesh_cfg, int freq)
{
	struct ieee80211_bss *bss;

	spin_lock_bh(&local->bss_lock);
	/* mesh entries are hashed on the mesh ID, not the BSSID */
	bss = local->bss_hash[mesh_id_hash(mesh_id, mesh_id_len)];
	while (bss) {
		if (bss_mesh_cfg(bss) &&
		    !memcmp(bss_mesh_cfg(bss), mesh_cfg, MESH_CFG_CMP_LEN) &&
		    bss->freq == freq &&
		    mesh_id_len == bss->mesh_id_len &&
		    (mesh_id_len == 0 || !memcmp(bss->mesh_id, mesh_id,
						 mesh_id_len))) {
			atomic_inc(&bss->users);
			break;
		}
		bss = bss->hnext;
	}
	spin_unlock_bh(&local->bss_lock);
	return bss;
}
155
156static struct ieee80211_bss *
157ieee80211_rx_mesh_bss_add(struct ieee80211_local *local, u8 *mesh_id, int mesh_id_len,
158 u8 *mesh_cfg, int mesh_config_len, int freq)
159{
160 struct ieee80211_bss *bss;
161
162 if (mesh_config_len != MESH_CFG_LEN)
163 return NULL;
164
165 bss = kzalloc(sizeof(*bss), GFP_ATOMIC);
166 if (!bss)
167 return NULL;
168
169 bss->mesh_cfg = kmalloc(MESH_CFG_CMP_LEN, GFP_ATOMIC);
170 if (!bss->mesh_cfg) {
171 kfree(bss);
172 return NULL;
173 }
174
175 if (mesh_id_len && mesh_id_len <= IEEE80211_MAX_MESH_ID_LEN) {
176 bss->mesh_id = kmalloc(mesh_id_len, GFP_ATOMIC);
177 if (!bss->mesh_id) {
178 kfree(bss->mesh_cfg);
179 kfree(bss);
180 return NULL;
181 }
182 memcpy(bss->mesh_id, mesh_id, mesh_id_len);
183 }
184
185 atomic_set(&bss->users, 2);
186 memcpy(bss->mesh_cfg, mesh_cfg, MESH_CFG_CMP_LEN);
187 bss->mesh_id_len = mesh_id_len;
188 bss->freq = freq;
189 spin_lock_bh(&local->bss_lock);
190 /* TODO: order by RSSI? */
191 list_add_tail(&bss->list, &local->bss_list);
192 __ieee80211_rx_bss_hash_add(local, bss);
193 spin_unlock_bh(&local->bss_lock);
194 return bss;
195}
196#endif
197
198static void ieee80211_rx_bss_free(struct ieee80211_bss *bss)
199{
200 kfree(bss->ies);
201 kfree(bss_mesh_id(bss));
202 kfree(bss_mesh_cfg(bss));
203 kfree(bss);
204}
205
/*
 * Drop one reference to @bss.  When the last reference goes away the
 * entry is unhashed, unlinked from local->bss_list and freed.
 *
 * atomic_dec_and_lock() only takes local->bss_lock when the count
 * actually drops to zero; bottom halves are disabled up front because
 * the lock is elsewhere taken with spin_lock_bh().  The matching
 * spin_unlock_bh() below re-enables BHs on the destroy path.
 */
void ieee80211_rx_bss_put(struct ieee80211_local *local,
			  struct ieee80211_bss *bss)
{
	local_bh_disable();
	if (!atomic_dec_and_lock(&bss->users, &local->bss_lock)) {
		local_bh_enable();
		return;
	}

	__ieee80211_rx_bss_hash_del(local, bss);
	list_del(&bss->list);
	spin_unlock_bh(&local->bss_lock);
	ieee80211_rx_bss_free(bss);
}
220
/*
 * Update (or create) the scan-result entry for the BSS described by a
 * received beacon or probe response.
 *
 * @rx_status: RX metadata for the frame (band, signal, noise, ...)
 * @mgmt/@len: the management frame itself
 * @elems: pre-parsed information elements from the frame
 * @freq: frequency the BSS operates on
 * @beacon: true for beacons, false for probe responses
 *
 * Returns the entry with a reference held (caller must use
 * ieee80211_rx_bss_put()), or NULL on allocation failure.
 */
struct ieee80211_bss *
ieee80211_bss_info_update(struct ieee80211_local *local,
			  struct ieee80211_rx_status *rx_status,
			  struct ieee80211_mgmt *mgmt,
			  size_t len,
			  struct ieee802_11_elems *elems,
			  int freq, bool beacon)
{
	struct ieee80211_bss *bss;
	int clen;

	/* mesh frames (those carrying a mesh config IE) use the mesh table */
#ifdef CONFIG_MAC80211_MESH
	if (elems->mesh_config)
		bss = ieee80211_rx_mesh_bss_get(local, elems->mesh_id,
				elems->mesh_id_len, elems->mesh_config, freq);
	else
#endif
		bss = ieee80211_rx_bss_get(local, mgmt->bssid, freq,
					   elems->ssid, elems->ssid_len);
	if (!bss) {
#ifdef CONFIG_MAC80211_MESH
		if (elems->mesh_config)
			bss = ieee80211_rx_mesh_bss_add(local, elems->mesh_id,
				elems->mesh_id_len, elems->mesh_config,
				elems->mesh_config_len, freq);
		else
#endif
			bss = ieee80211_rx_bss_add(local, mgmt->bssid, freq,
						  elems->ssid, elems->ssid_len);
		if (!bss)
			return NULL;
	} else {
#if 0
		/* TODO: order by RSSI? */
		spin_lock_bh(&local->bss_lock);
		list_move_tail(&bss->list, &local->bss_list);
		spin_unlock_bh(&local->bss_lock);
#endif
	}

	/* save the ERP value so that it is available at association time */
	if (elems->erp_info && elems->erp_info_len >= 1) {
		bss->erp_value = elems->erp_info[0];
		bss->has_erp_value = 1;
	}

	/* beacon and probe response share this fixed-field layout */
	bss->beacon_int = le16_to_cpu(mgmt->u.beacon.beacon_int);
	bss->capability = le16_to_cpu(mgmt->u.beacon.capab_info);

	if (elems->tim) {
		struct ieee80211_tim_ie *tim_ie =
			(struct ieee80211_tim_ie *)elems->tim;
		bss->dtim_period = tim_ie->dtim_period;
	}

	/* set default value for buggy APs */
	if (!elems->tim || bss->dtim_period == 0)
		bss->dtim_period = 1;

	/* collect supported rates, clamped to IEEE80211_MAX_SUPP_RATES */
	bss->supp_rates_len = 0;
	if (elems->supp_rates) {
		clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
		if (clen > elems->supp_rates_len)
			clen = elems->supp_rates_len;
		memcpy(&bss->supp_rates[bss->supp_rates_len], elems->supp_rates,
		       clen);
		bss->supp_rates_len += clen;
	}
	if (elems->ext_supp_rates) {
		clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
		if (clen > elems->ext_supp_rates_len)
			clen = elems->ext_supp_rates_len;
		memcpy(&bss->supp_rates[bss->supp_rates_len],
		       elems->ext_supp_rates, clen);
		bss->supp_rates_len += clen;
	}

	bss->band = rx_status->band;

	bss->timestamp = le64_to_cpu(mgmt->u.beacon.timestamp);
	bss->last_update = jiffies;
	bss->signal = rx_status->signal;
	bss->noise = rx_status->noise;
	bss->qual = rx_status->qual;
	bss->wmm_used = elems->wmm_param || elems->wmm_info;

	if (!beacon)
		bss->last_probe_resp = jiffies;

	/*
	 * For probe responses, or if we don't have any information yet,
	 * use the IEs from the beacon.
	 */
	if (!bss->ies || !beacon) {
		/* grow the IE buffer only when the new set is larger */
		if (bss->ies == NULL || bss->ies_len < elems->total_len) {
			kfree(bss->ies);
			bss->ies = kmalloc(elems->total_len, GFP_ATOMIC);
		}
		if (bss->ies) {
			memcpy(bss->ies, elems->ie_start, elems->total_len);
			bss->ies_len = elems->total_len;
		} else
			bss->ies_len = 0;
	}

	return bss;
}
328
329ieee80211_rx_result
330ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
331 struct ieee80211_rx_status *rx_status)
332{
333 struct ieee80211_mgmt *mgmt;
334 struct ieee80211_bss *bss;
335 u8 *elements;
336 struct ieee80211_channel *channel;
337 size_t baselen;
338 int freq;
339 __le16 fc;
340 bool presp, beacon = false;
341 struct ieee802_11_elems elems;
342
343 if (skb->len < 2)
344 return RX_DROP_UNUSABLE;
345
346 mgmt = (struct ieee80211_mgmt *) skb->data;
347 fc = mgmt->frame_control;
348
349 if (ieee80211_is_ctl(fc))
350 return RX_CONTINUE;
351
352 if (skb->len < 24)
353 return RX_DROP_MONITOR;
354
355 presp = ieee80211_is_probe_resp(fc);
356 if (presp) {
357 /* ignore ProbeResp to foreign address */
358 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
359 return RX_DROP_MONITOR;
360
361 presp = true;
362 elements = mgmt->u.probe_resp.variable;
363 baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
364 } else {
365 beacon = ieee80211_is_beacon(fc);
366 baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
367 elements = mgmt->u.beacon.variable;
368 }
369
370 if (!presp && !beacon)
371 return RX_CONTINUE;
372
373 if (baselen > skb->len)
374 return RX_DROP_MONITOR;
375
376 ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
377
378 if (elems.ds_params && elems.ds_params_len == 1)
379 freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
380 else
381 freq = rx_status->freq;
382
383 channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
384
385 if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
386 return RX_DROP_MONITOR;
387
388 bss = ieee80211_bss_info_update(sdata->local, rx_status,
389 mgmt, skb->len, &elems,
390 freq, beacon);
391 ieee80211_rx_bss_put(sdata->local, bss);
392
393 dev_kfree_skb(skb);
394 return RX_QUEUED;
395}
396
397static void ieee80211_send_nullfunc(struct ieee80211_local *local,
398 struct ieee80211_sub_if_data *sdata,
399 int powersave)
400{
401 struct sk_buff *skb;
402 struct ieee80211_hdr *nullfunc;
403 __le16 fc;
404
405 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
406 if (!skb) {
407 printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
408 "frame\n", sdata->dev->name);
409 return;
410 }
411 skb_reserve(skb, local->hw.extra_tx_headroom);
412
413 nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
414 memset(nullfunc, 0, 24);
415 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
416 IEEE80211_FCTL_TODS);
417 if (powersave)
418 fc |= cpu_to_le16(IEEE80211_FCTL_PM);
419 nullfunc->frame_control = fc;
420 memcpy(nullfunc->addr1, sdata->u.sta.bssid, ETH_ALEN);
421 memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
422 memcpy(nullfunc->addr3, sdata->u.sta.bssid, ETH_ALEN);
423
424 ieee80211_tx_skb(sdata, skb, 0);
425}
426
/*
 * Called (by mac80211 itself for software scans, or by drivers for
 * hardware scans) when a scan finishes: report completion to userspace,
 * restore the operational channel and RX filters, and wake the TX
 * queues that were stopped for the scan.
 */
void ieee80211_scan_completed(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata;
	union iwreq_data wrqu;

	/* calling this without a scan in progress is a driver bug */
	if (WARN_ON(!local->hw_scanning && !local->sw_scanning))
		return;

	local->last_scan_completed = jiffies;
	memset(&wrqu, 0, sizeof(wrqu));

	/*
	 * local->scan_sdata could have been NULLed by the interface
	 * down code in case we were scanning on an interface that is
	 * being taken down.
	 */
	sdata = local->scan_sdata;
	if (sdata)
		wireless_send_event(sdata->dev, SIOCGIWSCAN, &wrqu, NULL);

	/* hardware scans skip the filter/queue handling below */
	if (local->hw_scanning) {
		local->hw_scanning = false;
		if (ieee80211_hw_config(local))
			printk(KERN_DEBUG "%s: failed to restore operational "
			       "channel after scan\n", wiphy_name(local->hw.wiphy));

		goto done;
	}

	local->sw_scanning = false;
	if (ieee80211_hw_config(local))
		printk(KERN_DEBUG "%s: failed to restore operational "
		       "channel after scan\n", wiphy_name(local->hw.wiphy));


	/* drop the beacon/probe-response promiscuity used while scanning */
	netif_tx_lock_bh(local->mdev);
	netif_addr_lock(local->mdev);
	local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
	local->ops->configure_filter(local_to_hw(local),
				     FIF_BCN_PRBRESP_PROMISC,
				     &local->filter_flags,
				     local->mdev->mc_count,
				     local->mdev->mc_list);

	netif_addr_unlock(local->mdev);
	netif_tx_unlock_bh(local->mdev);

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* Tell AP we're back */
		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
				ieee80211_send_nullfunc(local, sdata, 0);
				netif_tx_wake_all_queues(sdata->dev);
			}
		} else
			netif_tx_wake_all_queues(sdata->dev);
	}
	rcu_read_unlock();

 done:
	ieee80211_mlme_notify_scan_completed(local);
	ieee80211_mesh_notify_scan_completed(local);
}
EXPORT_SYMBOL(ieee80211_scan_completed);
493
494
/*
 * Software-scan state machine, driven as a delayed work item.  It
 * alternates between two states:
 *
 *   SCAN_SET_CHANNEL - pick and tune to the next channel (advancing
 *                      across bands), or complete the scan when no
 *                      channels remain;
 *   SCAN_SEND_PROBE  - send a probe request on the current channel
 *                      (unless passive-scan only) and dwell there.
 *
 * Each pass re-queues itself with the appropriate delay; a skipped
 * channel re-queues immediately (next_delay stays 0).
 */
void ieee80211_scan_work(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, scan_work.work);
	struct ieee80211_sub_if_data *sdata = local->scan_sdata;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	int skip;
	unsigned long next_delay = 0;

	/*
	 * Avoid re-scheduling when the sdata is going away.
	 */
	if (!netif_running(sdata->dev))
		return;

	switch (local->scan_state) {
	case SCAN_SET_CHANNEL:
		/*
		 * Get current scan band. scan_band may be IEEE80211_NUM_BANDS
		 * after we successfully scanned the last channel of the last
		 * band (and the last band is supported by the hw)
		 */
		if (local->scan_band < IEEE80211_NUM_BANDS)
			sband = local->hw.wiphy->bands[local->scan_band];
		else
			sband = NULL;

		/*
		 * If we are at an unsupported band and have more bands
		 * left to scan, advance to the next supported one.
		 */
		while (!sband && local->scan_band < IEEE80211_NUM_BANDS - 1) {
			local->scan_band++;
			sband = local->hw.wiphy->bands[local->scan_band];
			local->scan_channel_idx = 0;
		}

		/* if no more bands/channels left, complete scan */
		if (!sband || local->scan_channel_idx >= sband->n_channels) {
			ieee80211_scan_completed(local_to_hw(local));
			return;
		}
		skip = 0;
		chan = &sband->channels[local->scan_channel_idx];

		/* skip disabled channels, and no-IBSS channels in ad-hoc mode */
		if (chan->flags & IEEE80211_CHAN_DISABLED ||
		    (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
		     chan->flags & IEEE80211_CHAN_NO_IBSS))
			skip = 1;

		if (!skip) {
			local->scan_channel = chan;
			if (ieee80211_hw_config(local)) {
				printk(KERN_DEBUG "%s: failed to set freq to "
				       "%d MHz for scan\n", wiphy_name(local->hw.wiphy),
				       chan->center_freq);
				skip = 1;
			}
		}

		/* advance state machine to next channel/band */
		local->scan_channel_idx++;
		if (local->scan_channel_idx >= sband->n_channels) {
			/*
			 * scan_band may end up == IEEE80211_NUM_BANDS, but
			 * we'll catch that case above and complete the scan
			 * if that is the case.
			 */
			local->scan_band++;
			local->scan_channel_idx = 0;
		}

		/* skipped channel: retry immediately (next_delay == 0) */
		if (skip)
			break;

		/* give the hardware time to settle on the new channel */
		next_delay = IEEE80211_PROBE_DELAY +
			     usecs_to_jiffies(local->hw.channel_change_time);
		local->scan_state = SCAN_SEND_PROBE;
		break;
	case SCAN_SEND_PROBE:
		next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
		local->scan_state = SCAN_SET_CHANNEL;

		/* passive-scan channels: listen only, longer dwell time */
		if (local->scan_channel->flags & IEEE80211_CHAN_PASSIVE_SCAN)
			break;
		ieee80211_send_probe_req(sdata, NULL, local->scan_ssid,
					 local->scan_ssid_len);
		next_delay = IEEE80211_CHANNEL_TIME;
		break;
	}

	queue_delayed_work(local->hw.workqueue, &local->scan_work,
			   next_delay);
}
590
591
/*
 * Start a scan on behalf of @scan_sdata.
 *
 * @scan_sdata: interface requesting the scan
 * @ssid: SSID to probe for, or NULL for a broadcast/wildcard scan
 * @ssid_len: length of @ssid in bytes (<= IEEE80211_MAX_SSID_LEN)
 *
 * Returns 0 on success (including the case where the same interface
 * already has a scan in progress), -EINVAL for an over-long SSID,
 * -EBUSY if another interface is scanning, or the driver's hw_scan
 * error code.
 *
 * If the driver implements hw_scan, the whole scan is offloaded to
 * hardware; otherwise the software scan state machine is armed and
 * driven via local->scan_work.
 */
int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
			 u8 *ssid, size_t ssid_len)
{
	struct ieee80211_local *local = scan_sdata->local;
	struct ieee80211_sub_if_data *sdata;

	if (ssid_len > IEEE80211_MAX_SSID_LEN)
		return -EINVAL;

	/* MLME-SCAN.request (page 118)  page 144 (11.1.3.1)
	 * BSSType: INFRASTRUCTURE, INDEPENDENT, ANY_BSS
	 * BSSID: MACAddress
	 * SSID
	 * ScanType: ACTIVE, PASSIVE
	 * ProbeDelay: delay (in microseconds) to be used prior to transmitting
	 *    a Probe frame during active scanning
	 * ChannelList
	 * MinChannelTime (>= ProbeDelay), in TU
	 * MaxChannelTime: (>= MinChannelTime), in TU
	 */

	 /* MLME-SCAN.confirm
	  * BSSDescriptionSet
	  * ResultCode: SUCCESS, INVALID_PARAMETERS
	 */

	/* Only one scan at a time; the same requester is treated as a
	 * harmless repeat, anyone else gets -EBUSY. */
	if (local->sw_scanning || local->hw_scanning) {
		if (local->scan_sdata == scan_sdata)
			return 0;
		return -EBUSY;
	}

	/* Hardware-offloaded scan path: hand the request to the driver
	 * and record the owner only after the driver accepted it. */
	if (local->ops->hw_scan) {
		int rc;

		local->hw_scanning = true;
		rc = local->ops->hw_scan(local_to_hw(local), ssid, ssid_len);
		if (rc) {
			local->hw_scanning = false;
			return rc;
		}
		local->scan_sdata = scan_sdata;
		return 0;
	}

	/* Software scan: mark scanning before touching queues so other
	 * paths see a consistent state. */
	local->sw_scanning = true;

	/*
	 * Stop TX on all interfaces for the duration of the scan;
	 * associated stations additionally announce power-save via a
	 * nullfunc frame so the AP buffers frames for us.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
			if (sdata->u.sta.flags & IEEE80211_STA_ASSOCIATED) {
				netif_tx_stop_all_queues(sdata->dev);
				ieee80211_send_nullfunc(local, sdata, 1);
			}
		} else
			netif_tx_stop_all_queues(sdata->dev);
	}
	rcu_read_unlock();

	/* Latch the scan parameters and reset the state machine to the
	 * first channel of the 2 GHz band. */
	if (ssid) {
		local->scan_ssid_len = ssid_len;
		memcpy(local->scan_ssid, ssid, ssid_len);
	} else
		local->scan_ssid_len = 0;
	local->scan_state = SCAN_SET_CHANNEL;
	local->scan_channel_idx = 0;
	local->scan_band = IEEE80211_BAND_2GHZ;
	local->scan_sdata = scan_sdata;

	/* Accept beacons/probe responses from any BSS while scanning;
	 * the addr lock protects the filter/multicast state. */
	netif_addr_lock_bh(local->mdev);
	local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
	local->ops->configure_filter(local_to_hw(local),
				     FIF_BCN_PRBRESP_PROMISC,
				     &local->filter_flags,
				     local->mdev->mc_count,
				     local->mdev->mc_list);
	netif_addr_unlock_bh(local->mdev);

	/* TODO: start scan as soon as all nullfunc frames are ACKed */
	queue_delayed_work(local->hw.workqueue, &local->scan_work,
			   IEEE80211_CHANNEL_TIME);

	return 0;
}
676
677
678int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
679 u8 *ssid, size_t ssid_len)
680{
681 struct ieee80211_local *local = sdata->local;
682 struct ieee80211_if_sta *ifsta;
683
684 if (sdata->vif.type != NL80211_IFTYPE_STATION)
685 return ieee80211_start_scan(sdata, ssid, ssid_len);
686
687 /*
688 * STA has a state machine that might need to defer scanning
689 * while it's trying to associate/authenticate, therefore we
690 * queue it up to the state machine in that case.
691 */
692
693 if (local->sw_scanning || local->hw_scanning) {
694 if (local->scan_sdata == sdata)
695 return 0;
696 return -EBUSY;
697 }
698
699 ifsta = &sdata->u.sta;
700
701 ifsta->scan_ssid_len = ssid_len;
702 if (ssid_len)
703 memcpy(ifsta->scan_ssid, ssid, ssid_len);
704 set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
705 queue_work(local->hw.workqueue, &ifsta->work);
706
707 return 0;
708}
709
710
711static void ieee80211_scan_add_ies(struct iw_request_info *info,
712 struct ieee80211_bss *bss,
713 char **current_ev, char *end_buf)
714{
715 u8 *pos, *end, *next;
716 struct iw_event iwe;
717
718 if (bss == NULL || bss->ies == NULL)
719 return;
720
721 /*
722 * If needed, fragment the IEs buffer (at IE boundaries) into short
723 * enough fragments to fit into IW_GENERIC_IE_MAX octet messages.
724 */
725 pos = bss->ies;
726 end = pos + bss->ies_len;
727
728 while (end - pos > IW_GENERIC_IE_MAX) {
729 next = pos + 2 + pos[1];
730 while (next + 2 + next[1] - pos < IW_GENERIC_IE_MAX)
731 next = next + 2 + next[1];
732
733 memset(&iwe, 0, sizeof(iwe));
734 iwe.cmd = IWEVGENIE;
735 iwe.u.data.length = next - pos;
736 *current_ev = iwe_stream_add_point(info, *current_ev,
737 end_buf, &iwe, pos);
738
739 pos = next;
740 }
741
742 if (end > pos) {
743 memset(&iwe, 0, sizeof(iwe));
744 iwe.cmd = IWEVGENIE;
745 iwe.u.data.length = end - pos;
746 *current_ev = iwe_stream_add_point(info, *current_ev,
747 end_buf, &iwe, pos);
748 }
749}
750
751
752static char *
753ieee80211_scan_result(struct ieee80211_local *local,
754 struct iw_request_info *info,
755 struct ieee80211_bss *bss,
756 char *current_ev, char *end_buf)
757{
758 struct iw_event iwe;
759 char *buf;
760
761 if (time_after(jiffies,
762 bss->last_update + IEEE80211_SCAN_RESULT_EXPIRE))
763 return current_ev;
764
765 memset(&iwe, 0, sizeof(iwe));
766 iwe.cmd = SIOCGIWAP;
767 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
768 memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
769 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
770 IW_EV_ADDR_LEN);
771
772 memset(&iwe, 0, sizeof(iwe));
773 iwe.cmd = SIOCGIWESSID;
774 if (bss_mesh_cfg(bss)) {
775 iwe.u.data.length = bss_mesh_id_len(bss);
776 iwe.u.data.flags = 1;
777 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
778 &iwe, bss_mesh_id(bss));
779 } else {
780 iwe.u.data.length = bss->ssid_len;
781 iwe.u.data.flags = 1;
782 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
783 &iwe, bss->ssid);
784 }
785
786 if (bss->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)
787 || bss_mesh_cfg(bss)) {
788 memset(&iwe, 0, sizeof(iwe));
789 iwe.cmd = SIOCGIWMODE;
790 if (bss_mesh_cfg(bss))
791 iwe.u.mode = IW_MODE_MESH;
792 else if (bss->capability & WLAN_CAPABILITY_ESS)
793 iwe.u.mode = IW_MODE_MASTER;
794 else
795 iwe.u.mode = IW_MODE_ADHOC;
796 current_ev = iwe_stream_add_event(info, current_ev, end_buf,
797 &iwe, IW_EV_UINT_LEN);
798 }
799
800 memset(&iwe, 0, sizeof(iwe));
801 iwe.cmd = SIOCGIWFREQ;
802 iwe.u.freq.m = ieee80211_frequency_to_channel(bss->freq);
803 iwe.u.freq.e = 0;
804 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
805 IW_EV_FREQ_LEN);
806
807 memset(&iwe, 0, sizeof(iwe));
808 iwe.cmd = SIOCGIWFREQ;
809 iwe.u.freq.m = bss->freq;
810 iwe.u.freq.e = 6;
811 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
812 IW_EV_FREQ_LEN);
813 memset(&iwe, 0, sizeof(iwe));
814 iwe.cmd = IWEVQUAL;
815 iwe.u.qual.qual = bss->qual;
816 iwe.u.qual.level = bss->signal;
817 iwe.u.qual.noise = bss->noise;
818 iwe.u.qual.updated = local->wstats_flags;
819 current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe,
820 IW_EV_QUAL_LEN);
821
822 memset(&iwe, 0, sizeof(iwe));
823 iwe.cmd = SIOCGIWENCODE;
824 if (bss->capability & WLAN_CAPABILITY_PRIVACY)
825 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
826 else
827 iwe.u.data.flags = IW_ENCODE_DISABLED;
828 iwe.u.data.length = 0;
829 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
830 &iwe, "");
831
832 ieee80211_scan_add_ies(info, bss, &current_ev, end_buf);
833
834 if (bss->supp_rates_len > 0) {
835 /* display all supported rates in readable format */
836 char *p = current_ev + iwe_stream_lcp_len(info);
837 int i;
838
839 memset(&iwe, 0, sizeof(iwe));
840 iwe.cmd = SIOCGIWRATE;
841 /* Those two flags are ignored... */
842 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
843
844 for (i = 0; i < bss->supp_rates_len; i++) {
845 iwe.u.bitrate.value = ((bss->supp_rates[i] &
846 0x7f) * 500000);
847 p = iwe_stream_add_value(info, current_ev, p,
848 end_buf, &iwe, IW_EV_PARAM_LEN);
849 }
850 current_ev = p;
851 }
852
853 buf = kmalloc(30, GFP_ATOMIC);
854 if (buf) {
855 memset(&iwe, 0, sizeof(iwe));
856 iwe.cmd = IWEVCUSTOM;
857 sprintf(buf, "tsf=%016llx", (unsigned long long)(bss->timestamp));
858 iwe.u.data.length = strlen(buf);
859 current_ev = iwe_stream_add_point(info, current_ev, end_buf,
860 &iwe, buf);
861 memset(&iwe, 0, sizeof(iwe));
862 iwe.cmd = IWEVCUSTOM;
863 sprintf(buf, " Last beacon: %dms ago",
864 jiffies_to_msecs(jiffies - bss->last_update));
865 iwe.u.data.length = strlen(buf);
866 current_ev = iwe_stream_add_point(info, current_ev,
867 end_buf, &iwe, buf);
868 kfree(buf);
869 }
870
871 if (bss_mesh_cfg(bss)) {
872 u8 *cfg = bss_mesh_cfg(bss);
873 buf = kmalloc(50, GFP_ATOMIC);
874 if (buf) {
875 memset(&iwe, 0, sizeof(iwe));
876 iwe.cmd = IWEVCUSTOM;
877 sprintf(buf, "Mesh network (version %d)", cfg[0]);
878 iwe.u.data.length = strlen(buf);
879 current_ev = iwe_stream_add_point(info, current_ev,
880 end_buf,
881 &iwe, buf);
882 sprintf(buf, "Path Selection Protocol ID: "
883 "0x%02X%02X%02X%02X", cfg[1], cfg[2], cfg[3],
884 cfg[4]);
885 iwe.u.data.length = strlen(buf);
886 current_ev = iwe_stream_add_point(info, current_ev,
887 end_buf,
888 &iwe, buf);
889 sprintf(buf, "Path Selection Metric ID: "
890 "0x%02X%02X%02X%02X", cfg[5], cfg[6], cfg[7],
891 cfg[8]);
892 iwe.u.data.length = strlen(buf);
893 current_ev = iwe_stream_add_point(info, current_ev,
894 end_buf,
895 &iwe, buf);
896 sprintf(buf, "Congestion Control Mode ID: "
897 "0x%02X%02X%02X%02X", cfg[9], cfg[10],
898 cfg[11], cfg[12]);
899 iwe.u.data.length = strlen(buf);
900 current_ev = iwe_stream_add_point(info, current_ev,
901 end_buf,
902 &iwe, buf);
903 sprintf(buf, "Channel Precedence: "
904 "0x%02X%02X%02X%02X", cfg[13], cfg[14],
905 cfg[15], cfg[16]);
906 iwe.u.data.length = strlen(buf);
907 current_ev = iwe_stream_add_point(info, current_ev,
908 end_buf,
909 &iwe, buf);
910 kfree(buf);
911 }
912 }
913
914 return current_ev;
915}
916
917
918int ieee80211_scan_results(struct ieee80211_local *local,
919 struct iw_request_info *info,
920 char *buf, size_t len)
921{
922 char *current_ev = buf;
923 char *end_buf = buf + len;
924 struct ieee80211_bss *bss;
925
926 spin_lock_bh(&local->bss_lock);
927 list_for_each_entry(bss, &local->bss_list, list) {
928 if (buf + len - current_ev <= IW_EV_ADDR_LEN) {
929 spin_unlock_bh(&local->bss_lock);
930 return -E2BIG;
931 }
932 current_ev = ieee80211_scan_result(local, info, bss,
933 current_ev, end_buf);
934 }
935 spin_unlock_bh(&local->bss_lock);
936 return current_ev - buf;
937}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
new file mode 100644
index 000000000000..f72bad636d8e
--- /dev/null
+++ b/net/mac80211/spectmgmt.c
@@ -0,0 +1,86 @@
1/*
2 * spectrum management
3 *
4 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Copyright 2002-2005, Instant802 Networks, Inc.
6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2008, Intel Corporation
10 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/ieee80211.h>
18#include <net/wireless.h>
19#include <net/mac80211.h>
20#include "ieee80211_i.h"
21#include "sta_info.h"
22#include "wme.h"
23
/*
 * Build and transmit a spectrum-management measurement report that
 * refuses the measurement described by @request_ie.
 *
 * @sdata: interface to send from
 * @request_ie: the measurement element from the incoming request;
 *	its token and type are echoed back in the refusal
 * @da/@bssid: destination and BSSID for the report frame
 * @dialog_token: dialog token from the request, echoed back
 */
static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
					struct ieee80211_msrment_ie *request_ie,
					const u8 *da, const u8 *bssid,
					u8 dialog_token)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *msr_report;

	/* room for driver headroom, the mgmt header/action body and
	 * one measurement element */
	skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
				sizeof(struct ieee80211_msrment_ie));

	if (!skb) {
		printk(KERN_ERR "%s: failed to allocate buffer for "
				"measurement report frame\n", sdata->dev->name);
		return;
	}

	skb_reserve(skb, local->hw.extra_tx_headroom);

	/* 24-byte 802.11 management header, zeroed then filled in */
	msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
	memset(msr_report, 0, 24);
	memcpy(msr_report->da, da, ETH_ALEN);
	memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN);
	memcpy(msr_report->bssid, bssid, ETH_ALEN);
	msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
						IEEE80211_STYPE_ACTION);

	/* action body: category byte + measurement action payload */
	skb_put(skb, 1 + sizeof(msr_report->u.action.u.measurement));
	msr_report->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
	msr_report->u.action.u.measurement.action_code =
				WLAN_ACTION_SPCT_MSR_RPRT;
	msr_report->u.action.u.measurement.dialog_token = dialog_token;

	msr_report->u.action.u.measurement.element_id = WLAN_EID_MEASURE_REPORT;
	msr_report->u.action.u.measurement.length =
			sizeof(struct ieee80211_msrment_ie);

	/* report element: echo token/type, set the "refused" mode bit */
	memset(&msr_report->u.action.u.measurement.msr_elem, 0,
		sizeof(struct ieee80211_msrment_ie));
	msr_report->u.action.u.measurement.msr_elem.token = request_ie->token;
	msr_report->u.action.u.measurement.msr_elem.mode |=
			IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED;
	msr_report->u.action.u.measurement.msr_elem.type = request_ie->type;

	ieee80211_tx_skb(sdata, skb, 0);
}
70
/*
 * Handle an incoming spectrum-management measurement request action
 * frame by refusing it.
 *
 * NOTE(review): @len is currently unused; callers are presumably
 * expected to have validated the frame length before calling — confirm
 * at the call site.
 */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
				       struct ieee80211_mgmt *mgmt,
				       size_t len)
{
	/*
	 * Ignoring a measurement request would violate the spec:
	 * mandatory measurements must be reported; optional
	 * measurements may be refused or reported as incapable.
	 * For now just refuse everything.
	 * TODO: answer basic measurements as unmeasured.
	 */
	ieee80211_send_refuse_measurement_request(sdata,
			&mgmt->u.action.u.measurement.msr_elem,
			mgmt->sa, mgmt->bssid,
			mgmt->u.action.u.measurement.dialog_token);
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f2ba653b9d69..9b72d15bc8dc 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -73,11 +73,11 @@ static int sta_info_hash_del(struct ieee80211_local *local,
73{ 73{
74 struct sta_info *s; 74 struct sta_info *s;
75 75
76 s = local->sta_hash[STA_HASH(sta->addr)]; 76 s = local->sta_hash[STA_HASH(sta->sta.addr)];
77 if (!s) 77 if (!s)
78 return -ENOENT; 78 return -ENOENT;
79 if (s == sta) { 79 if (s == sta) {
80 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], 80 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
81 s->hnext); 81 s->hnext);
82 return 0; 82 return 0;
83 } 83 }
@@ -93,26 +93,19 @@ static int sta_info_hash_del(struct ieee80211_local *local,
93} 93}
94 94
95/* protected by RCU */ 95/* protected by RCU */
96static struct sta_info *__sta_info_find(struct ieee80211_local *local, 96struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
97 u8 *addr)
98{ 97{
99 struct sta_info *sta; 98 struct sta_info *sta;
100 99
101 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); 100 sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
102 while (sta) { 101 while (sta) {
103 if (compare_ether_addr(sta->addr, addr) == 0) 102 if (compare_ether_addr(sta->sta.addr, addr) == 0)
104 break; 103 break;
105 sta = rcu_dereference(sta->hnext); 104 sta = rcu_dereference(sta->hnext);
106 } 105 }
107 return sta; 106 return sta;
108} 107}
109 108
110struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr)
111{
112 return __sta_info_find(local, addr);
113}
114EXPORT_SYMBOL(sta_info_get);
115
116struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx, 109struct sta_info *sta_info_get_by_idx(struct ieee80211_local *local, int idx,
117 struct net_device *dev) 110 struct net_device *dev)
118{ 111{
@@ -146,12 +139,12 @@ static void __sta_info_free(struct ieee80211_local *local,
146{ 139{
147 DECLARE_MAC_BUF(mbuf); 140 DECLARE_MAC_BUF(mbuf);
148 141
149 rate_control_free_sta(sta->rate_ctrl, sta->rate_ctrl_priv); 142 rate_control_free_sta(sta);
150 rate_control_put(sta->rate_ctrl); 143 rate_control_put(sta->rate_ctrl);
151 144
152#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 145#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
153 printk(KERN_DEBUG "%s: Destroyed STA %s\n", 146 printk(KERN_DEBUG "%s: Destroyed STA %s\n",
154 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); 147 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr));
155#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 148#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
156 149
157 kfree(sta); 150 kfree(sta);
@@ -219,8 +212,8 @@ void sta_info_destroy(struct sta_info *sta)
219static void sta_info_hash_add(struct ieee80211_local *local, 212static void sta_info_hash_add(struct ieee80211_local *local,
220 struct sta_info *sta) 213 struct sta_info *sta)
221{ 214{
222 sta->hnext = local->sta_hash[STA_HASH(sta->addr)]; 215 sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
223 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->addr)], sta); 216 rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
224} 217}
225 218
226struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, 219struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
@@ -231,20 +224,20 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
231 int i; 224 int i;
232 DECLARE_MAC_BUF(mbuf); 225 DECLARE_MAC_BUF(mbuf);
233 226
234 sta = kzalloc(sizeof(*sta), gfp); 227 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
235 if (!sta) 228 if (!sta)
236 return NULL; 229 return NULL;
237 230
238 spin_lock_init(&sta->lock); 231 spin_lock_init(&sta->lock);
239 spin_lock_init(&sta->flaglock); 232 spin_lock_init(&sta->flaglock);
240 233
241 memcpy(sta->addr, addr, ETH_ALEN); 234 memcpy(sta->sta.addr, addr, ETH_ALEN);
242 sta->local = local; 235 sta->local = local;
243 sta->sdata = sdata; 236 sta->sdata = sdata;
244 237
245 sta->rate_ctrl = rate_control_get(local->rate_ctrl); 238 sta->rate_ctrl = rate_control_get(local->rate_ctrl);
246 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl, 239 sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
247 gfp); 240 &sta->sta, gfp);
248 if (!sta->rate_ctrl_priv) { 241 if (!sta->rate_ctrl_priv) {
249 rate_control_put(sta->rate_ctrl); 242 rate_control_put(sta->rate_ctrl);
250 kfree(sta); 243 kfree(sta);
@@ -271,7 +264,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
271 264
272#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 265#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
273 printk(KERN_DEBUG "%s: Allocated STA %s\n", 266 printk(KERN_DEBUG "%s: Allocated STA %s\n",
274 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->addr)); 267 wiphy_name(local->hw.wiphy), print_mac(mbuf, sta->sta.addr));
275#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 268#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
276 269
277#ifdef CONFIG_MAC80211_MESH 270#ifdef CONFIG_MAC80211_MESH
@@ -300,15 +293,15 @@ int sta_info_insert(struct sta_info *sta)
300 goto out_free; 293 goto out_free;
301 } 294 }
302 295
303 if (WARN_ON(compare_ether_addr(sta->addr, sdata->dev->dev_addr) == 0 || 296 if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 ||
304 is_multicast_ether_addr(sta->addr))) { 297 is_multicast_ether_addr(sta->sta.addr))) {
305 err = -EINVAL; 298 err = -EINVAL;
306 goto out_free; 299 goto out_free;
307 } 300 }
308 301
309 spin_lock_irqsave(&local->sta_lock, flags); 302 spin_lock_irqsave(&local->sta_lock, flags);
310 /* check if STA exists already */ 303 /* check if STA exists already */
311 if (__sta_info_find(local, sta->addr)) { 304 if (sta_info_get(local, sta->sta.addr)) {
312 spin_unlock_irqrestore(&local->sta_lock, flags); 305 spin_unlock_irqrestore(&local->sta_lock, flags);
313 err = -EEXIST; 306 err = -EEXIST;
314 goto out_free; 307 goto out_free;
@@ -319,18 +312,18 @@ int sta_info_insert(struct sta_info *sta)
319 312
320 /* notify driver */ 313 /* notify driver */
321 if (local->ops->sta_notify) { 314 if (local->ops->sta_notify) {
322 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 315 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
323 sdata = container_of(sdata->bss, 316 sdata = container_of(sdata->bss,
324 struct ieee80211_sub_if_data, 317 struct ieee80211_sub_if_data,
325 u.ap); 318 u.ap);
326 319
327 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 320 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
328 STA_NOTIFY_ADD, sta->addr); 321 STA_NOTIFY_ADD, &sta->sta);
329 } 322 }
330 323
331#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 324#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
332 printk(KERN_DEBUG "%s: Inserted STA %s\n", 325 printk(KERN_DEBUG "%s: Inserted STA %s\n",
333 wiphy_name(local->hw.wiphy), print_mac(mac, sta->addr)); 326 wiphy_name(local->hw.wiphy), print_mac(mac, sta->sta.addr));
334#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 327#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
335 328
336 spin_unlock_irqrestore(&local->sta_lock, flags); 329 spin_unlock_irqrestore(&local->sta_lock, flags);
@@ -379,11 +372,12 @@ static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
379{ 372{
380 BUG_ON(!bss); 373 BUG_ON(!bss);
381 374
382 __bss_tim_set(bss, sta->aid); 375 __bss_tim_set(bss, sta->sta.aid);
383 376
384 if (sta->local->ops->set_tim) { 377 if (sta->local->ops->set_tim) {
385 sta->local->tim_in_locked_section = true; 378 sta->local->tim_in_locked_section = true;
386 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 1); 379 sta->local->ops->set_tim(local_to_hw(sta->local),
380 &sta->sta, true);
387 sta->local->tim_in_locked_section = false; 381 sta->local->tim_in_locked_section = false;
388 } 382 }
389} 383}
@@ -404,11 +398,12 @@ static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
404{ 398{
405 BUG_ON(!bss); 399 BUG_ON(!bss);
406 400
407 __bss_tim_clear(bss, sta->aid); 401 __bss_tim_clear(bss, sta->sta.aid);
408 402
409 if (sta->local->ops->set_tim) { 403 if (sta->local->ops->set_tim) {
410 sta->local->tim_in_locked_section = true; 404 sta->local->tim_in_locked_section = true;
411 sta->local->ops->set_tim(local_to_hw(sta->local), sta->aid, 0); 405 sta->local->ops->set_tim(local_to_hw(sta->local),
406 &sta->sta, false);
412 sta->local->tim_in_locked_section = false; 407 sta->local->tim_in_locked_section = false;
413 } 408 }
414} 409}
@@ -424,7 +419,7 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
424 spin_unlock_irqrestore(&sta->local->sta_lock, flags); 419 spin_unlock_irqrestore(&sta->local->sta_lock, flags);
425} 420}
426 421
427void __sta_info_unlink(struct sta_info **sta) 422static void __sta_info_unlink(struct sta_info **sta)
428{ 423{
429 struct ieee80211_local *local = (*sta)->local; 424 struct ieee80211_local *local = (*sta)->local;
430 struct ieee80211_sub_if_data *sdata = (*sta)->sdata; 425 struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
@@ -456,13 +451,13 @@ void __sta_info_unlink(struct sta_info **sta)
456 local->num_sta--; 451 local->num_sta--;
457 452
458 if (local->ops->sta_notify) { 453 if (local->ops->sta_notify) {
459 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 454 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
460 sdata = container_of(sdata->bss, 455 sdata = container_of(sdata->bss,
461 struct ieee80211_sub_if_data, 456 struct ieee80211_sub_if_data,
462 u.ap); 457 u.ap);
463 458
464 local->ops->sta_notify(local_to_hw(local), &sdata->vif, 459 local->ops->sta_notify(local_to_hw(local), &sdata->vif,
465 STA_NOTIFY_REMOVE, (*sta)->addr); 460 STA_NOTIFY_REMOVE, &(*sta)->sta);
466 } 461 }
467 462
468 if (ieee80211_vif_is_mesh(&sdata->vif)) { 463 if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -474,7 +469,7 @@ void __sta_info_unlink(struct sta_info **sta)
474 469
475#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 470#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
476 printk(KERN_DEBUG "%s: Removed STA %s\n", 471 printk(KERN_DEBUG "%s: Removed STA %s\n",
477 wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->addr)); 472 wiphy_name(local->hw.wiphy), print_mac(mbuf, (*sta)->sta.addr));
478#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 473#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
479 474
480 /* 475 /*
@@ -570,7 +565,7 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
570 local->total_ps_buffered--; 565 local->total_ps_buffered--;
571#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 566#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
572 printk(KERN_DEBUG "Buffered frame expired (STA " 567 printk(KERN_DEBUG "Buffered frame expired (STA "
573 "%s)\n", print_mac(mac, sta->addr)); 568 "%s)\n", print_mac(mac, sta->sta.addr));
574#endif 569#endif
575 dev_kfree_skb(skb); 570 dev_kfree_skb(skb);
576 571
@@ -802,3 +797,40 @@ void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata)
802 schedule_work(&local->sta_flush_work); 797 schedule_work(&local->sta_flush_work);
803 spin_unlock_irqrestore(&local->sta_lock, flags); 798 spin_unlock_irqrestore(&local->sta_lock, flags);
804} 799}
800
801void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
802 unsigned long exp_time)
803{
804 struct ieee80211_local *local = sdata->local;
805 struct sta_info *sta, *tmp;
806 LIST_HEAD(tmp_list);
807 DECLARE_MAC_BUF(mac);
808 unsigned long flags;
809
810 spin_lock_irqsave(&local->sta_lock, flags);
811 list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
812 if (time_after(jiffies, sta->last_rx + exp_time)) {
813#ifdef CONFIG_MAC80211_IBSS_DEBUG
814 printk(KERN_DEBUG "%s: expiring inactive STA %s\n",
815 sdata->dev->name, print_mac(mac, sta->sta.addr));
816#endif
817 __sta_info_unlink(&sta);
818 if (sta)
819 list_add(&sta->list, &tmp_list);
820 }
821 spin_unlock_irqrestore(&local->sta_lock, flags);
822
823 list_for_each_entry_safe(sta, tmp, &tmp_list, list)
824 sta_info_destroy(sta);
825}
826
827struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_hw *hw,
828 const u8 *addr)
829{
830 struct sta_info *sta = sta_info_get(hw_to_local(hw), addr);
831
832 if (!sta)
833 return NULL;
834 return &sta->sta;
835}
836EXPORT_SYMBOL(ieee80211_find_sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 109db787ccb7..a6b51862a89d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -167,8 +167,6 @@ struct sta_ampdu_mlme {
167 * @lock: used for locking all fields that require locking, see comments 167 * @lock: used for locking all fields that require locking, see comments
168 * in the header file. 168 * in the header file.
169 * @flaglock: spinlock for flags accesses 169 * @flaglock: spinlock for flags accesses
170 * @ht_info: HT capabilities of this STA
171 * @supp_rates: Bitmap of supported rates (per band)
172 * @addr: MAC address of this STA 170 * @addr: MAC address of this STA
173 * @aid: STA's unique AID (1..2007, 0 = not assigned yet), 171 * @aid: STA's unique AID (1..2007, 0 = not assigned yet),
174 * only used in AP (and IBSS?) mode 172 * only used in AP (and IBSS?) mode
@@ -191,20 +189,15 @@ struct sta_ampdu_mlme {
191 * @last_qual: qual of last received frame from this STA 189 * @last_qual: qual of last received frame from this STA
192 * @last_noise: noise of last received frame from this STA 190 * @last_noise: noise of last received frame from this STA
193 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) 191 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
194 * @wme_rx_queue: TBD
195 * @tx_filtered_count: TBD 192 * @tx_filtered_count: TBD
196 * @tx_retry_failed: TBD 193 * @tx_retry_failed: TBD
197 * @tx_retry_count: TBD 194 * @tx_retry_count: TBD
198 * @tx_num_consecutive_failures: TBD
199 * @tx_num_mpdu_ok: TBD
200 * @tx_num_mpdu_fail: TBD
201 * @fail_avg: moving percentage of failed MSDUs 195 * @fail_avg: moving percentage of failed MSDUs
202 * @tx_packets: number of RX/TX MSDUs 196 * @tx_packets: number of RX/TX MSDUs
203 * @tx_bytes: TBD 197 * @tx_bytes: TBD
204 * @tx_fragments: number of transmitted MPDUs 198 * @tx_fragments: number of transmitted MPDUs
205 * @txrate_idx: TBD 199 * @last_txrate_idx: Index of the last used transmit rate
206 * @last_txrate_idx: TBD 200 * @tid_seq: TBD
207 * @wme_tx_queue: TBD
208 * @ampdu_mlme: TBD 201 * @ampdu_mlme: TBD
209 * @timer_to_tid: identity mapping to ID timers 202 * @timer_to_tid: identity mapping to ID timers
210 * @tid_to_tx_q: map tid to tx queue 203 * @tid_to_tx_q: map tid to tx queue
@@ -217,6 +210,7 @@ struct sta_ampdu_mlme {
217 * @plink_timeout: TBD 210 * @plink_timeout: TBD
218 * @plink_timer: TBD 211 * @plink_timer: TBD
219 * @debugfs: debug filesystem info 212 * @debugfs: debug filesystem info
213 * @sta: station information we share with the driver
220 */ 214 */
221struct sta_info { 215struct sta_info {
222 /* General information, mostly static */ 216 /* General information, mostly static */
@@ -229,10 +223,7 @@ struct sta_info {
229 void *rate_ctrl_priv; 223 void *rate_ctrl_priv;
230 spinlock_t lock; 224 spinlock_t lock;
231 spinlock_t flaglock; 225 spinlock_t flaglock;
232 struct ieee80211_ht_info ht_info; 226
233 u64 supp_rates[IEEE80211_NUM_BANDS];
234 u8 addr[ETH_ALEN];
235 u16 aid;
236 u16 listen_interval; 227 u16 listen_interval;
237 228
238 /* 229 /*
@@ -265,17 +256,10 @@ struct sta_info {
265 int last_qual; 256 int last_qual;
266 int last_noise; 257 int last_noise;
267 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 258 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
268#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
269 unsigned int wme_rx_queue[NUM_RX_DATA_QUEUES];
270#endif
271 259
272 /* Updated from TX status path only, no locking requirements */ 260 /* Updated from TX status path only, no locking requirements */
273 unsigned long tx_filtered_count; 261 unsigned long tx_filtered_count;
274 unsigned long tx_retry_failed, tx_retry_count; 262 unsigned long tx_retry_failed, tx_retry_count;
275 /* TODO: update in generic code not rate control? */
276 u32 tx_num_consecutive_failures;
277 u32 tx_num_mpdu_ok;
278 u32 tx_num_mpdu_fail;
279 /* moving percentage of failed MSDUs */ 263 /* moving percentage of failed MSDUs */
280 unsigned int fail_avg; 264 unsigned int fail_avg;
281 265
@@ -283,12 +267,8 @@ struct sta_info {
283 unsigned long tx_packets; 267 unsigned long tx_packets;
284 unsigned long tx_bytes; 268 unsigned long tx_bytes;
285 unsigned long tx_fragments; 269 unsigned long tx_fragments;
286 int txrate_idx; 270 unsigned int last_txrate_idx;
287 int last_txrate_idx;
288 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; 271 u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
289#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
290 unsigned int wme_tx_queue[NUM_RX_DATA_QUEUES];
291#endif
292 272
293 /* 273 /*
294 * Aggregation information, locked with lock. 274 * Aggregation information, locked with lock.
@@ -319,13 +299,12 @@ struct sta_info {
319 struct dentry *num_ps_buf_frames; 299 struct dentry *num_ps_buf_frames;
320 struct dentry *inactive_ms; 300 struct dentry *inactive_ms;
321 struct dentry *last_seq_ctrl; 301 struct dentry *last_seq_ctrl;
322#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
323 struct dentry *wme_rx_queue;
324 struct dentry *wme_tx_queue;
325#endif
326 struct dentry *agg_status; 302 struct dentry *agg_status;
327 } debugfs; 303 } debugfs;
328#endif 304#endif
305
306 /* keep last! */
307 struct ieee80211_sta sta;
329}; 308};
330 309
331static inline enum plink_state sta_plink_state(struct sta_info *sta) 310static inline enum plink_state sta_plink_state(struct sta_info *sta)
@@ -425,7 +404,7 @@ static inline u32 get_sta_flags(struct sta_info *sta)
425/* 404/*
426 * Get a STA info, must have be under RCU read lock. 405 * Get a STA info, must have be under RCU read lock.
427 */ 406 */
428struct sta_info *sta_info_get(struct ieee80211_local *local, u8 *addr); 407struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr);
429/* 408/*
430 * Get STA info by index, BROKEN! 409 * Get STA info by index, BROKEN!
431 */ 410 */
@@ -451,7 +430,6 @@ int sta_info_insert(struct sta_info *sta);
451 * has already unlinked it. 430 * has already unlinked it.
452 */ 431 */
453void sta_info_unlink(struct sta_info **sta); 432void sta_info_unlink(struct sta_info **sta);
454void __sta_info_unlink(struct sta_info **sta);
455 433
456void sta_info_destroy(struct sta_info *sta); 434void sta_info_destroy(struct sta_info *sta);
457void sta_info_set_tim_bit(struct sta_info *sta); 435void sta_info_set_tim_bit(struct sta_info *sta);
@@ -463,5 +441,7 @@ void sta_info_stop(struct ieee80211_local *local);
463int sta_info_flush(struct ieee80211_local *local, 441int sta_info_flush(struct ieee80211_local *local,
464 struct ieee80211_sub_if_data *sdata); 442 struct ieee80211_sub_if_data *sdata);
465void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata); 443void sta_info_flush_delayed(struct ieee80211_sub_if_data *sdata);
444void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
445 unsigned long exp_time);
466 446
467#endif /* STA_INFO_H */ 447#endif /* STA_INFO_H */
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 995f7af3d25e..34b32bc8f609 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -304,7 +304,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
304 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { 304 key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
305 u8 bcast[ETH_ALEN] = 305 u8 bcast[ETH_ALEN] =
306 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 306 {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
307 u8 *sta_addr = key->sta->addr; 307 u8 *sta_addr = key->sta->sta.addr;
308 308
309 if (is_multicast_ether_addr(ra)) 309 if (is_multicast_ether_addr(ra))
310 sta_addr = bcast; 310 sta_addr = bcast;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4788f7b91f49..0cc2e23f082c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -38,43 +38,6 @@
38 38
39/* misc utils */ 39/* misc utils */
40 40
41#ifdef CONFIG_MAC80211_LOWTX_FRAME_DUMP
42static void ieee80211_dump_frame(const char *ifname, const char *title,
43 const struct sk_buff *skb)
44{
45 const struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
46 unsigned int hdrlen;
47 DECLARE_MAC_BUF(mac);
48
49 printk(KERN_DEBUG "%s: %s (len=%d)", ifname, title, skb->len);
50 if (skb->len < 4) {
51 printk("\n");
52 return;
53 }
54
55 hdrlen = ieee80211_hdrlen(hdr->frame_control);
56 if (hdrlen > skb->len)
57 hdrlen = skb->len;
58 if (hdrlen >= 4)
59 printk(" FC=0x%04x DUR=0x%04x",
60 le16_to_cpu(hdr->frame_control), le16_to_cpu(hdr->duration_id));
61 if (hdrlen >= 10)
62 printk(" A1=%s", print_mac(mac, hdr->addr1));
63 if (hdrlen >= 16)
64 printk(" A2=%s", print_mac(mac, hdr->addr2));
65 if (hdrlen >= 24)
66 printk(" A3=%s", print_mac(mac, hdr->addr3));
67 if (hdrlen >= 30)
68 printk(" A4=%s", print_mac(mac, hdr->addr4));
69 printk("\n");
70}
71#else /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
72static inline void ieee80211_dump_frame(const char *ifname, const char *title,
73 struct sk_buff *skb)
74{
75}
76#endif /* CONFIG_MAC80211_LOWTX_FRAME_DUMP */
77
78static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, 41static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
79 int next_frag_len) 42 int next_frag_len)
80{ 43{
@@ -82,6 +45,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82 struct ieee80211_rate *txrate; 45 struct ieee80211_rate *txrate;
83 struct ieee80211_local *local = tx->local; 46 struct ieee80211_local *local = tx->local;
84 struct ieee80211_supported_band *sband; 47 struct ieee80211_supported_band *sband;
48 struct ieee80211_hdr *hdr;
85 49
86 sband = local->hw.wiphy->bands[tx->channel->band]; 50 sband = local->hw.wiphy->bands[tx->channel->band];
87 txrate = &sband->bitrates[tx->rate_idx]; 51 txrate = &sband->bitrates[tx->rate_idx];
@@ -107,10 +71,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
107 * at the highest possible rate belonging to the PHY rates in the 71 * at the highest possible rate belonging to the PHY rates in the
108 * BSSBasicRateSet 72 * BSSBasicRateSet
109 */ 73 */
110 74 hdr = (struct ieee80211_hdr *)tx->skb->data;
111 if ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL) { 75 if (ieee80211_is_ctl(hdr->frame_control)) {
112 /* TODO: These control frames are not currently sent by 76 /* TODO: These control frames are not currently sent by
113 * 80211.o, but should they be implemented, this function 77 * mac80211, but should they be implemented, this function
114 * needs to be updated to support duration field calculation. 78 * needs to be updated to support duration field calculation.
115 * 79 *
116 * RTS: time needed to transmit pending data/mgmt frame plus 80 * RTS: time needed to transmit pending data/mgmt frame plus
@@ -152,7 +116,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
152 if (r->bitrate > txrate->bitrate) 116 if (r->bitrate > txrate->bitrate)
153 break; 117 break;
154 118
155 if (tx->sdata->basic_rates & BIT(i)) 119 if (tx->sdata->bss_conf.basic_rates & BIT(i))
156 rate = r->bitrate; 120 rate = r->bitrate;
157 121
158 switch (sband->band) { 122 switch (sband->band) {
@@ -201,11 +165,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
201 return cpu_to_le16(dur); 165 return cpu_to_le16(dur);
202} 166}
203 167
204static int inline is_ieee80211_device(struct net_device *dev, 168static int inline is_ieee80211_device(struct ieee80211_local *local,
205 struct net_device *master) 169 struct net_device *dev)
206{ 170{
207 return (wdev_priv(dev->ieee80211_ptr) == 171 return local == wdev_priv(dev->ieee80211_ptr);
208 wdev_priv(master->ieee80211_ptr));
209} 172}
210 173
211/* tx handlers */ 174/* tx handlers */
@@ -213,21 +176,19 @@ static int inline is_ieee80211_device(struct net_device *dev,
213static ieee80211_tx_result debug_noinline 176static ieee80211_tx_result debug_noinline
214ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) 177ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
215{ 178{
216#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 179
217 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 180 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
218#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 181 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
220 u32 sta_flags; 182 u32 sta_flags;
221 183
222 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) 184 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
223 return TX_CONTINUE; 185 return TX_CONTINUE;
224 186
225 if (unlikely(tx->local->sta_sw_scanning) && 187 if (unlikely(tx->local->sw_scanning) &&
226 ((tx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 188 !ieee80211_is_probe_req(hdr->frame_control))
227 (tx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PROBE_REQ))
228 return TX_DROP; 189 return TX_DROP;
229 190
230 if (tx->sdata->vif.type == IEEE80211_IF_TYPE_MESH_POINT) 191 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
231 return TX_CONTINUE; 192 return TX_CONTINUE;
232 193
233 if (tx->flags & IEEE80211_TX_PS_BUFFERED) 194 if (tx->flags & IEEE80211_TX_PS_BUFFERED)
@@ -237,8 +198,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
237 198
238 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { 199 if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
239 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) && 200 if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
240 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 201 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
241 (tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { 202 ieee80211_is_data(hdr->frame_control))) {
242#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 203#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
243 DECLARE_MAC_BUF(mac); 204 DECLARE_MAC_BUF(mac);
244 printk(KERN_DEBUG "%s: dropped data frame to not " 205 printk(KERN_DEBUG "%s: dropped data frame to not "
@@ -249,9 +210,9 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
249 return TX_DROP; 210 return TX_DROP;
250 } 211 }
251 } else { 212 } else {
252 if (unlikely((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 213 if (unlikely(ieee80211_is_data(hdr->frame_control) &&
253 tx->local->num_sta == 0 && 214 tx->local->num_sta == 0 &&
254 tx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS)) { 215 tx->sdata->vif.type != NL80211_IFTYPE_ADHOC)) {
255 /* 216 /*
256 * No associated STAs - no need to send multicast 217 * No associated STAs - no need to send multicast
257 * frames. 218 * frames.
@@ -282,7 +243,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
282 243
283 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 244 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
284 struct ieee80211_if_ap *ap; 245 struct ieee80211_if_ap *ap;
285 if (sdata->vif.type != IEEE80211_IF_TYPE_AP) 246 if (sdata->vif.type != NL80211_IFTYPE_AP)
286 continue; 247 continue;
287 ap = &sdata->u.ap; 248 ap = &sdata->u.ap;
288 skb = skb_dequeue(&ap->ps_bc_buf); 249 skb = skb_dequeue(&ap->ps_bc_buf);
@@ -315,6 +276,7 @@ static ieee80211_tx_result
315ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) 276ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
316{ 277{
317 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 278 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
279 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
318 280
319 /* 281 /*
320 * broadcast/multicast frame 282 * broadcast/multicast frame
@@ -329,7 +291,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
329 return TX_CONTINUE; 291 return TX_CONTINUE;
330 292
331 /* no buffering for ordered frames */ 293 /* no buffering for ordered frames */
332 if (tx->fc & IEEE80211_FCTL_ORDER) 294 if (ieee80211_has_order(hdr->frame_control))
333 return TX_CONTINUE; 295 return TX_CONTINUE;
334 296
335 /* no stations in PS mode */ 297 /* no stations in PS mode */
@@ -367,12 +329,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
367{ 329{
368 struct sta_info *sta = tx->sta; 330 struct sta_info *sta = tx->sta;
369 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 331 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
332 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
370 u32 staflags; 333 u32 staflags;
371 DECLARE_MAC_BUF(mac); 334 DECLARE_MAC_BUF(mac);
372 335
373 if (unlikely(!sta || 336 if (unlikely(!sta || ieee80211_is_probe_resp(hdr->frame_control)))
374 ((tx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
375 (tx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)))
376 return TX_CONTINUE; 337 return TX_CONTINUE;
377 338
378 staflags = get_sta_flags(sta); 339 staflags = get_sta_flags(sta);
@@ -382,7 +343,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
382#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 343#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
383 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries " 344 printk(KERN_DEBUG "STA %s aid %d: PS buffer (entries "
384 "before %d)\n", 345 "before %d)\n",
385 print_mac(mac, sta->addr), sta->aid, 346 print_mac(mac, sta->sta.addr), sta->sta.aid,
386 skb_queue_len(&sta->ps_tx_buf)); 347 skb_queue_len(&sta->ps_tx_buf));
387#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 348#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
388 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 349 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -393,7 +354,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
393 if (net_ratelimit()) { 354 if (net_ratelimit()) {
394 printk(KERN_DEBUG "%s: STA %s TX " 355 printk(KERN_DEBUG "%s: STA %s TX "
395 "buffer full - dropping oldest frame\n", 356 "buffer full - dropping oldest frame\n",
396 tx->dev->name, print_mac(mac, sta->addr)); 357 tx->dev->name, print_mac(mac, sta->sta.addr));
397 } 358 }
398#endif 359#endif
399 dev_kfree_skb(old); 360 dev_kfree_skb(old);
@@ -412,7 +373,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
412 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) { 373 else if (unlikely(test_sta_flags(sta, WLAN_STA_PS))) {
413 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll " 374 printk(KERN_DEBUG "%s: STA %s in PS mode, but pspoll "
414 "set -> send frame\n", tx->dev->name, 375 "set -> send frame\n", tx->dev->name,
415 print_mac(mac, sta->addr)); 376 print_mac(mac, sta->sta.addr));
416 } 377 }
417#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 378#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
418 clear_sta_flags(sta, WLAN_STA_PSPOLL); 379 clear_sta_flags(sta, WLAN_STA_PSPOLL);
@@ -437,7 +398,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
437{ 398{
438 struct ieee80211_key *key; 399 struct ieee80211_key *key;
439 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 400 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
440 u16 fc = tx->fc; 401 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
441 402
442 if (unlikely(tx->skb->do_not_encrypt)) 403 if (unlikely(tx->skb->do_not_encrypt))
443 tx->key = NULL; 404 tx->key = NULL;
@@ -454,22 +415,16 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
454 tx->key = NULL; 415 tx->key = NULL;
455 416
456 if (tx->key) { 417 if (tx->key) {
457 u16 ftype, stype;
458
459 tx->key->tx_rx_count++; 418 tx->key->tx_rx_count++;
460 /* TODO: add threshold stuff again */ 419 /* TODO: add threshold stuff again */
461 420
462 switch (tx->key->conf.alg) { 421 switch (tx->key->conf.alg) {
463 case ALG_WEP: 422 case ALG_WEP:
464 ftype = fc & IEEE80211_FCTL_FTYPE; 423 if (ieee80211_is_auth(hdr->frame_control))
465 stype = fc & IEEE80211_FCTL_STYPE;
466
467 if (ftype == IEEE80211_FTYPE_MGMT &&
468 stype == IEEE80211_STYPE_AUTH)
469 break; 424 break;
470 case ALG_TKIP: 425 case ALG_TKIP:
471 case ALG_CCMP: 426 case ALG_CCMP:
472 if (!WLAN_FC_DATA_PRESENT(fc)) 427 if (!ieee80211_is_data_present(hdr->frame_control))
473 tx->key = NULL; 428 tx->key = NULL;
474 break; 429 break;
475 } 430 }
@@ -491,7 +446,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
491 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 446 sband = tx->local->hw.wiphy->bands[tx->channel->band];
492 447
493 if (likely(tx->rate_idx < 0)) { 448 if (likely(tx->rate_idx < 0)) {
494 rate_control_get_rate(tx->dev, sband, tx->skb, &rsel); 449 rate_control_get_rate(tx->sdata, sband, tx->sta,
450 tx->skb, &rsel);
451 if (tx->sta)
452 tx->sta->last_txrate_idx = rsel.rate_idx;
495 tx->rate_idx = rsel.rate_idx; 453 tx->rate_idx = rsel.rate_idx;
496 if (unlikely(rsel.probe_idx >= 0)) { 454 if (unlikely(rsel.probe_idx >= 0)) {
497 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; 455 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
@@ -535,7 +493,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
535 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 493 sband = tx->local->hw.wiphy->bands[tx->channel->band];
536 494
537 if (tx->sta) 495 if (tx->sta)
538 info->control.aid = tx->sta->aid; 496 info->control.sta = &tx->sta->sta;
539 497
540 if (!info->control.retry_limit) { 498 if (!info->control.retry_limit) {
541 if (!is_multicast_ether_addr(hdr->addr1)) { 499 if (!is_multicast_ether_addr(hdr->addr1)) {
@@ -601,7 +559,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
601 for (idx = 0; idx < sband->n_bitrates; idx++) { 559 for (idx = 0; idx < sband->n_bitrates; idx++) {
602 if (sband->bitrates[idx].bitrate > rate->bitrate) 560 if (sband->bitrates[idx].bitrate > rate->bitrate)
603 continue; 561 continue;
604 if (tx->sdata->basic_rates & BIT(idx) && 562 if (tx->sdata->bss_conf.basic_rates & BIT(idx) &&
605 (baserate < 0 || 563 (baserate < 0 ||
606 (sband->bitrates[baserate].bitrate 564 (sband->bitrates[baserate].bitrate
607 < sband->bitrates[idx].bitrate))) 565 < sband->bitrates[idx].bitrate)))
@@ -615,7 +573,7 @@ ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
615 } 573 }
616 574
617 if (tx->sta) 575 if (tx->sta)
618 info->control.aid = tx->sta->aid; 576 info->control.sta = &tx->sta->sta;
619 577
620 return TX_CONTINUE; 578 return TX_CONTINUE;
621} 579}
@@ -629,7 +587,14 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
629 u8 *qc; 587 u8 *qc;
630 int tid; 588 int tid;
631 589
632 /* only for injected frames */ 590 /*
591 * Packet injection may want to control the sequence
592 * number, if we have no matching interface then we
593 * neither assign one ourselves nor ask the driver to.
594 */
595 if (unlikely(!info->control.vif))
596 return TX_CONTINUE;
597
633 if (unlikely(ieee80211_is_ctl(hdr->frame_control))) 598 if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
634 return TX_CONTINUE; 599 return TX_CONTINUE;
635 600
@@ -854,7 +819,6 @@ __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
854 sband = tx->local->hw.wiphy->bands[tx->channel->band]; 819 sband = tx->local->hw.wiphy->bands[tx->channel->band];
855 820
856 skb->do_not_encrypt = 1; 821 skb->do_not_encrypt = 1;
857 info->flags |= IEEE80211_TX_CTL_INJECTED;
858 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 822 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
859 823
860 /* 824 /*
@@ -986,7 +950,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
986 950
987 /* process and remove the injection radiotap header */ 951 /* process and remove the injection radiotap header */
988 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 952 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
989 if (unlikely(sdata->vif.type == IEEE80211_IF_TYPE_MNTR)) { 953 if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
990 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP) 954 if (__ieee80211_parse_tx_radiotap(tx, skb) == TX_DROP)
991 return TX_DROP; 955 return TX_DROP;
992 956
@@ -1000,7 +964,6 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1000 hdr = (struct ieee80211_hdr *) skb->data; 964 hdr = (struct ieee80211_hdr *) skb->data;
1001 965
1002 tx->sta = sta_info_get(local, hdr->addr1); 966 tx->sta = sta_info_get(local, hdr->addr1);
1003 tx->fc = le16_to_cpu(hdr->frame_control);
1004 967
1005 if (is_multicast_ether_addr(hdr->addr1)) { 968 if (is_multicast_ether_addr(hdr->addr1)) {
1006 tx->flags &= ~IEEE80211_TX_UNICAST; 969 tx->flags &= ~IEEE80211_TX_UNICAST;
@@ -1025,7 +988,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1025 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT)) 988 else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
1026 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; 989 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1027 990
1028 hdrlen = ieee80211_get_hdrlen(tx->fc); 991 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1029 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) { 992 if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
1030 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)]; 993 u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
1031 tx->ethertype = (pos[0] << 8) | pos[1]; 994 tx->ethertype = (pos[0] << 8) | pos[1];
@@ -1038,14 +1001,14 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1038/* 1001/*
1039 * NB: @tx is uninitialised when passed in here 1002 * NB: @tx is uninitialised when passed in here
1040 */ 1003 */
1041static int ieee80211_tx_prepare(struct ieee80211_tx_data *tx, 1004static int ieee80211_tx_prepare(struct ieee80211_local *local,
1042 struct sk_buff *skb, 1005 struct ieee80211_tx_data *tx,
1043 struct net_device *mdev) 1006 struct sk_buff *skb)
1044{ 1007{
1045 struct net_device *dev; 1008 struct net_device *dev;
1046 1009
1047 dev = dev_get_by_index(&init_net, skb->iif); 1010 dev = dev_get_by_index(&init_net, skb->iif);
1048 if (unlikely(dev && !is_ieee80211_device(dev, mdev))) { 1011 if (unlikely(dev && !is_ieee80211_device(local, dev))) {
1049 dev_put(dev); 1012 dev_put(dev);
1050 dev = NULL; 1013 dev = NULL;
1051 } 1014 }
@@ -1068,8 +1031,6 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1068 return IEEE80211_TX_AGAIN; 1031 return IEEE80211_TX_AGAIN;
1069 info = IEEE80211_SKB_CB(skb); 1032 info = IEEE80211_SKB_CB(skb);
1070 1033
1071 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1072 "TX to low-level driver", skb);
1073 ret = local->ops->tx(local_to_hw(local), skb); 1034 ret = local->ops->tx(local_to_hw(local), skb);
1074 if (ret) 1035 if (ret)
1075 return IEEE80211_TX_AGAIN; 1036 return IEEE80211_TX_AGAIN;
@@ -1099,9 +1060,6 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
1099 ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; 1060 ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
1100 } 1061 }
1101 1062
1102 ieee80211_dump_frame(wiphy_name(local->hw.wiphy),
1103 "TX to low-level driver",
1104 tx->extra_frag[i]);
1105 ret = local->ops->tx(local_to_hw(local), 1063 ret = local->ops->tx(local_to_hw(local),
1106 tx->extra_frag[i]); 1064 tx->extra_frag[i]);
1107 if (ret) 1065 if (ret)
@@ -1300,17 +1258,24 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1300int ieee80211_master_start_xmit(struct sk_buff *skb, 1258int ieee80211_master_start_xmit(struct sk_buff *skb,
1301 struct net_device *dev) 1259 struct net_device *dev)
1302{ 1260{
1261 struct ieee80211_master_priv *mpriv = netdev_priv(dev);
1262 struct ieee80211_local *local = mpriv->local;
1303 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1263 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1304 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1264 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1305 struct net_device *odev = NULL; 1265 struct net_device *odev = NULL;
1306 struct ieee80211_sub_if_data *osdata; 1266 struct ieee80211_sub_if_data *osdata;
1307 int headroom; 1267 int headroom;
1308 bool may_encrypt; 1268 bool may_encrypt;
1269 enum {
1270 NOT_MONITOR,
1271 FOUND_SDATA,
1272 UNKNOWN_ADDRESS,
1273 } monitor_iface = NOT_MONITOR;
1309 int ret; 1274 int ret;
1310 1275
1311 if (skb->iif) 1276 if (skb->iif)
1312 odev = dev_get_by_index(&init_net, skb->iif); 1277 odev = dev_get_by_index(&init_net, skb->iif);
1313 if (unlikely(odev && !is_ieee80211_device(odev, dev))) { 1278 if (unlikely(odev && !is_ieee80211_device(local, odev))) {
1314 dev_put(odev); 1279 dev_put(odev);
1315 odev = NULL; 1280 odev = NULL;
1316 } 1281 }
@@ -1335,12 +1300,56 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1335 if (is_multicast_ether_addr(hdr->addr3)) 1300 if (is_multicast_ether_addr(hdr->addr3))
1336 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); 1301 memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
1337 else 1302 else
1338 if (mesh_nexthop_lookup(skb, odev)) 1303 if (mesh_nexthop_lookup(skb, osdata))
1339 return 0; 1304 return 0;
1340 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1305 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
1341 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta, 1306 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
1342 fwded_frames); 1307 fwded_frames);
1343 } 1308 }
1309 } else if (unlikely(osdata->vif.type == NL80211_IFTYPE_MONITOR)) {
1310 struct ieee80211_sub_if_data *sdata;
1311 struct ieee80211_local *local = osdata->local;
1312 struct ieee80211_hdr *hdr;
1313 int hdrlen;
1314 u16 len_rthdr;
1315
1316 info->flags |= IEEE80211_TX_CTL_INJECTED;
1317 monitor_iface = UNKNOWN_ADDRESS;
1318
1319 len_rthdr = ieee80211_get_radiotap_len(skb->data);
1320 hdr = (struct ieee80211_hdr *)skb->data + len_rthdr;
1321 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1322
1323 /* check the header is complete in the frame */
1324 if (likely(skb->len >= len_rthdr + hdrlen)) {
1325 /*
1326 * We process outgoing injected frames that have a
1327 * local address we handle as though they are our
1328 * own frames.
1329 * This code here isn't entirely correct, the local
1330 * MAC address is not necessarily enough to find
1331 * the interface to use; for that proper VLAN/WDS
1332 * support we will need a different mechanism.
1333 */
1334
1335 rcu_read_lock();
1336 list_for_each_entry_rcu(sdata, &local->interfaces,
1337 list) {
1338 if (!netif_running(sdata->dev))
1339 continue;
1340 if (compare_ether_addr(sdata->dev->dev_addr,
1341 hdr->addr2)) {
1342 dev_hold(sdata->dev);
1343 dev_put(odev);
1344 osdata = sdata;
1345 odev = osdata->dev;
1346 skb->iif = sdata->dev->ifindex;
1347 monitor_iface = FOUND_SDATA;
1348 break;
1349 }
1350 }
1351 rcu_read_unlock();
1352 }
1344 } 1353 }
1345 1354
1346 may_encrypt = !skb->do_not_encrypt; 1355 may_encrypt = !skb->do_not_encrypt;
@@ -1357,7 +1366,12 @@ int ieee80211_master_start_xmit(struct sk_buff *skb,
1357 return 0; 1366 return 0;
1358 } 1367 }
1359 1368
1360 info->control.vif = &osdata->vif; 1369 if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1370 osdata = container_of(osdata->bss,
1371 struct ieee80211_sub_if_data,
1372 u.ap);
1373 if (likely(monitor_iface != UNKNOWN_ADDRESS))
1374 info->control.vif = &osdata->vif;
1361 ret = ieee80211_tx(odev, skb); 1375 ret = ieee80211_tx(odev, skb);
1362 dev_put(odev); 1376 dev_put(odev);
1363 1377
@@ -1437,8 +1451,8 @@ fail:
1437int ieee80211_subif_start_xmit(struct sk_buff *skb, 1451int ieee80211_subif_start_xmit(struct sk_buff *skb,
1438 struct net_device *dev) 1452 struct net_device *dev)
1439{ 1453{
1440 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1454 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1441 struct ieee80211_sub_if_data *sdata; 1455 struct ieee80211_local *local = sdata->local;
1442 int ret = 1, head_need; 1456 int ret = 1, head_need;
1443 u16 ethertype, hdrlen, meshhdrlen = 0; 1457 u16 ethertype, hdrlen, meshhdrlen = 0;
1444 __le16 fc; 1458 __le16 fc;
@@ -1450,7 +1464,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1450 struct sta_info *sta; 1464 struct sta_info *sta;
1451 u32 sta_flags = 0; 1465 u32 sta_flags = 0;
1452 1466
1453 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1454 if (unlikely(skb->len < ETH_HLEN)) { 1467 if (unlikely(skb->len < ETH_HLEN)) {
1455 ret = 0; 1468 ret = 0;
1456 goto fail; 1469 goto fail;
@@ -1465,8 +1478,8 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1465 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); 1478 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
1466 1479
1467 switch (sdata->vif.type) { 1480 switch (sdata->vif.type) {
1468 case IEEE80211_IF_TYPE_AP: 1481 case NL80211_IFTYPE_AP:
1469 case IEEE80211_IF_TYPE_VLAN: 1482 case NL80211_IFTYPE_AP_VLAN:
1470 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); 1483 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
1471 /* DA BSSID SA */ 1484 /* DA BSSID SA */
1472 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1485 memcpy(hdr.addr1, skb->data, ETH_ALEN);
@@ -1474,7 +1487,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1474 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN); 1487 memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
1475 hdrlen = 24; 1488 hdrlen = 24;
1476 break; 1489 break;
1477 case IEEE80211_IF_TYPE_WDS: 1490 case NL80211_IFTYPE_WDS:
1478 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1491 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1479 /* RA TA DA SA */ 1492 /* RA TA DA SA */
1480 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN); 1493 memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
@@ -1484,24 +1497,56 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1484 hdrlen = 30; 1497 hdrlen = 30;
1485 break; 1498 break;
1486#ifdef CONFIG_MAC80211_MESH 1499#ifdef CONFIG_MAC80211_MESH
1487 case IEEE80211_IF_TYPE_MESH_POINT: 1500 case NL80211_IFTYPE_MESH_POINT:
1488 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); 1501 fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
1489 /* RA TA DA SA */ 1502 if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
1490 memset(hdr.addr1, 0, ETH_ALEN);
1491 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1492 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1493 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1494 if (!sdata->u.sta.mshcfg.dot11MeshTTL) {
1495 /* Do not send frames with mesh_ttl == 0 */ 1503 /* Do not send frames with mesh_ttl == 0 */
1496 sdata->u.sta.mshstats.dropped_frames_ttl++; 1504 sdata->u.mesh.mshstats.dropped_frames_ttl++;
1497 ret = 0; 1505 ret = 0;
1498 goto fail; 1506 goto fail;
1499 } 1507 }
1500 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata); 1508 memset(&mesh_hdr, 0, sizeof(mesh_hdr));
1509
1510 if (compare_ether_addr(dev->dev_addr,
1511 skb->data + ETH_ALEN) == 0) {
1512 /* RA TA DA SA */
1513 memset(hdr.addr1, 0, ETH_ALEN);
1514 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1515 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1516 memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
1517 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata);
1518 } else {
1519 /* packet from other interface */
1520 struct mesh_path *mppath;
1521
1522 memset(hdr.addr1, 0, ETH_ALEN);
1523 memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
1524 memcpy(hdr.addr4, dev->dev_addr, ETH_ALEN);
1525
1526 if (is_multicast_ether_addr(skb->data))
1527 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1528 else {
1529 rcu_read_lock();
1530 mppath = mpp_path_lookup(skb->data, sdata);
1531 if (mppath)
1532 memcpy(hdr.addr3, mppath->mpp, ETH_ALEN);
1533 else
1534 memset(hdr.addr3, 0xff, ETH_ALEN);
1535 rcu_read_unlock();
1536 }
1537
1538 mesh_hdr.flags |= MESH_FLAGS_AE_A5_A6;
1539 mesh_hdr.ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
1540 put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &mesh_hdr.seqnum);
1541 memcpy(mesh_hdr.eaddr1, skb->data, ETH_ALEN);
1542 memcpy(mesh_hdr.eaddr2, skb->data + ETH_ALEN, ETH_ALEN);
1543 sdata->u.mesh.mesh_seqnum++;
1544 meshhdrlen = 18;
1545 }
1501 hdrlen = 30; 1546 hdrlen = 30;
1502 break; 1547 break;
1503#endif 1548#endif
1504 case IEEE80211_IF_TYPE_STA: 1549 case NL80211_IFTYPE_STATION:
1505 fc |= cpu_to_le16(IEEE80211_FCTL_TODS); 1550 fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
1506 /* BSSID SA DA */ 1551 /* BSSID SA DA */
1507 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN); 1552 memcpy(hdr.addr1, sdata->u.sta.bssid, ETH_ALEN);
@@ -1509,7 +1554,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1509 memcpy(hdr.addr3, skb->data, ETH_ALEN); 1554 memcpy(hdr.addr3, skb->data, ETH_ALEN);
1510 hdrlen = 24; 1555 hdrlen = 24;
1511 break; 1556 break;
1512 case IEEE80211_IF_TYPE_IBSS: 1557 case NL80211_IFTYPE_ADHOC:
1513 /* DA SA BSSID */ 1558 /* DA SA BSSID */
1514 memcpy(hdr.addr1, skb->data, ETH_ALEN); 1559 memcpy(hdr.addr1, skb->data, ETH_ALEN);
1515 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); 1560 memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
@@ -1588,19 +1633,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1588 nh_pos -= skip_header_bytes; 1633 nh_pos -= skip_header_bytes;
1589 h_pos -= skip_header_bytes; 1634 h_pos -= skip_header_bytes;
1590 1635
1591 /* TODO: implement support for fragments so that there is no need to
1592 * reallocate and copy payload; it might be enough to support one
1593 * extra fragment that would be copied in the beginning of the frame
1594 * data.. anyway, it would be nice to include this into skb structure
1595 * somehow
1596 *
1597 * There are few options for this:
1598 * use skb->cb as an extra space for 802.11 header
1599 * allocate new buffer if not enough headroom
1600 * make sure that there is enough headroom in every skb by increasing
1601 * build in headroom in __dev_alloc_skb() (linux/skbuff.h) and
1602 * alloc_skb() (net/core/skbuff.c)
1603 */
1604 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb); 1636 head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);
1605 1637
1606 /* 1638 /*
@@ -1823,10 +1855,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1823 struct rate_selection rsel; 1855 struct rate_selection rsel;
1824 struct beacon_data *beacon; 1856 struct beacon_data *beacon;
1825 struct ieee80211_supported_band *sband; 1857 struct ieee80211_supported_band *sband;
1826 struct ieee80211_mgmt *mgmt;
1827 int *num_beacons;
1828 enum ieee80211_band band = local->hw.conf.channel->band; 1858 enum ieee80211_band band = local->hw.conf.channel->band;
1829 u8 *pos;
1830 1859
1831 sband = local->hw.wiphy->bands[band]; 1860 sband = local->hw.wiphy->bands[band];
1832 1861
@@ -1835,7 +1864,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1835 sdata = vif_to_sdata(vif); 1864 sdata = vif_to_sdata(vif);
1836 bdev = sdata->dev; 1865 bdev = sdata->dev;
1837 1866
1838 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 1867 if (sdata->vif.type == NL80211_IFTYPE_AP) {
1839 ap = &sdata->u.ap; 1868 ap = &sdata->u.ap;
1840 beacon = rcu_dereference(ap->beacon); 1869 beacon = rcu_dereference(ap->beacon);
1841 if (ap && beacon) { 1870 if (ap && beacon) {
@@ -1873,11 +1902,9 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1873 if (beacon->tail) 1902 if (beacon->tail)
1874 memcpy(skb_put(skb, beacon->tail_len), 1903 memcpy(skb_put(skb, beacon->tail_len),
1875 beacon->tail, beacon->tail_len); 1904 beacon->tail, beacon->tail_len);
1876
1877 num_beacons = &ap->num_beacons;
1878 } else 1905 } else
1879 goto out; 1906 goto out;
1880 } else if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 1907 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1881 struct ieee80211_hdr *hdr; 1908 struct ieee80211_hdr *hdr;
1882 ifsta = &sdata->u.sta; 1909 ifsta = &sdata->u.sta;
1883 1910
@@ -1889,11 +1916,13 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1889 goto out; 1916 goto out;
1890 1917
1891 hdr = (struct ieee80211_hdr *) skb->data; 1918 hdr = (struct ieee80211_hdr *) skb->data;
1892 hdr->frame_control = IEEE80211_FC(IEEE80211_FTYPE_MGMT, 1919 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
1893 IEEE80211_STYPE_BEACON); 1920 IEEE80211_STYPE_BEACON);
1894 1921
1895 num_beacons = &ifsta->num_beacons;
1896 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 1922 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
1923 struct ieee80211_mgmt *mgmt;
1924 u8 *pos;
1925
1897 /* headroom, head length, tail length and maximum TIM length */ 1926 /* headroom, head length, tail length and maximum TIM length */
1898 skb = dev_alloc_skb(local->tx_headroom + 400); 1927 skb = dev_alloc_skb(local->tx_headroom + 400);
1899 if (!skb) 1928 if (!skb)
@@ -1916,9 +1945,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1916 *pos++ = WLAN_EID_SSID; 1945 *pos++ = WLAN_EID_SSID;
1917 *pos++ = 0x0; 1946 *pos++ = 0x0;
1918 1947
1919 mesh_mgmt_ies_add(skb, sdata->dev); 1948 mesh_mgmt_ies_add(skb, sdata);
1920
1921 num_beacons = &sdata->u.sta.num_beacons;
1922 } else { 1949 } else {
1923 WARN_ON(1); 1950 WARN_ON(1);
1924 goto out; 1951 goto out;
@@ -1929,7 +1956,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1929 skb->do_not_encrypt = 1; 1956 skb->do_not_encrypt = 1;
1930 1957
1931 info->band = band; 1958 info->band = band;
1932 rate_control_get_rate(local->mdev, sband, skb, &rsel); 1959 rate_control_get_rate(sdata, sband, NULL, skb, &rsel);
1933 1960
1934 if (unlikely(rsel.rate_idx < 0)) { 1961 if (unlikely(rsel.rate_idx < 0)) {
1935 if (net_ratelimit()) { 1962 if (net_ratelimit()) {
@@ -1955,7 +1982,6 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
1955 info->antenna_sel_tx = local->hw.conf.antenna_sel_tx; 1982 info->antenna_sel_tx = local->hw.conf.antenna_sel_tx;
1956 info->control.retry_limit = 1; 1983 info->control.retry_limit = 1;
1957 1984
1958 (*num_beacons)++;
1959out: 1985out:
1960 rcu_read_unlock(); 1986 rcu_read_unlock();
1961 return skb; 1987 return skb;
@@ -2017,7 +2043,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2017 rcu_read_lock(); 2043 rcu_read_lock();
2018 beacon = rcu_dereference(bss->beacon); 2044 beacon = rcu_dereference(bss->beacon);
2019 2045
2020 if (sdata->vif.type != IEEE80211_IF_TYPE_AP || !beacon || !beacon->head) 2046 if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
2021 goto out; 2047 goto out;
2022 2048
2023 if (bss->dtim_count != 0) 2049 if (bss->dtim_count != 0)
@@ -2039,7 +2065,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2039 cpu_to_le16(IEEE80211_FCTL_MOREDATA); 2065 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
2040 } 2066 }
2041 2067
2042 if (!ieee80211_tx_prepare(&tx, skb, local->mdev)) 2068 if (!ieee80211_tx_prepare(local, &tx, skb))
2043 break; 2069 break;
2044 dev_kfree_skb_any(skb); 2070 dev_kfree_skb_any(skb);
2045 } 2071 }
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0d463c80c404..f32561ec224c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -43,7 +43,7 @@ const unsigned char bridge_tunnel_header[] __aligned(2) =
43 43
44 44
45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 45u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
46 enum ieee80211_if_types type) 46 enum nl80211_iftype type)
47{ 47{
48 __le16 fc = hdr->frame_control; 48 __le16 fc = hdr->frame_control;
49 49
@@ -77,10 +77,10 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
77 77
78 if (ieee80211_is_back_req(fc)) { 78 if (ieee80211_is_back_req(fc)) {
79 switch (type) { 79 switch (type) {
80 case IEEE80211_IF_TYPE_STA: 80 case NL80211_IFTYPE_STATION:
81 return hdr->addr2; 81 return hdr->addr2;
82 case IEEE80211_IF_TYPE_AP: 82 case NL80211_IFTYPE_AP:
83 case IEEE80211_IF_TYPE_VLAN: 83 case NL80211_IFTYPE_AP_VLAN:
84 return hdr->addr1; 84 return hdr->addr1;
85 default: 85 default:
86 break; /* fall through to the return */ 86 break; /* fall through to the return */
@@ -91,45 +91,6 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
91 return NULL; 91 return NULL;
92} 92}
93 93
94int ieee80211_get_hdrlen(u16 fc)
95{
96 int hdrlen = 24;
97
98 switch (fc & IEEE80211_FCTL_FTYPE) {
99 case IEEE80211_FTYPE_DATA:
100 if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
101 hdrlen = 30; /* Addr4 */
102 /*
103 * The QoS Control field is two bytes and its presence is
104 * indicated by the IEEE80211_STYPE_QOS_DATA bit. Add 2 to
105 * hdrlen if that bit is set.
106 * This works by masking out the bit and shifting it to
107 * bit position 1 so the result has the value 0 or 2.
108 */
109 hdrlen += (fc & IEEE80211_STYPE_QOS_DATA)
110 >> (ilog2(IEEE80211_STYPE_QOS_DATA)-1);
111 break;
112 case IEEE80211_FTYPE_CTL:
113 /*
114 * ACK and CTS are 10 bytes, all others 16. To see how
115 * to get this condition consider
116 * subtype mask: 0b0000000011110000 (0x00F0)
117 * ACK subtype: 0b0000000011010000 (0x00D0)
118 * CTS subtype: 0b0000000011000000 (0x00C0)
119 * bits that matter: ^^^ (0x00E0)
120 * value of those: 0b0000000011000000 (0x00C0)
121 */
122 if ((fc & 0xE0) == 0xC0)
123 hdrlen = 10;
124 else
125 hdrlen = 16;
126 break;
127 }
128
129 return hdrlen;
130}
131EXPORT_SYMBOL(ieee80211_get_hdrlen);
132
133unsigned int ieee80211_hdrlen(__le16 fc) 94unsigned int ieee80211_hdrlen(__le16 fc)
134{ 95{
135 unsigned int hdrlen = 24; 96 unsigned int hdrlen = 24;
@@ -270,16 +231,21 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
270 struct ieee80211_rate *rate) 231 struct ieee80211_rate *rate)
271{ 232{
272 struct ieee80211_local *local = hw_to_local(hw); 233 struct ieee80211_local *local = hw_to_local(hw);
273 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 234 struct ieee80211_sub_if_data *sdata;
274 u16 dur; 235 u16 dur;
275 int erp; 236 int erp;
237 bool short_preamble = false;
276 238
277 erp = 0; 239 erp = 0;
278 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 240 if (vif) {
279 erp = rate->flags & IEEE80211_RATE_ERP_G; 241 sdata = vif_to_sdata(vif);
242 short_preamble = sdata->bss_conf.use_short_preamble;
243 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
244 erp = rate->flags & IEEE80211_RATE_ERP_G;
245 }
280 246
281 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, 247 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp,
282 sdata->bss_conf.use_short_preamble); 248 short_preamble);
283 249
284 return cpu_to_le16(dur); 250 return cpu_to_le16(dur);
285} 251}
@@ -291,7 +257,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
291{ 257{
292 struct ieee80211_local *local = hw_to_local(hw); 258 struct ieee80211_local *local = hw_to_local(hw);
293 struct ieee80211_rate *rate; 259 struct ieee80211_rate *rate;
294 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 260 struct ieee80211_sub_if_data *sdata;
295 bool short_preamble; 261 bool short_preamble;
296 int erp; 262 int erp;
297 u16 dur; 263 u16 dur;
@@ -299,13 +265,17 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
299 265
300 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 266 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
301 267
302 short_preamble = sdata->bss_conf.use_short_preamble; 268 short_preamble = false;
303 269
304 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; 270 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
305 271
306 erp = 0; 272 erp = 0;
307 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 273 if (vif) {
308 erp = rate->flags & IEEE80211_RATE_ERP_G; 274 sdata = vif_to_sdata(vif);
275 short_preamble = sdata->bss_conf.use_short_preamble;
276 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
277 erp = rate->flags & IEEE80211_RATE_ERP_G;
278 }
309 279
310 /* CTS duration */ 280 /* CTS duration */
311 dur = ieee80211_frame_duration(local, 10, rate->bitrate, 281 dur = ieee80211_frame_duration(local, 10, rate->bitrate,
@@ -328,7 +298,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
328{ 298{
329 struct ieee80211_local *local = hw_to_local(hw); 299 struct ieee80211_local *local = hw_to_local(hw);
330 struct ieee80211_rate *rate; 300 struct ieee80211_rate *rate;
331 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 301 struct ieee80211_sub_if_data *sdata;
332 bool short_preamble; 302 bool short_preamble;
333 int erp; 303 int erp;
334 u16 dur; 304 u16 dur;
@@ -336,12 +306,16 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
336 306
337 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 307 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
338 308
339 short_preamble = sdata->bss_conf.use_short_preamble; 309 short_preamble = false;
340 310
341 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx]; 311 rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];
342 erp = 0; 312 erp = 0;
343 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) 313 if (vif) {
344 erp = rate->flags & IEEE80211_RATE_ERP_G; 314 sdata = vif_to_sdata(vif);
315 short_preamble = sdata->bss_conf.use_short_preamble;
316 if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
317 erp = rate->flags & IEEE80211_RATE_ERP_G;
318 }
345 319
346 /* Data frame duration */ 320 /* Data frame duration */
347 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, 321 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate,
@@ -386,6 +360,13 @@ void ieee80211_stop_queues(struct ieee80211_hw *hw)
386} 360}
387EXPORT_SYMBOL(ieee80211_stop_queues); 361EXPORT_SYMBOL(ieee80211_stop_queues);
388 362
363int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
364{
365 struct ieee80211_local *local = hw_to_local(hw);
366 return __netif_subqueue_stopped(local->mdev, queue);
367}
368EXPORT_SYMBOL(ieee80211_queue_stopped);
369
389void ieee80211_wake_queues(struct ieee80211_hw *hw) 370void ieee80211_wake_queues(struct ieee80211_hw *hw)
390{ 371{
391 int i; 372 int i;
@@ -408,15 +389,16 @@ void ieee80211_iterate_active_interfaces(
408 389
409 list_for_each_entry(sdata, &local->interfaces, list) { 390 list_for_each_entry(sdata, &local->interfaces, list) {
410 switch (sdata->vif.type) { 391 switch (sdata->vif.type) {
411 case IEEE80211_IF_TYPE_INVALID: 392 case __NL80211_IFTYPE_AFTER_LAST:
412 case IEEE80211_IF_TYPE_MNTR: 393 case NL80211_IFTYPE_UNSPECIFIED:
413 case IEEE80211_IF_TYPE_VLAN: 394 case NL80211_IFTYPE_MONITOR:
395 case NL80211_IFTYPE_AP_VLAN:
414 continue; 396 continue;
415 case IEEE80211_IF_TYPE_AP: 397 case NL80211_IFTYPE_AP:
416 case IEEE80211_IF_TYPE_STA: 398 case NL80211_IFTYPE_STATION:
417 case IEEE80211_IF_TYPE_IBSS: 399 case NL80211_IFTYPE_ADHOC:
418 case IEEE80211_IF_TYPE_WDS: 400 case NL80211_IFTYPE_WDS:
419 case IEEE80211_IF_TYPE_MESH_POINT: 401 case NL80211_IFTYPE_MESH_POINT:
420 break; 402 break;
421 } 403 }
422 if (netif_running(sdata->dev)) 404 if (netif_running(sdata->dev))
@@ -441,15 +423,16 @@ void ieee80211_iterate_active_interfaces_atomic(
441 423
442 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 424 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
443 switch (sdata->vif.type) { 425 switch (sdata->vif.type) {
444 case IEEE80211_IF_TYPE_INVALID: 426 case __NL80211_IFTYPE_AFTER_LAST:
445 case IEEE80211_IF_TYPE_MNTR: 427 case NL80211_IFTYPE_UNSPECIFIED:
446 case IEEE80211_IF_TYPE_VLAN: 428 case NL80211_IFTYPE_MONITOR:
429 case NL80211_IFTYPE_AP_VLAN:
447 continue; 430 continue;
448 case IEEE80211_IF_TYPE_AP: 431 case NL80211_IFTYPE_AP:
449 case IEEE80211_IF_TYPE_STA: 432 case NL80211_IFTYPE_STATION:
450 case IEEE80211_IF_TYPE_IBSS: 433 case NL80211_IFTYPE_ADHOC:
451 case IEEE80211_IF_TYPE_WDS: 434 case NL80211_IFTYPE_WDS:
452 case IEEE80211_IF_TYPE_MESH_POINT: 435 case NL80211_IFTYPE_MESH_POINT:
453 break; 436 break;
454 } 437 }
455 if (netif_running(sdata->dev)) 438 if (netif_running(sdata->dev))
@@ -460,3 +443,243 @@ void ieee80211_iterate_active_interfaces_atomic(
460 rcu_read_unlock(); 443 rcu_read_unlock();
461} 444}
462EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic); 445EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
446
447void ieee802_11_parse_elems(u8 *start, size_t len,
448 struct ieee802_11_elems *elems)
449{
450 size_t left = len;
451 u8 *pos = start;
452
453 memset(elems, 0, sizeof(*elems));
454 elems->ie_start = start;
455 elems->total_len = len;
456
457 while (left >= 2) {
458 u8 id, elen;
459
460 id = *pos++;
461 elen = *pos++;
462 left -= 2;
463
464 if (elen > left)
465 return;
466
467 switch (id) {
468 case WLAN_EID_SSID:
469 elems->ssid = pos;
470 elems->ssid_len = elen;
471 break;
472 case WLAN_EID_SUPP_RATES:
473 elems->supp_rates = pos;
474 elems->supp_rates_len = elen;
475 break;
476 case WLAN_EID_FH_PARAMS:
477 elems->fh_params = pos;
478 elems->fh_params_len = elen;
479 break;
480 case WLAN_EID_DS_PARAMS:
481 elems->ds_params = pos;
482 elems->ds_params_len = elen;
483 break;
484 case WLAN_EID_CF_PARAMS:
485 elems->cf_params = pos;
486 elems->cf_params_len = elen;
487 break;
488 case WLAN_EID_TIM:
489 elems->tim = pos;
490 elems->tim_len = elen;
491 break;
492 case WLAN_EID_IBSS_PARAMS:
493 elems->ibss_params = pos;
494 elems->ibss_params_len = elen;
495 break;
496 case WLAN_EID_CHALLENGE:
497 elems->challenge = pos;
498 elems->challenge_len = elen;
499 break;
500 case WLAN_EID_WPA:
501 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
502 pos[2] == 0xf2) {
503 /* Microsoft OUI (00:50:F2) */
504 if (pos[3] == 1) {
505 /* OUI Type 1 - WPA IE */
506 elems->wpa = pos;
507 elems->wpa_len = elen;
508 } else if (elen >= 5 && pos[3] == 2) {
509 if (pos[4] == 0) {
510 elems->wmm_info = pos;
511 elems->wmm_info_len = elen;
512 } else if (pos[4] == 1) {
513 elems->wmm_param = pos;
514 elems->wmm_param_len = elen;
515 }
516 }
517 }
518 break;
519 case WLAN_EID_RSN:
520 elems->rsn = pos;
521 elems->rsn_len = elen;
522 break;
523 case WLAN_EID_ERP_INFO:
524 elems->erp_info = pos;
525 elems->erp_info_len = elen;
526 break;
527 case WLAN_EID_EXT_SUPP_RATES:
528 elems->ext_supp_rates = pos;
529 elems->ext_supp_rates_len = elen;
530 break;
531 case WLAN_EID_HT_CAPABILITY:
532 elems->ht_cap_elem = pos;
533 elems->ht_cap_elem_len = elen;
534 break;
535 case WLAN_EID_HT_EXTRA_INFO:
536 elems->ht_info_elem = pos;
537 elems->ht_info_elem_len = elen;
538 break;
539 case WLAN_EID_MESH_ID:
540 elems->mesh_id = pos;
541 elems->mesh_id_len = elen;
542 break;
543 case WLAN_EID_MESH_CONFIG:
544 elems->mesh_config = pos;
545 elems->mesh_config_len = elen;
546 break;
547 case WLAN_EID_PEER_LINK:
548 elems->peer_link = pos;
549 elems->peer_link_len = elen;
550 break;
551 case WLAN_EID_PREQ:
552 elems->preq = pos;
553 elems->preq_len = elen;
554 break;
555 case WLAN_EID_PREP:
556 elems->prep = pos;
557 elems->prep_len = elen;
558 break;
559 case WLAN_EID_PERR:
560 elems->perr = pos;
561 elems->perr_len = elen;
562 break;
563 case WLAN_EID_CHANNEL_SWITCH:
564 elems->ch_switch_elem = pos;
565 elems->ch_switch_elem_len = elen;
566 break;
567 case WLAN_EID_QUIET:
568 if (!elems->quiet_elem) {
569 elems->quiet_elem = pos;
570 elems->quiet_elem_len = elen;
571 }
572 elems->num_of_quiet_elem++;
573 break;
574 case WLAN_EID_COUNTRY:
575 elems->country_elem = pos;
576 elems->country_elem_len = elen;
577 break;
578 case WLAN_EID_PWR_CONSTRAINT:
579 elems->pwr_constr_elem = pos;
580 elems->pwr_constr_elem_len = elen;
581 break;
582 default:
583 break;
584 }
585
586 left -= elen;
587 pos += elen;
588 }
589}
590
591void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
592{
593 struct ieee80211_local *local = sdata->local;
594 struct ieee80211_tx_queue_params qparam;
595 int i;
596
597 if (!local->ops->conf_tx)
598 return;
599
600 memset(&qparam, 0, sizeof(qparam));
601
602 qparam.aifs = 2;
603
604 if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ &&
605 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE))
606 qparam.cw_min = 31;
607 else
608 qparam.cw_min = 15;
609
610 qparam.cw_max = 1023;
611 qparam.txop = 0;
612
613 for (i = 0; i < local_to_hw(local)->queues; i++)
614 local->ops->conf_tx(local_to_hw(local), i, &qparam);
615}
616
617void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
618 int encrypt)
619{
620 skb->dev = sdata->local->mdev;
621 skb_set_mac_header(skb, 0);
622 skb_set_network_header(skb, 0);
623 skb_set_transport_header(skb, 0);
624
625 skb->iif = sdata->dev->ifindex;
626 skb->do_not_encrypt = !encrypt;
627
628 dev_queue_xmit(skb);
629}
630
631int ieee80211_set_freq(struct ieee80211_sub_if_data *sdata, int freqMHz)
632{
633 int ret = -EINVAL;
634 struct ieee80211_channel *chan;
635 struct ieee80211_local *local = sdata->local;
636
637 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
638
639 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
640 if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
641 chan->flags & IEEE80211_CHAN_NO_IBSS) {
642 printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
643 "%d MHz\n", sdata->dev->name, chan->center_freq);
644 return ret;
645 }
646 local->oper_channel = chan;
647
648 if (local->sw_scanning || local->hw_scanning)
649 ret = 0;
650 else
651 ret = ieee80211_hw_config(local);
652
653 rate_control_clear(local);
654 }
655
656 return ret;
657}
658
659u64 ieee80211_mandatory_rates(struct ieee80211_local *local,
660 enum ieee80211_band band)
661{
662 struct ieee80211_supported_band *sband;
663 struct ieee80211_rate *bitrates;
664 u64 mandatory_rates;
665 enum ieee80211_rate_flags mandatory_flag;
666 int i;
667
668 sband = local->hw.wiphy->bands[band];
669 if (!sband) {
670 WARN_ON(1);
671 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
672 }
673
674 if (band == IEEE80211_BAND_2GHZ)
675 mandatory_flag = IEEE80211_RATE_MANDATORY_B;
676 else
677 mandatory_flag = IEEE80211_RATE_MANDATORY_A;
678
679 bitrates = sband->bitrates;
680 mandatory_rates = 0;
681 for (i = 0; i < sband->n_bitrates; i++)
682 if (bitrates[i].flags & mandatory_flag)
683 mandatory_rates |= BIT(i);
684 return mandatory_rates;
685}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 5c2bf0a3d4db..376c84987e4f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -228,11 +228,10 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
228 return -1; 228 return -1;
229 229
230 hdrlen = ieee80211_hdrlen(hdr->frame_control); 230 hdrlen = ieee80211_hdrlen(hdr->frame_control);
231 231 if (skb->len < hdrlen + WEP_IV_LEN + WEP_ICV_LEN)
232 if (skb->len < 8 + hdrlen)
233 return -1; 232 return -1;
234 233
235 len = skb->len - hdrlen - 8; 234 len = skb->len - hdrlen - WEP_IV_LEN - WEP_ICV_LEN;
236 235
237 keyidx = skb->data[hdrlen + 3] >> 6; 236 keyidx = skb->data[hdrlen + 3] >> 6;
238 237
@@ -292,9 +291,10 @@ u8 * ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key)
292ieee80211_rx_result 291ieee80211_rx_result
293ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) 292ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
294{ 293{
295 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 294 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
296 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 295
297 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) 296 if (!ieee80211_is_data(hdr->frame_control) &&
297 !ieee80211_is_auth(hdr->frame_control))
298 return RX_CONTINUE; 298 return RX_CONTINUE;
299 299
300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) { 300 if (!(rx->status->flag & RX_FLAG_DECRYPTED)) {
@@ -303,7 +303,7 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) { 303 } else if (!(rx->status->flag & RX_FLAG_IV_STRIPPED)) {
304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); 304 ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
305 /* remove ICV */ 305 /* remove ICV */
306 skb_trim(rx->skb, rx->skb->len - 4); 306 skb_trim(rx->skb, rx->skb->len - WEP_ICV_LEN);
307 } 307 }
308 308
309 return RX_CONTINUE; 309 return RX_CONTINUE;
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 34fa8ed1e784..7e0d53abde24 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -27,22 +27,19 @@
27#include "aes_ccm.h" 27#include "aes_ccm.h"
28 28
29 29
30static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr, 30static int ieee80211_set_encryption(struct ieee80211_sub_if_data *sdata, u8 *sta_addr,
31 int idx, int alg, int remove, 31 int idx, int alg, int remove,
32 int set_tx_key, const u8 *_key, 32 int set_tx_key, const u8 *_key,
33 size_t key_len) 33 size_t key_len)
34{ 34{
35 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 35 struct ieee80211_local *local = sdata->local;
36 struct sta_info *sta; 36 struct sta_info *sta;
37 struct ieee80211_key *key; 37 struct ieee80211_key *key;
38 struct ieee80211_sub_if_data *sdata;
39 int err; 38 int err;
40 39
41 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
42
43 if (idx < 0 || idx >= NUM_DEFAULT_KEYS) { 40 if (idx < 0 || idx >= NUM_DEFAULT_KEYS) {
44 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n", 41 printk(KERN_DEBUG "%s: set_encrypt - invalid idx=%d\n",
45 dev->name, idx); 42 sdata->dev->name, idx);
46 return -EINVAL; 43 return -EINVAL;
47 } 44 }
48 45
@@ -125,13 +122,13 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
125 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) 122 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
126 return -EOPNOTSUPP; 123 return -EOPNOTSUPP;
127 124
128 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 125 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
129 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 126 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
130 int ret = ieee80211_sta_set_extra_ie(dev, extra, data->length); 127 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
131 if (ret) 128 if (ret)
132 return ret; 129 return ret;
133 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 130 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
134 ieee80211_sta_req_auth(dev, &sdata->u.sta); 131 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
135 return 0; 132 return 0;
136 } 133 }
137 134
@@ -276,21 +273,21 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev,
276 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 273 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
277 int type; 274 int type;
278 275
279 if (sdata->vif.type == IEEE80211_IF_TYPE_VLAN) 276 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
280 return -EOPNOTSUPP; 277 return -EOPNOTSUPP;
281 278
282 switch (*mode) { 279 switch (*mode) {
283 case IW_MODE_INFRA: 280 case IW_MODE_INFRA:
284 type = IEEE80211_IF_TYPE_STA; 281 type = NL80211_IFTYPE_STATION;
285 break; 282 break;
286 case IW_MODE_ADHOC: 283 case IW_MODE_ADHOC:
287 type = IEEE80211_IF_TYPE_IBSS; 284 type = NL80211_IFTYPE_ADHOC;
288 break; 285 break;
289 case IW_MODE_REPEAT: 286 case IW_MODE_REPEAT:
290 type = IEEE80211_IF_TYPE_WDS; 287 type = NL80211_IFTYPE_WDS;
291 break; 288 break;
292 case IW_MODE_MONITOR: 289 case IW_MODE_MONITOR:
293 type = IEEE80211_IF_TYPE_MNTR; 290 type = NL80211_IFTYPE_MONITOR;
294 break; 291 break;
295 default: 292 default:
296 return -EINVAL; 293 return -EINVAL;
@@ -308,22 +305,22 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
308 305
309 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 306 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
310 switch (sdata->vif.type) { 307 switch (sdata->vif.type) {
311 case IEEE80211_IF_TYPE_AP: 308 case NL80211_IFTYPE_AP:
312 *mode = IW_MODE_MASTER; 309 *mode = IW_MODE_MASTER;
313 break; 310 break;
314 case IEEE80211_IF_TYPE_STA: 311 case NL80211_IFTYPE_STATION:
315 *mode = IW_MODE_INFRA; 312 *mode = IW_MODE_INFRA;
316 break; 313 break;
317 case IEEE80211_IF_TYPE_IBSS: 314 case NL80211_IFTYPE_ADHOC:
318 *mode = IW_MODE_ADHOC; 315 *mode = IW_MODE_ADHOC;
319 break; 316 break;
320 case IEEE80211_IF_TYPE_MNTR: 317 case NL80211_IFTYPE_MONITOR:
321 *mode = IW_MODE_MONITOR; 318 *mode = IW_MODE_MONITOR;
322 break; 319 break;
323 case IEEE80211_IF_TYPE_WDS: 320 case NL80211_IFTYPE_WDS:
324 *mode = IW_MODE_REPEAT; 321 *mode = IW_MODE_REPEAT;
325 break; 322 break;
326 case IEEE80211_IF_TYPE_VLAN: 323 case NL80211_IFTYPE_AP_VLAN:
327 *mode = IW_MODE_SECOND; /* FIXME */ 324 *mode = IW_MODE_SECOND; /* FIXME */
328 break; 325 break;
329 default: 326 default:
@@ -333,60 +330,31 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev,
333 return 0; 330 return 0;
334} 331}
335 332
336int ieee80211_set_freq(struct net_device *dev, int freqMHz)
337{
338 int ret = -EINVAL;
339 struct ieee80211_channel *chan;
340 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
341 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
342
343 chan = ieee80211_get_channel(local->hw.wiphy, freqMHz);
344
345 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
346 if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS &&
347 chan->flags & IEEE80211_CHAN_NO_IBSS) {
348 printk(KERN_DEBUG "%s: IBSS not allowed on frequency "
349 "%d MHz\n", dev->name, chan->center_freq);
350 return ret;
351 }
352 local->oper_channel = chan;
353
354 if (local->sta_sw_scanning || local->sta_hw_scanning)
355 ret = 0;
356 else
357 ret = ieee80211_hw_config(local);
358
359 rate_control_clear(local);
360 }
361
362 return ret;
363}
364
365static int ieee80211_ioctl_siwfreq(struct net_device *dev, 333static int ieee80211_ioctl_siwfreq(struct net_device *dev,
366 struct iw_request_info *info, 334 struct iw_request_info *info,
367 struct iw_freq *freq, char *extra) 335 struct iw_freq *freq, char *extra)
368{ 336{
369 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 337 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
370 338
371 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) 339 if (sdata->vif.type == NL80211_IFTYPE_STATION)
372 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL; 340 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL;
373 341
374 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */ 342 /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */
375 if (freq->e == 0) { 343 if (freq->e == 0) {
376 if (freq->m < 0) { 344 if (freq->m < 0) {
377 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) 345 if (sdata->vif.type == NL80211_IFTYPE_STATION)
378 sdata->u.sta.flags |= 346 sdata->u.sta.flags |=
379 IEEE80211_STA_AUTO_CHANNEL_SEL; 347 IEEE80211_STA_AUTO_CHANNEL_SEL;
380 return 0; 348 return 0;
381 } else 349 } else
382 return ieee80211_set_freq(dev, 350 return ieee80211_set_freq(sdata,
383 ieee80211_channel_to_frequency(freq->m)); 351 ieee80211_channel_to_frequency(freq->m));
384 } else { 352 } else {
385 int i, div = 1000000; 353 int i, div = 1000000;
386 for (i = 0; i < freq->e; i++) 354 for (i = 0; i < freq->e; i++)
387 div /= 10; 355 div /= 10;
388 if (div > 0) 356 if (div > 0)
389 return ieee80211_set_freq(dev, freq->m / div); 357 return ieee80211_set_freq(sdata, freq->m / div);
390 else 358 else
391 return -EINVAL; 359 return -EINVAL;
392 } 360 }
@@ -418,8 +386,8 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
418 len--; 386 len--;
419 387
420 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 388 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
421 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 389 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
422 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 390 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
423 int ret; 391 int ret;
424 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { 392 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
425 if (len > IEEE80211_MAX_SSID_LEN) 393 if (len > IEEE80211_MAX_SSID_LEN)
@@ -432,14 +400,14 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
432 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; 400 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
433 else 401 else
434 sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL; 402 sdata->u.sta.flags |= IEEE80211_STA_AUTO_SSID_SEL;
435 ret = ieee80211_sta_set_ssid(dev, ssid, len); 403 ret = ieee80211_sta_set_ssid(sdata, ssid, len);
436 if (ret) 404 if (ret)
437 return ret; 405 return ret;
438 ieee80211_sta_req_auth(dev, &sdata->u.sta); 406 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
439 return 0; 407 return 0;
440 } 408 }
441 409
442 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 410 if (sdata->vif.type == NL80211_IFTYPE_AP) {
443 memcpy(sdata->u.ap.ssid, ssid, len); 411 memcpy(sdata->u.ap.ssid, ssid, len);
444 memset(sdata->u.ap.ssid + len, 0, 412 memset(sdata->u.ap.ssid + len, 0,
445 IEEE80211_MAX_SSID_LEN - len); 413 IEEE80211_MAX_SSID_LEN - len);
@@ -458,9 +426,9 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
458 426
459 struct ieee80211_sub_if_data *sdata; 427 struct ieee80211_sub_if_data *sdata;
460 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 428 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
461 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 429 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
462 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 430 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
463 int res = ieee80211_sta_get_ssid(dev, ssid, &len); 431 int res = ieee80211_sta_get_ssid(sdata, ssid, &len);
464 if (res == 0) { 432 if (res == 0) {
465 data->length = len; 433 data->length = len;
466 data->flags = 1; 434 data->flags = 1;
@@ -469,7 +437,7 @@ static int ieee80211_ioctl_giwessid(struct net_device *dev,
469 return res; 437 return res;
470 } 438 }
471 439
472 if (sdata->vif.type == IEEE80211_IF_TYPE_AP) { 440 if (sdata->vif.type == NL80211_IFTYPE_AP) {
473 len = sdata->u.ap.ssid_len; 441 len = sdata->u.ap.ssid_len;
474 if (len > IW_ESSID_MAX_SIZE) 442 if (len > IW_ESSID_MAX_SIZE)
475 len = IW_ESSID_MAX_SIZE; 443 len = IW_ESSID_MAX_SIZE;
@@ -489,8 +457,8 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
489 struct ieee80211_sub_if_data *sdata; 457 struct ieee80211_sub_if_data *sdata;
490 458
491 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 459 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
492 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 460 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
493 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 461 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
494 int ret; 462 int ret;
495 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { 463 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
496 memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data, 464 memcpy(sdata->u.sta.bssid, (u8 *) &ap_addr->sa_data,
@@ -504,12 +472,12 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
504 sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL; 472 sdata->u.sta.flags |= IEEE80211_STA_AUTO_BSSID_SEL;
505 else 473 else
506 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 474 sdata->u.sta.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
507 ret = ieee80211_sta_set_bssid(dev, (u8 *) &ap_addr->sa_data); 475 ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
508 if (ret) 476 if (ret)
509 return ret; 477 return ret;
510 ieee80211_sta_req_auth(dev, &sdata->u.sta); 478 ieee80211_sta_req_auth(sdata, &sdata->u.sta);
511 return 0; 479 return 0;
512 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { 480 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
513 /* 481 /*
514 * If it is necessary to update the WDS peer address 482 * If it is necessary to update the WDS peer address
515 * while the interface is running, then we need to do 483 * while the interface is running, then we need to do
@@ -537,10 +505,10 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
537 struct ieee80211_sub_if_data *sdata; 505 struct ieee80211_sub_if_data *sdata;
538 506
539 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 507 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
540 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 508 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
541 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 509 sdata->vif.type == NL80211_IFTYPE_ADHOC) {
542 if (sdata->u.sta.state == IEEE80211_ASSOCIATED || 510 if (sdata->u.sta.state == IEEE80211_STA_MLME_ASSOCIATED ||
543 sdata->u.sta.state == IEEE80211_IBSS_JOINED) { 511 sdata->u.sta.state == IEEE80211_STA_MLME_IBSS_JOINED) {
544 ap_addr->sa_family = ARPHRD_ETHER; 512 ap_addr->sa_family = ARPHRD_ETHER;
545 memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); 513 memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN);
546 return 0; 514 return 0;
@@ -548,7 +516,7 @@ static int ieee80211_ioctl_giwap(struct net_device *dev,
548 memset(&ap_addr->sa_data, 0, ETH_ALEN); 516 memset(&ap_addr->sa_data, 0, ETH_ALEN);
549 return 0; 517 return 0;
550 } 518 }
551 } else if (sdata->vif.type == IEEE80211_IF_TYPE_WDS) { 519 } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
552 ap_addr->sa_family = ARPHRD_ETHER; 520 ap_addr->sa_family = ARPHRD_ETHER;
553 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN); 521 memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN);
554 return 0; 522 return 0;
@@ -570,10 +538,10 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev,
570 if (!netif_running(dev)) 538 if (!netif_running(dev))
571 return -ENETDOWN; 539 return -ENETDOWN;
572 540
573 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 541 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
574 sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 542 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
575 sdata->vif.type != IEEE80211_IF_TYPE_MESH_POINT && 543 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
576 sdata->vif.type != IEEE80211_IF_TYPE_AP) 544 sdata->vif.type != NL80211_IFTYPE_AP)
577 return -EOPNOTSUPP; 545 return -EOPNOTSUPP;
578 546
579 /* if SSID was specified explicitly then use that */ 547 /* if SSID was specified explicitly then use that */
@@ -584,7 +552,7 @@ static int ieee80211_ioctl_siwscan(struct net_device *dev,
584 ssid_len = req->essid_len; 552 ssid_len = req->essid_len;
585 } 553 }
586 554
587 return ieee80211_sta_req_scan(dev, ssid, ssid_len); 555 return ieee80211_request_scan(sdata, ssid, ssid_len);
588} 556}
589 557
590 558
@@ -594,11 +562,14 @@ static int ieee80211_ioctl_giwscan(struct net_device *dev,
594{ 562{
595 int res; 563 int res;
596 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 564 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
565 struct ieee80211_sub_if_data *sdata;
566
567 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
597 568
598 if (local->sta_sw_scanning || local->sta_hw_scanning) 569 if (local->sw_scanning || local->hw_scanning)
599 return -EAGAIN; 570 return -EAGAIN;
600 571
601 res = ieee80211_sta_scan_results(dev, info, extra, data->length); 572 res = ieee80211_scan_results(local, info, extra, data->length);
602 if (res >= 0) { 573 if (res >= 0) {
603 data->length = res; 574 data->length = res;
604 return 0; 575 return 0;
@@ -656,7 +627,7 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
656 627
657 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 628 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
658 629
659 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 630 if (sdata->vif.type != NL80211_IFTYPE_STATION)
660 return -EOPNOTSUPP; 631 return -EOPNOTSUPP;
661 632
662 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 633 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -665,8 +636,8 @@ static int ieee80211_ioctl_giwrate(struct net_device *dev,
665 636
666 sta = sta_info_get(local, sdata->u.sta.bssid); 637 sta = sta_info_get(local, sdata->u.sta.bssid);
667 638
668 if (sta && sta->txrate_idx < sband->n_bitrates) 639 if (sta && sta->last_txrate_idx < sband->n_bitrates)
669 rate->value = sband->bitrates[sta->txrate_idx].bitrate; 640 rate->value = sband->bitrates[sta->last_txrate_idx].bitrate;
670 else 641 else
671 rate->value = 0; 642 rate->value = 0;
672 643
@@ -887,17 +858,17 @@ static int ieee80211_ioctl_siwmlme(struct net_device *dev,
887 struct iw_mlme *mlme = (struct iw_mlme *) extra; 858 struct iw_mlme *mlme = (struct iw_mlme *) extra;
888 859
889 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 860 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
890 if (sdata->vif.type != IEEE80211_IF_TYPE_STA && 861 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
891 sdata->vif.type != IEEE80211_IF_TYPE_IBSS) 862 sdata->vif.type != NL80211_IFTYPE_ADHOC)
892 return -EINVAL; 863 return -EINVAL;
893 864
894 switch (mlme->cmd) { 865 switch (mlme->cmd) {
895 case IW_MLME_DEAUTH: 866 case IW_MLME_DEAUTH:
896 /* TODO: mlme->addr.sa_data */ 867 /* TODO: mlme->addr.sa_data */
897 return ieee80211_sta_deauthenticate(dev, mlme->reason_code); 868 return ieee80211_sta_deauthenticate(sdata, mlme->reason_code);
898 case IW_MLME_DISASSOC: 869 case IW_MLME_DISASSOC:
899 /* TODO: mlme->addr.sa_data */ 870 /* TODO: mlme->addr.sa_data */
900 return ieee80211_sta_disassociate(dev, mlme->reason_code); 871 return ieee80211_sta_disassociate(sdata, mlme->reason_code);
901 default: 872 default:
902 return -EOPNOTSUPP; 873 return -EOPNOTSUPP;
903 } 874 }
@@ -938,7 +909,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
938 } 909 }
939 910
940 return ieee80211_set_encryption( 911 return ieee80211_set_encryption(
941 dev, bcaddr, 912 sdata, bcaddr,
942 idx, alg, remove, 913 idx, alg, remove,
943 !sdata->default_key, 914 !sdata->default_key,
944 keybuf, erq->length); 915 keybuf, erq->length);
@@ -983,7 +954,7 @@ static int ieee80211_ioctl_giwencode(struct net_device *dev,
983 erq->length = sdata->keys[idx]->conf.keylen; 954 erq->length = sdata->keys[idx]->conf.keylen;
984 erq->flags |= IW_ENCODE_ENABLED; 955 erq->flags |= IW_ENCODE_ENABLED;
985 956
986 if (sdata->vif.type == IEEE80211_IF_TYPE_STA) { 957 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
987 struct ieee80211_if_sta *ifsta = &sdata->u.sta; 958 struct ieee80211_if_sta *ifsta = &sdata->u.sta;
988 switch (ifsta->auth_alg) { 959 switch (ifsta->auth_alg) {
989 case WLAN_AUTH_OPEN: 960 case WLAN_AUTH_OPEN:
@@ -1057,7 +1028,7 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
1057 sdata->drop_unencrypted = !!data->value; 1028 sdata->drop_unencrypted = !!data->value;
1058 break; 1029 break;
1059 case IW_AUTH_PRIVACY_INVOKED: 1030 case IW_AUTH_PRIVACY_INVOKED:
1060 if (sdata->vif.type != IEEE80211_IF_TYPE_STA) 1031 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1061 ret = -EINVAL; 1032 ret = -EINVAL;
1062 else { 1033 else {
1063 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; 1034 sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
@@ -1072,8 +1043,8 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
1072 } 1043 }
1073 break; 1044 break;
1074 case IW_AUTH_80211_AUTH_ALG: 1045 case IW_AUTH_80211_AUTH_ALG:
1075 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 1046 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
1076 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) 1047 sdata->vif.type == NL80211_IFTYPE_ADHOC)
1077 sdata->u.sta.auth_algs = data->value; 1048 sdata->u.sta.auth_algs = data->value;
1078 else 1049 else
1079 ret = -EOPNOTSUPP; 1050 ret = -EOPNOTSUPP;
@@ -1095,8 +1066,8 @@ static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev
1095 1066
1096 rcu_read_lock(); 1067 rcu_read_lock();
1097 1068
1098 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 1069 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
1099 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) 1070 sdata->vif.type == NL80211_IFTYPE_ADHOC)
1100 sta = sta_info_get(local, sdata->u.sta.bssid); 1071 sta = sta_info_get(local, sdata->u.sta.bssid);
1101 if (!sta) { 1072 if (!sta) {
1102 wstats->discard.fragment = 0; 1073 wstats->discard.fragment = 0;
@@ -1126,8 +1097,8 @@ static int ieee80211_ioctl_giwauth(struct net_device *dev,
1126 1097
1127 switch (data->flags & IW_AUTH_INDEX) { 1098 switch (data->flags & IW_AUTH_INDEX) {
1128 case IW_AUTH_80211_AUTH_ALG: 1099 case IW_AUTH_80211_AUTH_ALG:
1129 if (sdata->vif.type == IEEE80211_IF_TYPE_STA || 1100 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
1130 sdata->vif.type == IEEE80211_IF_TYPE_IBSS) 1101 sdata->vif.type == NL80211_IFTYPE_ADHOC)
1131 data->value = sdata->u.sta.auth_algs; 1102 data->value = sdata->u.sta.auth_algs;
1132 else 1103 else
1133 ret = -EOPNOTSUPP; 1104 ret = -EOPNOTSUPP;
@@ -1184,7 +1155,7 @@ static int ieee80211_ioctl_siwencodeext(struct net_device *dev,
1184 } else 1155 } else
1185 idx--; 1156 idx--;
1186 1157
1187 return ieee80211_set_encryption(dev, ext->addr.sa_data, idx, alg, 1158 return ieee80211_set_encryption(sdata, ext->addr.sa_data, idx, alg,
1188 remove, 1159 remove,
1189 ext->ext_flags & 1160 ext->ext_flags &
1190 IW_ENCODE_EXT_SET_TX_KEY, 1161 IW_ENCODE_EXT_SET_TX_KEY,
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 4310e2f65661..139b5f267b34 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -39,7 +39,7 @@ static unsigned int classify_1d(struct sk_buff *skb)
39 return skb->priority - 256; 39 return skb->priority - 256;
40 40
41 switch (skb->protocol) { 41 switch (skb->protocol) {
42 case __constant_htons(ETH_P_IP): 42 case htons(ETH_P_IP):
43 dscp = ip_hdr(skb)->tos & 0xfc; 43 dscp = ip_hdr(skb)->tos & 0xfc;
44 break; 44 break;
45 45
@@ -47,8 +47,6 @@ static unsigned int classify_1d(struct sk_buff *skb)
47 return 0; 47 return 0;
48 } 48 }
49 49
50 if (dscp & 0x1c)
51 return 0;
52 return dscp >> 5; 50 return dscp >> 5;
53} 51}
54 52
@@ -75,9 +73,8 @@ static int wme_downgrade_ac(struct sk_buff *skb)
75 73
76 74
77/* Indicate which queue to use. */ 75/* Indicate which queue to use. */
78static u16 classify80211(struct sk_buff *skb, struct net_device *dev) 76static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
79{ 77{
80 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
81 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 78 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
82 79
83 if (!ieee80211_is_data(hdr->frame_control)) { 80 if (!ieee80211_is_data(hdr->frame_control)) {
@@ -115,14 +112,15 @@ static u16 classify80211(struct sk_buff *skb, struct net_device *dev)
115 112
116u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb) 113u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
117{ 114{
115 struct ieee80211_master_priv *mpriv = netdev_priv(dev);
116 struct ieee80211_local *local = mpriv->local;
118 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 117 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
119 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
120 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 118 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
121 struct sta_info *sta; 119 struct sta_info *sta;
122 u16 queue; 120 u16 queue;
123 u8 tid; 121 u8 tid;
124 122
125 queue = classify80211(skb, dev); 123 queue = classify80211(local, skb);
126 if (unlikely(queue >= local->hw.queues)) 124 if (unlikely(queue >= local->hw.queues))
127 queue = local->hw.queues - 1; 125 queue = local->hw.queues - 1;
128 126
@@ -212,7 +210,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
212 DECLARE_MAC_BUF(mac); 210 DECLARE_MAC_BUF(mac);
213 printk(KERN_DEBUG "allocated aggregation queue" 211 printk(KERN_DEBUG "allocated aggregation queue"
214 " %d tid %d addr %s pool=0x%lX\n", 212 " %d tid %d addr %s pool=0x%lX\n",
215 i, tid, print_mac(mac, sta->addr), 213 i, tid, print_mac(mac, sta->sta.addr),
216 local->queue_pool[0]); 214 local->queue_pool[0]);
217 } 215 }
218#endif /* CONFIG_MAC80211_HT_DEBUG */ 216#endif /* CONFIG_MAC80211_HT_DEBUG */
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 04de28c071a6..bc62f28a4d3d 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -1,5 +1,4 @@
1/* 1/*
2 * IEEE 802.11 driver (80211.o) - QoS datatypes
3 * Copyright 2004, Instant802 Networks, Inc. 2 * Copyright 2004, Instant802 Networks, Inc.
4 * Copyright 2005, Devicescape Software, Inc. 3 * Copyright 2005, Devicescape Software, Inc.
5 * 4 *
@@ -14,8 +13,6 @@
14#include <linux/netdevice.h> 13#include <linux/netdevice.h>
15#include "ieee80211_i.h" 14#include "ieee80211_i.h"
16 15
17#define QOS_CONTROL_LEN 2
18
19#define QOS_CONTROL_ACK_POLICY_NORMAL 0 16#define QOS_CONTROL_ACK_POLICY_NORMAL 0
20#define QOS_CONTROL_ACK_POLICY_NOACK 1 17#define QOS_CONTROL_ACK_POLICY_NOACK 1
21 18
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 2f33df0dcccf..37ae9a959f63 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -127,7 +127,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
127 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 127 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
128 return RX_DROP_UNUSABLE; 128 return RX_DROP_UNUSABLE;
129 129
130 mac80211_ev_michael_mic_failure(rx->dev, rx->key->conf.keyidx, 130 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
131 (void *) skb->data); 131 (void *) skb->data);
132 return RX_DROP_UNUSABLE; 132 return RX_DROP_UNUSABLE;
133 } 133 }
@@ -256,7 +256,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
256 256
257 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, 257 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
258 key, skb->data + hdrlen, 258 key, skb->data + hdrlen,
259 skb->len - hdrlen, rx->sta->addr, 259 skb->len - hdrlen, rx->sta->sta.addr,
260 hdr->addr1, hwaccel, rx->queue, 260 hdr->addr1, hwaccel, rx->queue,
261 &rx->tkip_iv32, 261 &rx->tkip_iv32,
262 &rx->tkip_iv16); 262 &rx->tkip_iv16);
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 9f328593287e..307a2c3c2df4 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -136,17 +136,19 @@ static void localtime_3(struct xtm *r, time_t time)
136 * from w repeatedly while counting.) 136 * from w repeatedly while counting.)
137 */ 137 */
138 if (is_leap(year)) { 138 if (is_leap(year)) {
139 /* use days_since_leapyear[] in a leap year */
139 for (i = ARRAY_SIZE(days_since_leapyear) - 1; 140 for (i = ARRAY_SIZE(days_since_leapyear) - 1;
140 i > 0 && days_since_year[i] > w; --i) 141 i > 0 && days_since_leapyear[i] > w; --i)
141 /* just loop */; 142 /* just loop */;
143 r->monthday = w - days_since_leapyear[i] + 1;
142 } else { 144 } else {
143 for (i = ARRAY_SIZE(days_since_year) - 1; 145 for (i = ARRAY_SIZE(days_since_year) - 1;
144 i > 0 && days_since_year[i] > w; --i) 146 i > 0 && days_since_year[i] > w; --i)
145 /* just loop */; 147 /* just loop */;
148 r->monthday = w - days_since_year[i] + 1;
146 } 149 }
147 150
148 r->month = i + 1; 151 r->month = i + 1;
149 r->monthday = w - days_since_year[i] + 1;
150 return; 152 return;
151} 153}
152 154
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig
new file mode 100644
index 000000000000..51a5669573f2
--- /dev/null
+++ b/net/phonet/Kconfig
@@ -0,0 +1,16 @@
1#
2# Phonet protocol
3#
4
5config PHONET
6 tristate "Phonet protocols family"
7 help
8 The Phone Network protocol (PhoNet) is a packet-oriented
9 communication protocol developped by Nokia for use with its modems.
10
11 This is required for Maemo to use cellular data connectivity (if
12 supported). It can also be used to control Nokia phones
13 from a Linux computer, although AT commands may be easier to use.
14
15 To compile this driver as a module, choose M here: the module
16 will be called phonet. If unsure, say N.
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
new file mode 100644
index 000000000000..ae9c3ed5be83
--- /dev/null
+++ b/net/phonet/Makefile
@@ -0,0 +1,9 @@
1obj-$(CONFIG_PHONET) += phonet.o
2
3phonet-objs := \
4 pn_dev.o \
5 pn_netlink.o \
6 socket.o \
7 datagram.o \
8 sysctl.o \
9 af_phonet.o
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
new file mode 100644
index 000000000000..1d8df6b7e3df
--- /dev/null
+++ b/net/phonet/af_phonet.c
@@ -0,0 +1,468 @@
1/*
2 * File: af_phonet.c
3 *
4 * Phonet protocols family
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <asm/unaligned.h>
29#include <net/sock.h>
30
31#include <linux/if_phonet.h>
32#include <linux/phonet.h>
33#include <net/phonet/phonet.h>
34#include <net/phonet/pn_dev.h>
35
36static struct net_proto_family phonet_proto_family;
37static struct phonet_protocol *phonet_proto_get(int protocol);
38static inline void phonet_proto_put(struct phonet_protocol *pp);
39
40/* protocol family functions */
41
42static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
43{
44 struct sock *sk;
45 struct pn_sock *pn;
46 struct phonet_protocol *pnp;
47 int err;
48
49 if (net != &init_net)
50 return -EAFNOSUPPORT;
51
52 if (!capable(CAP_SYS_ADMIN))
53 return -EPERM;
54
55 if (protocol == 0) {
56 /* Default protocol selection */
57 switch (sock->type) {
58 case SOCK_DGRAM:
59 protocol = PN_PROTO_PHONET;
60 break;
61 default:
62 return -EPROTONOSUPPORT;
63 }
64 }
65
66 pnp = phonet_proto_get(protocol);
67 if (pnp == NULL)
68 return -EPROTONOSUPPORT;
69 if (sock->type != pnp->sock_type) {
70 err = -EPROTONOSUPPORT;
71 goto out;
72 }
73
74 sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot);
75 if (sk == NULL) {
76 err = -ENOMEM;
77 goto out;
78 }
79
80 sock_init_data(sock, sk);
81 sock->state = SS_UNCONNECTED;
82 sock->ops = pnp->ops;
83 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
84 sk->sk_protocol = protocol;
85 pn = pn_sk(sk);
86 pn->sobject = 0;
87 pn->resource = 0;
88 sk->sk_prot->init(sk);
89 err = 0;
90
91out:
92 phonet_proto_put(pnp);
93 return err;
94}
95
96static struct net_proto_family phonet_proto_family = {
97 .family = AF_PHONET,
98 .create = pn_socket_create,
99 .owner = THIS_MODULE,
100};
101
102/* Phonet device header operations */
103static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
104 unsigned short type, const void *daddr,
105 const void *saddr, unsigned len)
106{
107 u8 *media = skb_push(skb, 1);
108
109 if (type != ETH_P_PHONET)
110 return -1;
111
112 if (!saddr)
113 saddr = dev->dev_addr;
114 *media = *(const u8 *)saddr;
115 return 1;
116}
117
118static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr)
119{
120 const u8 *media = skb_mac_header(skb);
121 *haddr = *media;
122 return 1;
123}
124
125struct header_ops phonet_header_ops = {
126 .create = pn_header_create,
127 .parse = pn_header_parse,
128};
129EXPORT_SYMBOL(phonet_header_ops);
130
131/*
132 * Prepends an ISI header and sends a datagram.
133 */
134static int pn_send(struct sk_buff *skb, struct net_device *dev,
135 u16 dst, u16 src, u8 res, u8 irq)
136{
137 struct phonethdr *ph;
138 int err;
139
140 if (skb->len + 2 > 0xffff) {
141 /* Phonet length field would overflow */
142 err = -EMSGSIZE;
143 goto drop;
144 }
145
146 skb_reset_transport_header(skb);
147 WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
148 skb_push(skb, sizeof(struct phonethdr));
149 skb_reset_network_header(skb);
150 ph = pn_hdr(skb);
151 ph->pn_rdev = pn_dev(dst);
152 ph->pn_sdev = pn_dev(src);
153 ph->pn_res = res;
154 ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
155 ph->pn_robj = pn_obj(dst);
156 ph->pn_sobj = pn_obj(src);
157
158 skb->protocol = htons(ETH_P_PHONET);
159 skb->priority = 0;
160 skb->dev = dev;
161
162 if (pn_addr(src) == pn_addr(dst)) {
163 skb_reset_mac_header(skb);
164 skb->pkt_type = PACKET_LOOPBACK;
165 skb_orphan(skb);
166 if (irq)
167 netif_rx(skb);
168 else
169 netif_rx_ni(skb);
170 err = 0;
171 } else {
172 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
173 NULL, NULL, skb->len);
174 if (err < 0) {
175 err = -EHOSTUNREACH;
176 goto drop;
177 }
178 err = dev_queue_xmit(skb);
179 }
180
181 return err;
182drop:
183 kfree_skb(skb);
184 return err;
185}
186
187static int pn_raw_send(const void *data, int len, struct net_device *dev,
188 u16 dst, u16 src, u8 res)
189{
190 struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
191 if (skb == NULL)
192 return -ENOMEM;
193
194 skb_reserve(skb, MAX_PHONET_HEADER);
195 __skb_put(skb, len);
196 skb_copy_to_linear_data(skb, data, len);
197 return pn_send(skb, dev, dst, src, res, 1);
198}
199
200/*
201 * Create a Phonet header for the skb and send it out. Returns
202 * non-zero error code if failed. The skb is freed then.
203 */
204int pn_skb_send(struct sock *sk, struct sk_buff *skb,
205 const struct sockaddr_pn *target)
206{
207 struct net_device *dev;
208 struct pn_sock *pn = pn_sk(sk);
209 int err;
210 u16 src;
211 u8 daddr = pn_sockaddr_get_addr(target), saddr = PN_NO_ADDR;
212
213 err = -EHOSTUNREACH;
214 if (sk->sk_bound_dev_if)
215 dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
216 else
217 dev = phonet_device_get(sock_net(sk));
218 if (!dev || !(dev->flags & IFF_UP))
219 goto drop;
220
221 saddr = phonet_address_get(dev, daddr);
222 if (saddr == PN_NO_ADDR)
223 goto drop;
224
225 src = pn->sobject;
226 if (!pn_addr(src))
227 src = pn_object(saddr, pn_obj(src));
228
229 err = pn_send(skb, dev, pn_sockaddr_get_object(target),
230 src, pn_sockaddr_get_resource(target), 0);
231 dev_put(dev);
232 return err;
233
234drop:
235 kfree_skb(skb);
236 if (dev)
237 dev_put(dev);
238 return err;
239}
240EXPORT_SYMBOL(pn_skb_send);
241
242/* Do not send an error message in response to an error message */
243static inline int can_respond(struct sk_buff *skb)
244{
245 const struct phonethdr *ph;
246 const struct phonetmsg *pm;
247 u8 submsg_id;
248
249 if (!pskb_may_pull(skb, 3))
250 return 0;
251
252 ph = pn_hdr(skb);
253 if (phonet_address_get(skb->dev, ph->pn_rdev) != ph->pn_rdev)
254 return 0; /* we are not the destination */
255 if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5))
256 return 0;
257
258 ph = pn_hdr(skb); /* re-acquires the pointer */
259 pm = pn_msg(skb);
260 if (pm->pn_msg_id != PN_COMMON_MESSAGE)
261 return 1;
262 submsg_id = (ph->pn_res == PN_PREFIX)
263 ? pm->pn_e_submsg_id : pm->pn_submsg_id;
264 if (submsg_id != PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP &&
265 pm->pn_e_submsg_id != PN_COMM_SERVICE_NOT_IDENTIFIED_RESP)
266 return 1;
267 return 0;
268}
269
270static int send_obj_unreachable(struct sk_buff *rskb)
271{
272 const struct phonethdr *oph = pn_hdr(rskb);
273 const struct phonetmsg *opm = pn_msg(rskb);
274 struct phonetmsg resp;
275
276 memset(&resp, 0, sizeof(resp));
277 resp.pn_trans_id = opm->pn_trans_id;
278 resp.pn_msg_id = PN_COMMON_MESSAGE;
279 if (oph->pn_res == PN_PREFIX) {
280 resp.pn_e_res_id = opm->pn_e_res_id;
281 resp.pn_e_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
282 resp.pn_e_orig_msg_id = opm->pn_msg_id;
283 resp.pn_e_status = 0;
284 } else {
285 resp.pn_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
286 resp.pn_orig_msg_id = opm->pn_msg_id;
287 resp.pn_status = 0;
288 }
289 return pn_raw_send(&resp, sizeof(resp), rskb->dev,
290 pn_object(oph->pn_sdev, oph->pn_sobj),
291 pn_object(oph->pn_rdev, oph->pn_robj),
292 oph->pn_res);
293}
294
295static int send_reset_indications(struct sk_buff *rskb)
296{
297 struct phonethdr *oph = pn_hdr(rskb);
298 static const u8 data[4] = {
299 0x00 /* trans ID */, 0x10 /* subscribe msg */,
300 0x00 /* subscription count */, 0x00 /* dummy */
301 };
302
303 return pn_raw_send(data, sizeof(data), rskb->dev,
304 pn_object(oph->pn_sdev, 0x00),
305 pn_object(oph->pn_rdev, oph->pn_robj), 0x10);
306}
307
308
309/* packet type functions */
310
311/*
312 * Stuff received packets to associated sockets.
313 * On error, returns non-zero and releases the skb.
314 */
315static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
316 struct packet_type *pkttype,
317 struct net_device *orig_dev)
318{
319 struct phonethdr *ph;
320 struct sock *sk;
321 struct sockaddr_pn sa;
322 u16 len;
323
324 if (dev_net(dev) != &init_net)
325 goto out;
326
327 /* check we have at least a full Phonet header */
328 if (!pskb_pull(skb, sizeof(struct phonethdr)))
329 goto out;
330
331 /* check that the advertised length is correct */
332 ph = pn_hdr(skb);
333 len = get_unaligned_be16(&ph->pn_length);
334 if (len < 2)
335 goto out;
336 len -= 2;
337 if ((len > skb->len) || pskb_trim(skb, len))
338 goto out;
339 skb_reset_transport_header(skb);
340
341 pn_skb_get_dst_sockaddr(skb, &sa);
342 if (pn_sockaddr_get_addr(&sa) == 0)
343 goto out; /* currently, we cannot be device 0 */
344
345 sk = pn_find_sock_by_sa(&sa);
346 if (sk == NULL) {
347 if (can_respond(skb)) {
348 send_obj_unreachable(skb);
349 send_reset_indications(skb);
350 }
351 goto out;
352 }
353
354 /* Push data to the socket (or other sockets connected to it). */
355 return sk_receive_skb(sk, skb, 0);
356
357out:
358 kfree_skb(skb);
359 return NET_RX_DROP;
360}
361
362static struct packet_type phonet_packet_type = {
363 .type = __constant_htons(ETH_P_PHONET),
364 .dev = NULL,
365 .func = phonet_rcv,
366};
367
368/* Transport protocol registration */
369static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
370static DEFINE_SPINLOCK(proto_tab_lock);
371
372int __init_or_module phonet_proto_register(int protocol,
373 struct phonet_protocol *pp)
374{
375 int err = 0;
376
377 if (protocol >= PHONET_NPROTO)
378 return -EINVAL;
379
380 err = proto_register(pp->prot, 1);
381 if (err)
382 return err;
383
384 spin_lock(&proto_tab_lock);
385 if (proto_tab[protocol])
386 err = -EBUSY;
387 else
388 proto_tab[protocol] = pp;
389 spin_unlock(&proto_tab_lock);
390
391 return err;
392}
393EXPORT_SYMBOL(phonet_proto_register);
394
395void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
396{
397 spin_lock(&proto_tab_lock);
398 BUG_ON(proto_tab[protocol] != pp);
399 proto_tab[protocol] = NULL;
400 spin_unlock(&proto_tab_lock);
401 proto_unregister(pp->prot);
402}
403EXPORT_SYMBOL(phonet_proto_unregister);
404
405static struct phonet_protocol *phonet_proto_get(int protocol)
406{
407 struct phonet_protocol *pp;
408
409 if (protocol >= PHONET_NPROTO)
410 return NULL;
411
412 spin_lock(&proto_tab_lock);
413 pp = proto_tab[protocol];
414 if (pp && !try_module_get(pp->prot->owner))
415 pp = NULL;
416 spin_unlock(&proto_tab_lock);
417
418 return pp;
419}
420
421static inline void phonet_proto_put(struct phonet_protocol *pp)
422{
423 module_put(pp->prot->owner);
424}
425
426/* Module registration */
427static int __init phonet_init(void)
428{
429 int err;
430
431 err = sock_register(&phonet_proto_family);
432 if (err) {
433 printk(KERN_ALERT
434 "phonet protocol family initialization failed\n");
435 return err;
436 }
437
438 phonet_device_init();
439 dev_add_pack(&phonet_packet_type);
440 phonet_netlink_register();
441 phonet_sysctl_init();
442
443 err = isi_register();
444 if (err)
445 goto err;
446 return 0;
447
448err:
449 phonet_sysctl_exit();
450 sock_unregister(AF_PHONET);
451 dev_remove_pack(&phonet_packet_type);
452 phonet_device_exit();
453 return err;
454}
455
456static void __exit phonet_exit(void)
457{
458 isi_unregister();
459 phonet_sysctl_exit();
460 sock_unregister(AF_PHONET);
461 dev_remove_pack(&phonet_packet_type);
462 phonet_device_exit();
463}
464
465module_init(phonet_init);
466module_exit(phonet_exit);
467MODULE_DESCRIPTION("Phonet protocol stack for Linux");
468MODULE_LICENSE("GPL");
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
new file mode 100644
index 000000000000..e087862ed7e4
--- /dev/null
+++ b/net/phonet/datagram.c
@@ -0,0 +1,197 @@
1/*
2 * File: datagram.c
3 *
4 * Datagram (ISI) Phonet sockets
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/socket.h>
28#include <asm/ioctls.h>
29#include <net/sock.h>
30
31#include <linux/phonet.h>
32#include <net/phonet/phonet.h>
33
34static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
35
36/* associated socket ceases to exist */
37static void pn_sock_close(struct sock *sk, long timeout)
38{
39 sk_common_release(sk);
40}
41
42static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
43{
44 struct sk_buff *skb;
45 int answ;
46
47 switch (cmd) {
48 case SIOCINQ:
49 lock_sock(sk);
50 skb = skb_peek(&sk->sk_receive_queue);
51 answ = skb ? skb->len : 0;
52 release_sock(sk);
53 return put_user(answ, (int __user *)arg);
54 }
55
56 return -ENOIOCTLCMD;
57}
58
59/* Destroy socket. All references are gone. */
60static void pn_destruct(struct sock *sk)
61{
62 skb_queue_purge(&sk->sk_receive_queue);
63}
64
65static int pn_init(struct sock *sk)
66{
67 sk->sk_destruct = pn_destruct;
68 return 0;
69}
70
71static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
72 struct msghdr *msg, size_t len)
73{
74 struct sockaddr_pn *target;
75 struct sk_buff *skb;
76 int err;
77
78 if (msg->msg_flags & MSG_OOB)
79 return -EOPNOTSUPP;
80
81 if (msg->msg_name == NULL)
82 return -EDESTADDRREQ;
83
84 if (msg->msg_namelen < sizeof(struct sockaddr_pn))
85 return -EINVAL;
86
87 target = (struct sockaddr_pn *)msg->msg_name;
88 if (target->spn_family != AF_PHONET)
89 return -EAFNOSUPPORT;
90
91 skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
92 msg->msg_flags & MSG_DONTWAIT, &err);
93 if (skb == NULL)
94 return err;
95 skb_reserve(skb, MAX_PHONET_HEADER);
96
97 err = memcpy_fromiovec((void *)skb_put(skb, len), msg->msg_iov, len);
98 if (err < 0) {
99 kfree_skb(skb);
100 return err;
101 }
102
103 /*
104 * Fill in the Phonet header and
105 * finally pass the packet forwards.
106 */
107 err = pn_skb_send(sk, skb, target);
108
109 /* If ok, return len. */
110 return (err >= 0) ? len : err;
111}
112
113static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
114 struct msghdr *msg, size_t len, int noblock,
115 int flags, int *addr_len)
116{
117 struct sk_buff *skb = NULL;
118 struct sockaddr_pn sa;
119 int rval = -EOPNOTSUPP;
120 int copylen;
121
122 if (flags & MSG_OOB)
123 goto out_nofree;
124
125 if (addr_len)
126 *addr_len = sizeof(sa);
127
128 skb = skb_recv_datagram(sk, flags, noblock, &rval);
129 if (skb == NULL)
130 goto out_nofree;
131
132 pn_skb_get_src_sockaddr(skb, &sa);
133
134 copylen = skb->len;
135 if (len < copylen) {
136 msg->msg_flags |= MSG_TRUNC;
137 copylen = len;
138 }
139
140 rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen);
141 if (rval) {
142 rval = -EFAULT;
143 goto out;
144 }
145
146 rval = (flags & MSG_TRUNC) ? skb->len : copylen;
147
148 if (msg->msg_name != NULL)
149 memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
150
151out:
152 skb_free_datagram(sk, skb);
153
154out_nofree:
155 return rval;
156}
157
158/* Queue an skb for a sock. */
159static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
160{
161 int err = sock_queue_rcv_skb(sk, skb);
162 if (err < 0)
163 kfree_skb(skb);
164 return err ? NET_RX_DROP : NET_RX_SUCCESS;
165}
166
167/* Module registration */
168static struct proto pn_proto = {
169 .close = pn_sock_close,
170 .ioctl = pn_ioctl,
171 .init = pn_init,
172 .sendmsg = pn_sendmsg,
173 .recvmsg = pn_recvmsg,
174 .backlog_rcv = pn_backlog_rcv,
175 .hash = pn_sock_hash,
176 .unhash = pn_sock_unhash,
177 .get_port = pn_sock_get_port,
178 .obj_size = sizeof(struct pn_sock),
179 .owner = THIS_MODULE,
180 .name = "PHONET",
181};
182
183static struct phonet_protocol pn_dgram_proto = {
184 .ops = &phonet_dgram_ops,
185 .prot = &pn_proto,
186 .sock_type = SOCK_DGRAM,
187};
188
189int __init isi_register(void)
190{
191 return phonet_proto_register(PN_PROTO_PHONET, &pn_dgram_proto);
192}
193
194void __exit isi_unregister(void)
195{
196 phonet_proto_unregister(PN_PROTO_PHONET, &pn_dgram_proto);
197}
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
new file mode 100644
index 000000000000..53be9fc82aaa
--- /dev/null
+++ b/net/phonet/pn_dev.c
@@ -0,0 +1,208 @@
1/*
2 * File: pn_dev.c
3 *
4 * Phonet network device
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/net.h>
28#include <linux/netdevice.h>
29#include <linux/phonet.h>
30#include <net/sock.h>
31#include <net/phonet/pn_dev.h>
32
/* Global list of Phonet-enabled network devices.
 * when accessing, remember to lock with spin_lock(&pndevs.lock); */
struct phonet_device_list pndevs = {
	.list = LIST_HEAD_INIT(pndevs.list),
	.lock = __SPIN_LOCK_UNLOCKED(pndevs.lock),
};
38
39/* Allocate new Phonet device. */
40static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
41{
42 struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
43 if (pnd == NULL)
44 return NULL;
45 pnd->netdev = dev;
46 bitmap_zero(pnd->addrs, 64);
47
48 list_add(&pnd->list, &pndevs.list);
49 return pnd;
50}
51
52static struct phonet_device *__phonet_get(struct net_device *dev)
53{
54 struct phonet_device *pnd;
55
56 list_for_each_entry(pnd, &pndevs.list, list) {
57 if (pnd->netdev == dev)
58 return pnd;
59 }
60 return NULL;
61}
62
/* Unlink and free per-device Phonet data. Caller holds pndevs.lock. */
static void __phonet_device_free(struct phonet_device *pnd)
{
	list_del(&pnd->list);
	kfree(pnd);
}
68
69struct net_device *phonet_device_get(struct net *net)
70{
71 struct phonet_device *pnd;
72 struct net_device *dev;
73
74 spin_lock_bh(&pndevs.lock);
75 list_for_each_entry(pnd, &pndevs.list, list) {
76 dev = pnd->netdev;
77 BUG_ON(!dev);
78
79 if (dev_net(dev) == net &&
80 (dev->reg_state == NETREG_REGISTERED) &&
81 ((pnd->netdev->flags & IFF_UP)) == IFF_UP)
82 break;
83 dev = NULL;
84 }
85 if (dev)
86 dev_hold(dev);
87 spin_unlock_bh(&pndevs.lock);
88 return dev;
89}
90
91int phonet_address_add(struct net_device *dev, u8 addr)
92{
93 struct phonet_device *pnd;
94 int err = 0;
95
96 spin_lock_bh(&pndevs.lock);
97 /* Find or create Phonet-specific device data */
98 pnd = __phonet_get(dev);
99 if (pnd == NULL)
100 pnd = __phonet_device_alloc(dev);
101 if (unlikely(pnd == NULL))
102 err = -ENOMEM;
103 else if (test_and_set_bit(addr >> 2, pnd->addrs))
104 err = -EEXIST;
105 spin_unlock_bh(&pndevs.lock);
106 return err;
107}
108
109int phonet_address_del(struct net_device *dev, u8 addr)
110{
111 struct phonet_device *pnd;
112 int err = 0;
113
114 spin_lock_bh(&pndevs.lock);
115 pnd = __phonet_get(dev);
116 if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs))
117 err = -EADDRNOTAVAIL;
118 if (bitmap_empty(pnd->addrs, 64))
119 __phonet_device_free(pnd);
120 spin_unlock_bh(&pndevs.lock);
121 return err;
122}
123
124/* Gets a source address toward a destination, through a interface. */
125u8 phonet_address_get(struct net_device *dev, u8 addr)
126{
127 struct phonet_device *pnd;
128
129 spin_lock_bh(&pndevs.lock);
130 pnd = __phonet_get(dev);
131 if (pnd) {
132 BUG_ON(bitmap_empty(pnd->addrs, 64));
133
134 /* Use same source address as destination, if possible */
135 if (!test_bit(addr >> 2, pnd->addrs))
136 addr = find_first_bit(pnd->addrs, 64) << 2;
137 } else
138 addr = PN_NO_ADDR;
139 spin_unlock_bh(&pndevs.lock);
140 return addr;
141}
142
143int phonet_address_lookup(u8 addr)
144{
145 struct phonet_device *pnd;
146
147 spin_lock_bh(&pndevs.lock);
148 list_for_each_entry(pnd, &pndevs.list, list) {
149 /* Don't allow unregistering devices! */
150 if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
151 ((pnd->netdev->flags & IFF_UP)) != IFF_UP)
152 continue;
153
154 if (test_bit(addr >> 2, pnd->addrs)) {
155 spin_unlock_bh(&pndevs.lock);
156 return 0;
157 }
158 }
159 spin_unlock_bh(&pndevs.lock);
160 return -EADDRNOTAVAIL;
161}
162
163/* notify Phonet of device events */
164static int phonet_device_notify(struct notifier_block *me, unsigned long what,
165 void *arg)
166{
167 struct net_device *dev = arg;
168
169 if (what == NETDEV_UNREGISTER) {
170 struct phonet_device *pnd;
171
172 /* Destroy phonet-specific device data */
173 spin_lock_bh(&pndevs.lock);
174 pnd = __phonet_get(dev);
175 if (pnd)
176 __phonet_device_free(pnd);
177 spin_unlock_bh(&pndevs.lock);
178 }
179 return 0;
180
181}
182
/* Cleans up per-device data when a netdevice unregisters. */
static struct notifier_block phonet_device_notifier = {
	.notifier_call = phonet_device_notify,
	.priority = 0,
};

/* Initialize Phonet devices list */
void phonet_device_init(void)
{
	register_netdevice_notifier(&phonet_device_notifier);
}
193
/* Tear down all Phonet device state on module unload. */
void phonet_device_exit(void)
{
	struct phonet_device *pnd, *n;

	/* Stop servicing rtnetlink requests before freeing anything,
	 * then exclude concurrent netdevice events via the RTNL. */
	rtnl_unregister_all(PF_PHONET);
	rtnl_lock();
	spin_lock_bh(&pndevs.lock);

	/* _safe variant: __phonet_device_free() unlinks entries as we go. */
	list_for_each_entry_safe(pnd, n, &pndevs.list, list)
		__phonet_device_free(pnd);

	spin_unlock_bh(&pndevs.lock);
	rtnl_unlock();
	unregister_netdevice_notifier(&phonet_device_notifier);
}
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
new file mode 100644
index 000000000000..b1770d66bc8d
--- /dev/null
+++ b/net/phonet/pn_netlink.c
@@ -0,0 +1,165 @@
1/*
2 * File: pn_netlink.c
3 *
4 * Phonet netlink interface
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/netlink.h>
28#include <linux/phonet.h>
29#include <net/sock.h>
30#include <net/phonet/pn_dev.h>
31
static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
		     u32 pid, u32 seq, int event);

/* Broadcast an address event (RTM_NEWADDR/RTM_DELADDR) for (dev, addr)
 * to RTNLGRP_PHONET_IFADDR listeners. */
static void rtmsg_notify(int event, struct net_device *dev, u8 addr)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* Message = ifaddrmsg header + one 1-byte IFA_LOCAL attribute. */
	skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
			nla_total_size(1), GFP_KERNEL);
	if (skb == NULL)
		goto errout;
	err = fill_addr(skb, dev, addr, 0, 0, event);
	if (err < 0) {
		/* -EMSGSIZE means the size estimate above is wrong. */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, dev_net(dev), 0,
			  RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
errout:
	/* Record the delivery failure on the multicast group. */
	if (err < 0)
		rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
}
56
/* Attribute policy: the only attribute consumed is the one-byte
 * local Phonet address. */
static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
	[IFA_LOCAL] = { .type = NLA_U8 },
};

/* RTM_NEWADDR/RTM_DELADDR handler: add or remove a Phonet address on
 * the interface identified by ifa_index. Requires CAP_SYS_ADMIN and
 * runs under the RTNL (rtnetlink doit path). */
static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[IFA_MAX+1];
	struct net_device *dev;
	struct ifaddrmsg *ifm;
	int err;
	u8 pnaddr;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_phonet_policy);
	if (err < 0)
		return err;

	ifm = nlmsg_data(nlh);
	if (tb[IFA_LOCAL] == NULL)
		return -EINVAL;
	pnaddr = nla_get_u8(tb[IFA_LOCAL]);
	if (pnaddr & 3)
		/* Phonet addresses only have 6 high-order bits */
		return -EINVAL;

	/* __dev_get_by_index: no reference taken; safe under the RTNL. */
	dev = __dev_get_by_index(net, ifm->ifa_index);
	if (dev == NULL)
		return -ENODEV;

	if (nlh->nlmsg_type == RTM_NEWADDR)
		err = phonet_address_add(dev, pnaddr);
	else
		err = phonet_address_del(dev, pnaddr);
	if (!err)
		/* Tell listeners about the successful change. */
		rtmsg_notify(nlh->nlmsg_type, dev, pnaddr);
	return err;
}
99
/* Append one address message for (dev, addr) to @skb.
 * Returns the total message length, or -EMSGSIZE when the skb has no
 * room (the partial message is cancelled so skb stays consistent). */
static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
		     u32 pid, u32 seq, int event)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_PHONET;
	ifm->ifa_prefixlen = 0;
	ifm->ifa_flags = IFA_F_PERMANENT;
	ifm->ifa_scope = RT_SCOPE_LINK;
	ifm->ifa_index = dev->ifindex;
	/* NLA_PUT_U8 jumps to nla_put_failure on overflow. */
	NLA_PUT_U8(skb, IFA_LOCAL, addr);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
123
/* RTM_GETADDR dump callback: emit one RTM_NEWADDR message per
 * configured address on every Phonet device.
 * Resume state across invocations lives in cb->args[0] (device index)
 * and cb->args[1] (address index within that device). */
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct phonet_device *pnd;
	int dev_idx = 0, dev_start_idx = cb->args[0];
	int addr_idx = 0, addr_start_idx = cb->args[1];

	spin_lock_bh(&pndevs.lock);
	list_for_each_entry(pnd, &pndevs.list, list) {
		u8 addr;

		/* Past the device we stopped at: restart addresses at 0. */
		if (dev_idx > dev_start_idx)
			addr_start_idx = 0;
		if (dev_idx++ < dev_start_idx)
			continue;

		addr_idx = 0;
		/* Walk set bits; each bit n encodes Phonet address n << 2. */
		for (addr = find_first_bit(pnd->addrs, 64); addr < 64;
		     addr = find_next_bit(pnd->addrs, 64, 1+addr)) {
			if (addr_idx++ < addr_start_idx)
				continue;

			/* skb full: stop and let netlink call us again. */
			if (fill_addr(skb, pnd->netdev, addr << 2,
				      NETLINK_CB(cb->skb).pid,
				      cb->nlh->nlmsg_seq, RTM_NEWADDR))
				goto out;
		}
	}

out:
	spin_unlock_bh(&pndevs.lock);
	/* Save progress for the next dump round. */
	cb->args[0] = dev_idx;
	cb->args[1] = addr_idx;

	return skb->len;
}
159
/* Hook the Phonet address handlers into the rtnetlink dispatch table. */
void __init phonet_netlink_register(void)
{
	rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL);
	rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL);
	rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit);
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
new file mode 100644
index 000000000000..dfd4061646db
--- /dev/null
+++ b/net/phonet/socket.c
@@ -0,0 +1,312 @@
1/*
2 * File: socket.c
3 *
4 * Phonet sockets
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * Original author: Sakari Ailus <sakari.ailus@nokia.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * version 2 as published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/net.h>
28#include <net/sock.h>
29#include <net/tcp_states.h>
30
31#include <linux/phonet.h>
32#include <net/phonet/phonet.h>
33#include <net/phonet/pn_dev.h>
34
35static int pn_socket_release(struct socket *sock)
36{
37 struct sock *sk = sock->sk;
38
39 if (sk) {
40 sock->sk = NULL;
41 sk->sk_prot->close(sk, 0);
42 }
43 return 0;
44}
45
/* Global table of bound Phonet sockets, protected by its own lock. */
static struct {
	struct hlist_head hlist;
	spinlock_t lock;
} pnsocks = {
	.hlist = HLIST_HEAD_INIT,
	.lock = __SPIN_LOCK_UNLOCKED(pnsocks.lock),
};
53
/*
 * Find address based on socket address, match only certain fields.
 * Also grab sock if it was found. Remember to sock_put it later.
 */
struct sock *pn_find_sock_by_sa(const struct sockaddr_pn *spn)
{
	struct hlist_node *node;
	struct sock *sknode;
	struct sock *rval = NULL;
	u16 obj = pn_sockaddr_get_object(spn);
	u8 res = spn->spn_resource;

	spin_lock_bh(&pnsocks.lock);

	sk_for_each(sknode, node, &pnsocks.hlist) {
		struct pn_sock *pn = pn_sk(sknode);
		BUG_ON(!pn->sobject); /* unbound socket */

		/* Match rule: non-zero port in the request selects by
		 * port; a zero port selects by resource instead. */
		if (pn_port(obj)) {
			/* Look up socket by port */
			if (pn_port(pn->sobject) != pn_port(obj))
				continue;
		} else {
			/* If port is zero, look up by resource */
			if (pn->resource != res)
				continue;
		}
		/* A socket bound to a specific device address must also
		 * match the requested address. */
		if (pn_addr(pn->sobject)
		 && pn_addr(pn->sobject) != pn_addr(obj))
			continue;

		rval = sknode;
		sock_hold(sknode);
		break;
	}

	spin_unlock_bh(&pnsocks.lock);

	return rval;

}
95
/* Insert a bound socket into the global socket table (enables RX). */
void pn_sock_hash(struct sock *sk)
{
	spin_lock_bh(&pnsocks.lock);
	sk_add_node(sk, &pnsocks.hlist);
	spin_unlock_bh(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_hash);

/* Remove a socket from the global socket table. */
void pn_sock_unhash(struct sock *sk)
{
	spin_lock_bh(&pnsocks.lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&pnsocks.lock);
}
EXPORT_SYMBOL(pn_sock_unhash);
111
/* bind() for Phonet sockets: validate the address, allocate the port,
 * then hash the socket so it starts receiving. */
static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
	int err;
	u16 handle;
	u8 saddr;

	/* Protocols may override the whole bind sequence. */
	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, addr, len);

	if (len < sizeof(struct sockaddr_pn))
		return -EINVAL;
	if (spn->spn_family != AF_PHONET)
		return -EAFNOSUPPORT;

	handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
	saddr = pn_addr(handle);
	/* A non-zero address must be locally configured. */
	if (saddr && phonet_address_lookup(saddr))
		return -EADDRNOTAVAIL;

	lock_sock(sk);
	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
		err = -EINVAL; /* attempt to rebind */
		goto out;
	}
	err = sk->sk_prot->get_port(sk, pn_port(handle));
	if (err)
		goto out;

	/* get_port() sets the port, bind() sets the address if applicable */
	pn->sobject = pn_object(saddr, pn_port(pn->sobject));
	pn->resource = spn->spn_resource;

	/* Enable RX on the socket */
	sk->sk_prot->hash(sk);
out:
	release_sock(sk);
	return err;
}
153
154static int pn_socket_autobind(struct socket *sock)
155{
156 struct sockaddr_pn sa;
157 int err;
158
159 memset(&sa, 0, sizeof(sa));
160 sa.spn_family = AF_PHONET;
161 err = pn_socket_bind(sock, (struct sockaddr *)&sa,
162 sizeof(struct sockaddr_pn));
163 if (err != -EINVAL)
164 return err;
165 BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
166 return 0; /* socket was already bound */
167}
168
169static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
170 int *sockaddr_len, int peer)
171{
172 struct sock *sk = sock->sk;
173 struct pn_sock *pn = pn_sk(sk);
174
175 memset(addr, 0, sizeof(struct sockaddr_pn));
176 addr->sa_family = AF_PHONET;
177 if (!peer) /* Race with bind() here is userland's problem. */
178 pn_sockaddr_set_object((struct sockaddr_pn *)addr,
179 pn->sobject);
180
181 *sockaddr_len = sizeof(struct sockaddr_pn);
182 return 0;
183}
184
/* ioctl(): handle SIOCPNGETOBJECT (compute the full local object for a
 * destination handle) here; everything else goes to the protocol. */
static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
			   unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pn_sock *pn = pn_sk(sk);

	if (cmd == SIOCPNGETOBJECT) {
		struct net_device *dev;
		u16 handle;
		u8 saddr;

		if (get_user(handle, (__u16 __user *)arg))
			return -EFAULT;

		lock_sock(sk);
		/* Prefer the device the socket is bound to, else any
		 * usable Phonet device in this namespace. */
		if (sk->sk_bound_dev_if)
			dev = dev_get_by_index(sock_net(sk),
					       sk->sk_bound_dev_if);
		else
			dev = phonet_device_get(sock_net(sk));
		if (dev && (dev->flags & IFF_UP))
			saddr = phonet_address_get(dev, pn_addr(handle));
		else
			saddr = PN_NO_ADDR;
		release_sock(sk);

		if (dev)
			dev_put(dev);
		if (saddr == PN_NO_ADDR)
			return -EHOSTUNREACH;

		/* Combine the chosen address with our bound port. */
		handle = pn_object(saddr, pn_port(pn->sobject));
		return put_user(handle, (__u16 __user *)arg);
	}

	return sk->sk_prot->ioctl(sk, cmd, arg);
}
222
223static int pn_socket_sendmsg(struct kiocb *iocb, struct socket *sock,
224 struct msghdr *m, size_t total_len)
225{
226 struct sock *sk = sock->sk;
227
228 if (pn_socket_autobind(sock))
229 return -EAGAIN;
230
231 return sk->sk_prot->sendmsg(iocb, sk, m, total_len);
232}
233
/* proto_ops for PF_PHONET SOCK_DGRAM sockets. Connectionless: connect,
 * accept, listen etc. are the sock_no_* stubs. */
const struct proto_ops phonet_dgram_ops = {
	.family		= AF_PHONET,
	.owner		= THIS_MODULE,
	.release	= pn_socket_release,
	.bind		= pn_socket_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pn_socket_getname,
	.poll		= datagram_poll,
	.ioctl		= pn_socket_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = sock_no_setsockopt,
	.compat_getsockopt = sock_no_getsockopt,
#endif
	.sendmsg	= pn_socket_sendmsg,
	.recvmsg	= sock_common_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
258
/* Serializes port allocation across all Phonet sockets. */
static DEFINE_MUTEX(port_mutex);

/* allocate port for a socket */
int pn_sock_get_port(struct sock *sk, unsigned short sport)
{
	/* Rotating cursor so successive automatic binds get successive
	 * ports; persists across calls under port_mutex. */
	static int port_cur;
	struct pn_sock *pn = pn_sk(sk);
	struct sockaddr_pn try_sa;
	struct sock *tmpsk;

	memset(&try_sa, 0, sizeof(struct sockaddr_pn));
	try_sa.spn_family = AF_PHONET;

	mutex_lock(&port_mutex);

	if (!sport) {
		/* search free port */
		int port, pmin, pmax;

		phonet_get_local_port_range(&pmin, &pmax);
		/* At most pmax-pmin+1 probes, wrapping at the range end. */
		for (port = pmin; port <= pmax; port++) {
			port_cur++;
			if (port_cur < pmin || port_cur > pmax)
				port_cur = pmin;

			pn_sockaddr_set_port(&try_sa, port_cur);
			tmpsk = pn_find_sock_by_sa(&try_sa);
			if (tmpsk == NULL) {
				sport = port_cur;
				goto found;
			} else
				/* Drop the ref pn_find_sock_by_sa took. */
				sock_put(tmpsk);
		}
	} else {
		/* try to find specific port */
		pn_sockaddr_set_port(&try_sa, sport);
		tmpsk = pn_find_sock_by_sa(&try_sa);
		if (tmpsk == NULL)
			/* No sock there! We can use that port... */
			goto found;
		else
			sock_put(tmpsk);
	}
	mutex_unlock(&port_mutex);

	/* the port must be in use already */
	return -EADDRINUSE;

found:
	mutex_unlock(&port_mutex);
	/* Keep the bound address, install the chosen port. */
	pn->sobject = pn_object(pn_addr(pn->sobject), sport);
	return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
new file mode 100644
index 000000000000..600a4309b8c8
--- /dev/null
+++ b/net/phonet/sysctl.c
@@ -0,0 +1,113 @@
1/*
2 * File: sysctl.c
3 *
4 * Phonet /proc/sys/net/phonet interface implementation
5 *
6 * Copyright (C) 2008 Nokia Corporation.
7 *
8 * Contact: Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 */
24
25#include <linux/seqlock.h>
26#include <linux/sysctl.h>
27#include <linux/errno.h>
28#include <linux/init.h>
29
/* Default dynamic port allocation range. */
#define DYNAMIC_PORT_MIN	0x40
#define DYNAMIC_PORT_MAX	0x7f

/* Seqlock-protected [min, max] local port range, plus the hard bounds
 * enforced on sysctl writes through proc_dointvec_minmax(). */
static DEFINE_SEQLOCK(local_port_range_lock);
static int local_port_range_min[2] = {0, 0};
static int local_port_range_max[2] = {1023, 1023};
static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX};
static struct ctl_table_header *phonet_table_hrd;
38
39static void set_local_port_range(int range[2])
40{
41 write_seqlock(&local_port_range_lock);
42 local_port_range[0] = range[0];
43 local_port_range[1] = range[1];
44 write_sequnlock(&local_port_range_lock);
45}
46
/* Read the current local port range; either pointer may be NULL.
 * The seqlock retry loop guarantees a consistent min/max pair. */
void phonet_get_local_port_range(int *min, int *max)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&local_port_range_lock);
		if (min)
			*min = local_port_range[0];
		if (max)
			*max = local_port_range[1];
	} while (read_seqretry(&local_port_range_lock, seq));
}
58
/* sysctl handler for net.phonet.local_port_range: parse into a scratch
 * pair (bounds-checked by proc_dointvec_minmax), reject inverted
 * ranges, and publish valid writes under the seqlock. */
static int proc_local_port_range(ctl_table *table, int write, struct file *filp,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	int ret;
	int range[2] = {local_port_range[0], local_port_range[1]};
	/* Shadow table so parsing never touches the live range. */
	ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &local_port_range_min,
		.extra2 = &local_port_range_max,
	};

	ret = proc_dointvec_minmax(&tmp, write, filp, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(range);
	}

	return ret;
}
84
/* Sysctl table: exposes local_port_range through the custom handler. */
static struct ctl_table phonet_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "local_port_range",
		.data		= &local_port_range,
		.maxlen		= sizeof(local_port_range),
		.mode		= 0644,
		.proc_handler	= &proc_local_port_range,
		.strategy	= NULL,
	},
	{ .ctl_name = 0 }
};

/* Mount point: /proc/sys/net/phonet/ */
struct ctl_path phonet_ctl_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "phonet", .ctl_name = CTL_UNNUMBERED, },
	{ },
};
103
104int __init phonet_sysctl_init(void)
105{
106 phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table);
107 return phonet_table_hrd == NULL ? -ENOMEM : 0;
108}
109
/* Remove the Phonet sysctl entries on module unload. */
void phonet_sysctl_exit(void)
{
	unregister_sysctl_table(phonet_table_hrd);
}
diff --git a/net/rfkill/rfkill-input.h b/net/rfkill/rfkill-input.h
index f63d05045685..bbfa646157c6 100644
--- a/net/rfkill/rfkill-input.h
+++ b/net/rfkill/rfkill-input.h
@@ -13,5 +13,6 @@
13 13
14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state); 14void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
15void rfkill_epo(void); 15void rfkill_epo(void);
16void rfkill_restore_states(void);
16 17
17#endif /* __RFKILL_INPUT_H */ 18#endif /* __RFKILL_INPUT_H */
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 74aecc098bad..ea0dc04b3c77 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -37,14 +37,20 @@ MODULE_DESCRIPTION("RF switch support");
37MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
38 38
39static LIST_HEAD(rfkill_list); /* list of registered rf switches */ 39static LIST_HEAD(rfkill_list); /* list of registered rf switches */
40static DEFINE_MUTEX(rfkill_mutex); 40static DEFINE_MUTEX(rfkill_global_mutex);
41 41
42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED; 42static unsigned int rfkill_default_state = RFKILL_STATE_UNBLOCKED;
43module_param_named(default_state, rfkill_default_state, uint, 0444); 43module_param_named(default_state, rfkill_default_state, uint, 0444);
44MODULE_PARM_DESC(default_state, 44MODULE_PARM_DESC(default_state,
45 "Default initial state for all radio types, 0 = radio off"); 45 "Default initial state for all radio types, 0 = radio off");
46 46
47static enum rfkill_state rfkill_states[RFKILL_TYPE_MAX]; 47struct rfkill_gsw_state {
48 enum rfkill_state current_state;
49 enum rfkill_state default_state;
50};
51
52static struct rfkill_gsw_state rfkill_global_states[RFKILL_TYPE_MAX];
53static unsigned long rfkill_states_lockdflt[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
48 54
49static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list); 55static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
50 56
@@ -70,6 +76,7 @@ static BLOCKING_NOTIFIER_HEAD(rfkill_notifier_list);
70 */ 76 */
71int register_rfkill_notifier(struct notifier_block *nb) 77int register_rfkill_notifier(struct notifier_block *nb)
72{ 78{
79 BUG_ON(!nb);
73 return blocking_notifier_chain_register(&rfkill_notifier_list, nb); 80 return blocking_notifier_chain_register(&rfkill_notifier_list, nb);
74} 81}
75EXPORT_SYMBOL_GPL(register_rfkill_notifier); 82EXPORT_SYMBOL_GPL(register_rfkill_notifier);
@@ -85,6 +92,7 @@ EXPORT_SYMBOL_GPL(register_rfkill_notifier);
85 */ 92 */
86int unregister_rfkill_notifier(struct notifier_block *nb) 93int unregister_rfkill_notifier(struct notifier_block *nb)
87{ 94{
95 BUG_ON(!nb);
88 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb); 96 return blocking_notifier_chain_unregister(&rfkill_notifier_list, nb);
89} 97}
90EXPORT_SYMBOL_GPL(unregister_rfkill_notifier); 98EXPORT_SYMBOL_GPL(unregister_rfkill_notifier);
@@ -195,6 +203,11 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
195 * BLOCK even a transmitter that is already in state 203 * BLOCK even a transmitter that is already in state
196 * RFKILL_STATE_HARD_BLOCKED */ 204 * RFKILL_STATE_HARD_BLOCKED */
197 break; 205 break;
206 default:
207 WARN(1, KERN_WARNING
208 "rfkill: illegal state %d passed as parameter "
209 "to rfkill_toggle_radio\n", state);
210 return -EINVAL;
198 } 211 }
199 212
200 if (force || state != rfkill->state) { 213 if (force || state != rfkill->state) {
@@ -213,22 +226,29 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
213} 226}
214 227
215/** 228/**
216 * rfkill_switch_all - Toggle state of all switches of given type 229 * __rfkill_switch_all - Toggle state of all switches of given type
217 * @type: type of interfaces to be affected 230 * @type: type of interfaces to be affected
218 * @state: the new state 231 * @state: the new state
219 * 232 *
220 * This function toggles the state of all switches of given type, 233 * This function toggles the state of all switches of given type,
221 * unless a specific switch is claimed by userspace (in which case, 234 * unless a specific switch is claimed by userspace (in which case,
222 * that switch is left alone) or suspended. 235 * that switch is left alone) or suspended.
236 *
237 * Caller must have acquired rfkill_global_mutex.
223 */ 238 */
224void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state) 239static void __rfkill_switch_all(const enum rfkill_type type,
240 const enum rfkill_state state)
225{ 241{
226 struct rfkill *rfkill; 242 struct rfkill *rfkill;
227 243
228 mutex_lock(&rfkill_mutex); 244 if (WARN((state >= RFKILL_STATE_MAX || type >= RFKILL_TYPE_MAX),
229 245 KERN_WARNING
230 rfkill_states[type] = state; 246 "rfkill: illegal state %d or type %d "
247 "passed as parameter to __rfkill_switch_all\n",
248 state, type))
249 return;
231 250
251 rfkill_global_states[type].current_state = state;
232 list_for_each_entry(rfkill, &rfkill_list, node) { 252 list_for_each_entry(rfkill, &rfkill_list, node) {
233 if ((!rfkill->user_claim) && (rfkill->type == type)) { 253 if ((!rfkill->user_claim) && (rfkill->type == type)) {
234 mutex_lock(&rfkill->mutex); 254 mutex_lock(&rfkill->mutex);
@@ -236,8 +256,21 @@ void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
236 mutex_unlock(&rfkill->mutex); 256 mutex_unlock(&rfkill->mutex);
237 } 257 }
238 } 258 }
259}
239 260
240 mutex_unlock(&rfkill_mutex); 261/**
262 * rfkill_switch_all - Toggle state of all switches of given type
263 * @type: type of interfaces to be affected
264 * @state: the new state
265 *
266 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @state).
267 * Please refer to __rfkill_switch_all() for details.
268 */
269void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
270{
271 mutex_lock(&rfkill_global_mutex);
272 __rfkill_switch_all(type, state);
273 mutex_unlock(&rfkill_global_mutex);
241} 274}
242EXPORT_SYMBOL(rfkill_switch_all); 275EXPORT_SYMBOL(rfkill_switch_all);
243 276
@@ -245,23 +278,53 @@ EXPORT_SYMBOL(rfkill_switch_all);
245 * rfkill_epo - emergency power off all transmitters 278 * rfkill_epo - emergency power off all transmitters
246 * 279 *
247 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED, 280 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
248 * ignoring everything in its path but rfkill_mutex and rfkill->mutex. 281 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
282 *
283 * The global state before the EPO is saved and can be restored later
284 * using rfkill_restore_states().
249 */ 285 */
250void rfkill_epo(void) 286void rfkill_epo(void)
251{ 287{
252 struct rfkill *rfkill; 288 struct rfkill *rfkill;
289 int i;
290
291 mutex_lock(&rfkill_global_mutex);
253 292
254 mutex_lock(&rfkill_mutex);
255 list_for_each_entry(rfkill, &rfkill_list, node) { 293 list_for_each_entry(rfkill, &rfkill_list, node) {
256 mutex_lock(&rfkill->mutex); 294 mutex_lock(&rfkill->mutex);
257 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); 295 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
258 mutex_unlock(&rfkill->mutex); 296 mutex_unlock(&rfkill->mutex);
259 } 297 }
260 mutex_unlock(&rfkill_mutex); 298 for (i = 0; i < RFKILL_TYPE_MAX; i++) {
299 rfkill_global_states[i].default_state =
300 rfkill_global_states[i].current_state;
301 rfkill_global_states[i].current_state =
302 RFKILL_STATE_SOFT_BLOCKED;
303 }
304 mutex_unlock(&rfkill_global_mutex);
261} 305}
262EXPORT_SYMBOL_GPL(rfkill_epo); 306EXPORT_SYMBOL_GPL(rfkill_epo);
263 307
264/** 308/**
309 * rfkill_restore_states - restore global states
310 *
311 * Restore (and sync switches to) the global state from the
312 * states in rfkill_default_states. This can undo the effects of
313 * a call to rfkill_epo().
314 */
315void rfkill_restore_states(void)
316{
317 int i;
318
319 mutex_lock(&rfkill_global_mutex);
320
321 for (i = 0; i < RFKILL_TYPE_MAX; i++)
322 __rfkill_switch_all(i, rfkill_global_states[i].default_state);
323 mutex_unlock(&rfkill_global_mutex);
324}
325EXPORT_SYMBOL_GPL(rfkill_restore_states);
326
327/**
265 * rfkill_force_state - Force the internal rfkill radio state 328 * rfkill_force_state - Force the internal rfkill radio state
266 * @rfkill: pointer to the rfkill class to modify. 329 * @rfkill: pointer to the rfkill class to modify.
267 * @state: the current radio state the class should be forced to. 330 * @state: the current radio state the class should be forced to.
@@ -282,9 +345,11 @@ int rfkill_force_state(struct rfkill *rfkill, enum rfkill_state state)
282{ 345{
283 enum rfkill_state oldstate; 346 enum rfkill_state oldstate;
284 347
285 if (state != RFKILL_STATE_SOFT_BLOCKED && 348 BUG_ON(!rfkill);
286 state != RFKILL_STATE_UNBLOCKED && 349 if (WARN((state >= RFKILL_STATE_MAX),
287 state != RFKILL_STATE_HARD_BLOCKED) 350 KERN_WARNING
351 "rfkill: illegal state %d passed as parameter "
352 "to rfkill_force_state\n", state))
288 return -EINVAL; 353 return -EINVAL;
289 354
290 mutex_lock(&rfkill->mutex); 355 mutex_lock(&rfkill->mutex);
@@ -352,12 +417,16 @@ static ssize_t rfkill_state_store(struct device *dev,
352 const char *buf, size_t count) 417 const char *buf, size_t count)
353{ 418{
354 struct rfkill *rfkill = to_rfkill(dev); 419 struct rfkill *rfkill = to_rfkill(dev);
355 unsigned int state = simple_strtoul(buf, NULL, 0); 420 unsigned long state;
356 int error; 421 int error;
357 422
358 if (!capable(CAP_NET_ADMIN)) 423 if (!capable(CAP_NET_ADMIN))
359 return -EPERM; 424 return -EPERM;
360 425
426 error = strict_strtoul(buf, 0, &state);
427 if (error)
428 return error;
429
361 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */ 430 /* RFKILL_STATE_HARD_BLOCKED is illegal here... */
362 if (state != RFKILL_STATE_UNBLOCKED && 431 if (state != RFKILL_STATE_UNBLOCKED &&
363 state != RFKILL_STATE_SOFT_BLOCKED) 432 state != RFKILL_STATE_SOFT_BLOCKED)
@@ -385,7 +454,8 @@ static ssize_t rfkill_claim_store(struct device *dev,
385 const char *buf, size_t count) 454 const char *buf, size_t count)
386{ 455{
387 struct rfkill *rfkill = to_rfkill(dev); 456 struct rfkill *rfkill = to_rfkill(dev);
388 bool claim = !!simple_strtoul(buf, NULL, 0); 457 unsigned long claim_tmp;
458 bool claim;
389 int error; 459 int error;
390 460
391 if (!capable(CAP_NET_ADMIN)) 461 if (!capable(CAP_NET_ADMIN))
@@ -394,11 +464,16 @@ static ssize_t rfkill_claim_store(struct device *dev,
394 if (rfkill->user_claim_unsupported) 464 if (rfkill->user_claim_unsupported)
395 return -EOPNOTSUPP; 465 return -EOPNOTSUPP;
396 466
467 error = strict_strtoul(buf, 0, &claim_tmp);
468 if (error)
469 return error;
470 claim = !!claim_tmp;
471
397 /* 472 /*
398 * Take the global lock to make sure the kernel is not in 473 * Take the global lock to make sure the kernel is not in
399 * the middle of rfkill_switch_all 474 * the middle of rfkill_switch_all
400 */ 475 */
401 error = mutex_lock_interruptible(&rfkill_mutex); 476 error = mutex_lock_interruptible(&rfkill_global_mutex);
402 if (error) 477 if (error)
403 return error; 478 return error;
404 479
@@ -406,14 +481,14 @@ static ssize_t rfkill_claim_store(struct device *dev,
406 if (!claim) { 481 if (!claim) {
407 mutex_lock(&rfkill->mutex); 482 mutex_lock(&rfkill->mutex);
408 rfkill_toggle_radio(rfkill, 483 rfkill_toggle_radio(rfkill,
409 rfkill_states[rfkill->type], 484 rfkill_global_states[rfkill->type].current_state,
410 0); 485 0);
411 mutex_unlock(&rfkill->mutex); 486 mutex_unlock(&rfkill->mutex);
412 } 487 }
413 rfkill->user_claim = claim; 488 rfkill->user_claim = claim;
414 } 489 }
415 490
416 mutex_unlock(&rfkill_mutex); 491 mutex_unlock(&rfkill_global_mutex);
417 492
418 return error ? error : count; 493 return error ? error : count;
419} 494}
@@ -437,21 +512,9 @@ static void rfkill_release(struct device *dev)
437#ifdef CONFIG_PM 512#ifdef CONFIG_PM
438static int rfkill_suspend(struct device *dev, pm_message_t state) 513static int rfkill_suspend(struct device *dev, pm_message_t state)
439{ 514{
440 struct rfkill *rfkill = to_rfkill(dev); 515 /* mark class device as suspended */
441 516 if (dev->power.power_state.event != state.event)
442 if (dev->power.power_state.event != state.event) {
443 if (state.event & PM_EVENT_SLEEP) {
444 /* Stop transmitter, keep state, no notifies */
445 update_rfkill_state(rfkill);
446
447 mutex_lock(&rfkill->mutex);
448 rfkill->toggle_radio(rfkill->data,
449 RFKILL_STATE_SOFT_BLOCKED);
450 mutex_unlock(&rfkill->mutex);
451 }
452
453 dev->power.power_state = state; 517 dev->power.power_state = state;
454 }
455 518
456 return 0; 519 return 0;
457} 520}
@@ -525,24 +588,60 @@ static struct class rfkill_class = {
525 .dev_uevent = rfkill_dev_uevent, 588 .dev_uevent = rfkill_dev_uevent,
526}; 589};
527 590
591static int rfkill_check_duplicity(const struct rfkill *rfkill)
592{
593 struct rfkill *p;
594 unsigned long seen[BITS_TO_LONGS(RFKILL_TYPE_MAX)];
595
596 memset(seen, 0, sizeof(seen));
597
598 list_for_each_entry(p, &rfkill_list, node) {
599 if (WARN((p == rfkill), KERN_WARNING
600 "rfkill: illegal attempt to register "
601 "an already registered rfkill struct\n"))
602 return -EEXIST;
603 set_bit(p->type, seen);
604 }
605
606 /* 0: first switch of its kind */
607 return test_bit(rfkill->type, seen);
608}
609
528static int rfkill_add_switch(struct rfkill *rfkill) 610static int rfkill_add_switch(struct rfkill *rfkill)
529{ 611{
530 mutex_lock(&rfkill_mutex); 612 int error;
613
614 mutex_lock(&rfkill_global_mutex);
615
616 error = rfkill_check_duplicity(rfkill);
617 if (error < 0)
618 goto unlock_out;
531 619
532 rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type], 0); 620 if (!error) {
621 /* lock default after first use */
622 set_bit(rfkill->type, rfkill_states_lockdflt);
623 rfkill_global_states[rfkill->type].current_state =
624 rfkill_global_states[rfkill->type].default_state;
625 }
626
627 rfkill_toggle_radio(rfkill,
628 rfkill_global_states[rfkill->type].current_state,
629 0);
533 630
534 list_add_tail(&rfkill->node, &rfkill_list); 631 list_add_tail(&rfkill->node, &rfkill_list);
535 632
536 mutex_unlock(&rfkill_mutex); 633 error = 0;
634unlock_out:
635 mutex_unlock(&rfkill_global_mutex);
537 636
538 return 0; 637 return error;
539} 638}
540 639
541static void rfkill_remove_switch(struct rfkill *rfkill) 640static void rfkill_remove_switch(struct rfkill *rfkill)
542{ 641{
543 mutex_lock(&rfkill_mutex); 642 mutex_lock(&rfkill_global_mutex);
544 list_del_init(&rfkill->node); 643 list_del_init(&rfkill->node);
545 mutex_unlock(&rfkill_mutex); 644 mutex_unlock(&rfkill_global_mutex);
546 645
547 mutex_lock(&rfkill->mutex); 646 mutex_lock(&rfkill->mutex);
548 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1); 647 rfkill_toggle_radio(rfkill, RFKILL_STATE_SOFT_BLOCKED, 1);
@@ -562,11 +661,18 @@ static void rfkill_remove_switch(struct rfkill *rfkill)
562 * NOTE: If registration fails the structure shoudl be freed by calling 661 * NOTE: If registration fails the structure shoudl be freed by calling
563 * rfkill_free() otherwise rfkill_unregister() should be used. 662 * rfkill_free() otherwise rfkill_unregister() should be used.
564 */ 663 */
565struct rfkill *rfkill_allocate(struct device *parent, enum rfkill_type type) 664struct rfkill * __must_check rfkill_allocate(struct device *parent,
665 enum rfkill_type type)
566{ 666{
567 struct rfkill *rfkill; 667 struct rfkill *rfkill;
568 struct device *dev; 668 struct device *dev;
569 669
670 if (WARN((type >= RFKILL_TYPE_MAX),
671 KERN_WARNING
672 "rfkill: illegal type %d passed as parameter "
673 "to rfkill_allocate\n", type))
674 return NULL;
675
570 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL); 676 rfkill = kzalloc(sizeof(struct rfkill), GFP_KERNEL);
571 if (!rfkill) 677 if (!rfkill)
572 return NULL; 678 return NULL;
@@ -633,15 +739,18 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
633 * structure needs to be registered. Immediately from registration the 739 * structure needs to be registered. Immediately from registration the
634 * switch driver should be able to service calls to toggle_radio. 740 * switch driver should be able to service calls to toggle_radio.
635 */ 741 */
636int rfkill_register(struct rfkill *rfkill) 742int __must_check rfkill_register(struct rfkill *rfkill)
637{ 743{
638 static atomic_t rfkill_no = ATOMIC_INIT(0); 744 static atomic_t rfkill_no = ATOMIC_INIT(0);
639 struct device *dev = &rfkill->dev; 745 struct device *dev = &rfkill->dev;
640 int error; 746 int error;
641 747
642 if (!rfkill->toggle_radio) 748 if (WARN((!rfkill || !rfkill->toggle_radio ||
643 return -EINVAL; 749 rfkill->type >= RFKILL_TYPE_MAX ||
644 if (rfkill->type >= RFKILL_TYPE_MAX) 750 rfkill->state >= RFKILL_STATE_MAX),
751 KERN_WARNING
752 "rfkill: attempt to register a "
753 "badly initialized rfkill struct\n"))
645 return -EINVAL; 754 return -EINVAL;
646 755
647 snprintf(dev->bus_id, sizeof(dev->bus_id), 756 snprintf(dev->bus_id, sizeof(dev->bus_id),
@@ -676,6 +785,7 @@ EXPORT_SYMBOL(rfkill_register);
676 */ 785 */
677void rfkill_unregister(struct rfkill *rfkill) 786void rfkill_unregister(struct rfkill *rfkill)
678{ 787{
788 BUG_ON(!rfkill);
679 device_del(&rfkill->dev); 789 device_del(&rfkill->dev);
680 rfkill_remove_switch(rfkill); 790 rfkill_remove_switch(rfkill);
681 rfkill_led_trigger_unregister(rfkill); 791 rfkill_led_trigger_unregister(rfkill);
@@ -683,6 +793,56 @@ void rfkill_unregister(struct rfkill *rfkill)
683} 793}
684EXPORT_SYMBOL(rfkill_unregister); 794EXPORT_SYMBOL(rfkill_unregister);
685 795
796/**
797 * rfkill_set_default - set initial value for a switch type
798 * @type - the type of switch to set the default state of
799 * @state - the new default state for that group of switches
800 *
801 * Sets the initial state rfkill should use for a given type.
802 * The following initial states are allowed: RFKILL_STATE_SOFT_BLOCKED
803 * and RFKILL_STATE_UNBLOCKED.
804 *
805 * This function is meant to be used by platform drivers for platforms
806 * that can save switch state across power down/reboot.
807 *
808 * The default state for each switch type can be changed exactly once.
809 * After a switch of that type is registered, the default state cannot
810 * be changed anymore. This guards against multiple drivers it the
811 * same platform trying to set the initial switch default state, which
812 * is not allowed.
813 *
814 * Returns -EPERM if the state has already been set once or is in use,
815 * so drivers likely want to either ignore or at most printk(KERN_NOTICE)
816 * if this function returns -EPERM.
817 *
818 * Returns 0 if the new default state was set, or an error if it
819 * could not be set.
820 */
821int rfkill_set_default(enum rfkill_type type, enum rfkill_state state)
822{
823 int error;
824
825 if (WARN((type >= RFKILL_TYPE_MAX ||
826 (state != RFKILL_STATE_SOFT_BLOCKED &&
827 state != RFKILL_STATE_UNBLOCKED)),
828 KERN_WARNING
829 "rfkill: illegal state %d or type %d passed as "
830 "parameter to rfkill_set_default\n", state, type))
831 return -EINVAL;
832
833 mutex_lock(&rfkill_global_mutex);
834
835 if (!test_and_set_bit(type, rfkill_states_lockdflt)) {
836 rfkill_global_states[type].default_state = state;
837 error = 0;
838 } else
839 error = -EPERM;
840
841 mutex_unlock(&rfkill_global_mutex);
842 return error;
843}
844EXPORT_SYMBOL_GPL(rfkill_set_default);
845
686/* 846/*
687 * Rfkill module initialization/deinitialization. 847 * Rfkill module initialization/deinitialization.
688 */ 848 */
@@ -696,8 +856,8 @@ static int __init rfkill_init(void)
696 rfkill_default_state != RFKILL_STATE_UNBLOCKED) 856 rfkill_default_state != RFKILL_STATE_UNBLOCKED)
697 return -EINVAL; 857 return -EINVAL;
698 858
699 for (i = 0; i < ARRAY_SIZE(rfkill_states); i++) 859 for (i = 0; i < RFKILL_TYPE_MAX; i++)
700 rfkill_states[i] = rfkill_default_state; 860 rfkill_global_states[i].default_state = rfkill_default_state;
701 861
702 error = class_register(&rfkill_class); 862 error = class_register(&rfkill_class);
703 if (error) { 863 if (error) {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9437b27ff84d..6767e54155db 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -106,6 +106,15 @@ config NET_SCH_PRIO
106 To compile this code as a module, choose M here: the 106 To compile this code as a module, choose M here: the
107 module will be called sch_prio. 107 module will be called sch_prio.
108 108
109config NET_SCH_MULTIQ
110 tristate "Hardware Multiqueue-aware Multi Band Queuing (MULTIQ)"
111 ---help---
112 Say Y here if you want to use an n-band queue packet scheduler
113 to support devices that have multiple hardware transmit queues.
114
115 To compile this code as a module, choose M here: the
116 module will be called sch_multiq.
117
109config NET_SCH_RED 118config NET_SCH_RED
110 tristate "Random Early Detection (RED)" 119 tristate "Random Early Detection (RED)"
111 ---help--- 120 ---help---
@@ -476,6 +485,17 @@ config NET_ACT_SIMP
476 To compile this code as a module, choose M here: the 485 To compile this code as a module, choose M here: the
477 module will be called simple. 486 module will be called simple.
478 487
488config NET_ACT_SKBEDIT
489 tristate "SKB Editing"
490 depends on NET_CLS_ACT
491 ---help---
492 Say Y here to change skb priority or queue_mapping settings.
493
494 If unsure, say N.
495
496 To compile this code as a module, choose M here: the
497 module will be called skbedit.
498
479config NET_CLS_IND 499config NET_CLS_IND
480 bool "Incoming device classification" 500 bool "Incoming device classification"
481 depends on NET_CLS_U32 || NET_CLS_FW 501 depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 1d2b0f7df848..e60c9925b269 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o
14obj-$(CONFIG_NET_ACT_NAT) += act_nat.o 14obj-$(CONFIG_NET_ACT_NAT) += act_nat.o
15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o 15obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o 16obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
17obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
17obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o 18obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
18obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o 19obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
19obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o 20obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
26obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o 27obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
27obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o 28obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
28obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o 29obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
30obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
29obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o 31obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
30obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o 32obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
31obj-$(CONFIG_NET_CLS_U32) += cls_u32.o 33obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
new file mode 100644
index 000000000000..fe9777e77f35
--- /dev/null
+++ b/net/sched/act_skbedit.c
@@ -0,0 +1,203 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/skbuff.h>
24#include <linux/rtnetlink.h>
25#include <net/netlink.h>
26#include <net/pkt_sched.h>
27
28#include <linux/tc_act/tc_skbedit.h>
29#include <net/tc_act/tc_skbedit.h>
30
31#define SKBEDIT_TAB_MASK 15
32static struct tcf_common *tcf_skbedit_ht[SKBEDIT_TAB_MASK + 1];
33static u32 skbedit_idx_gen;
34static DEFINE_RWLOCK(skbedit_lock);
35
36static struct tcf_hashinfo skbedit_hash_info = {
37 .htab = tcf_skbedit_ht,
38 .hmask = SKBEDIT_TAB_MASK,
39 .lock = &skbedit_lock,
40};
41
42static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
43 struct tcf_result *res)
44{
45 struct tcf_skbedit *d = a->priv;
46
47 spin_lock(&d->tcf_lock);
48 d->tcf_tm.lastuse = jiffies;
49 d->tcf_bstats.bytes += qdisc_pkt_len(skb);
50 d->tcf_bstats.packets++;
51
52 if (d->flags & SKBEDIT_F_PRIORITY)
53 skb->priority = d->priority;
54 if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
55 skb->dev->real_num_tx_queues > d->queue_mapping)
56 skb_set_queue_mapping(skb, d->queue_mapping);
57
58 spin_unlock(&d->tcf_lock);
59 return d->tcf_action;
60}
61
62static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
63 [TCA_SKBEDIT_PARMS] = { .len = sizeof(struct tc_skbedit) },
64 [TCA_SKBEDIT_PRIORITY] = { .len = sizeof(u32) },
65 [TCA_SKBEDIT_QUEUE_MAPPING] = { .len = sizeof(u16) },
66};
67
68static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
69 struct tc_action *a, int ovr, int bind)
70{
71 struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
72 struct tc_skbedit *parm;
73 struct tcf_skbedit *d;
74 struct tcf_common *pc;
75 u32 flags = 0, *priority = NULL;
76 u16 *queue_mapping = NULL;
77 int ret = 0, err;
78
79 if (nla == NULL)
80 return -EINVAL;
81
82 err = nla_parse_nested(tb, TCA_SKBEDIT_MAX, nla, skbedit_policy);
83 if (err < 0)
84 return err;
85
86 if (tb[TCA_SKBEDIT_PARMS] == NULL)
87 return -EINVAL;
88
89 if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
90 flags |= SKBEDIT_F_PRIORITY;
91 priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
92 }
93
94 if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
95 flags |= SKBEDIT_F_QUEUE_MAPPING;
96 queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
97 }
98 if (!flags)
99 return -EINVAL;
100
101 parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
102
103 pc = tcf_hash_check(parm->index, a, bind, &skbedit_hash_info);
104 if (!pc) {
105 pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
106 &skbedit_idx_gen, &skbedit_hash_info);
107 if (unlikely(!pc))
108 return -ENOMEM;
109
110 d = to_skbedit(pc);
111 ret = ACT_P_CREATED;
112 } else {
113 d = to_skbedit(pc);
114 if (!ovr) {
115 tcf_hash_release(pc, bind, &skbedit_hash_info);
116 return -EEXIST;
117 }
118 }
119
120 spin_lock_bh(&d->tcf_lock);
121
122 d->flags = flags;
123 if (flags & SKBEDIT_F_PRIORITY)
124 d->priority = *priority;
125 if (flags & SKBEDIT_F_QUEUE_MAPPING)
126 d->queue_mapping = *queue_mapping;
127 d->tcf_action = parm->action;
128
129 spin_unlock_bh(&d->tcf_lock);
130
131 if (ret == ACT_P_CREATED)
132 tcf_hash_insert(pc, &skbedit_hash_info);
133 return ret;
134}
135
136static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
137{
138 struct tcf_skbedit *d = a->priv;
139
140 if (d)
141 return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
142 return 0;
143}
144
145static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
146 int bind, int ref)
147{
148 unsigned char *b = skb_tail_pointer(skb);
149 struct tcf_skbedit *d = a->priv;
150 struct tc_skbedit opt;
151 struct tcf_t t;
152
153 opt.index = d->tcf_index;
154 opt.refcnt = d->tcf_refcnt - ref;
155 opt.bindcnt = d->tcf_bindcnt - bind;
156 opt.action = d->tcf_action;
157 NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
158 if (d->flags & SKBEDIT_F_PRIORITY)
159 NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
160 &d->priority);
161 if (d->flags & SKBEDIT_F_QUEUE_MAPPING)
162 NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING,
163 sizeof(d->queue_mapping), &d->queue_mapping);
164 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
165 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
166 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
167 NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t);
168 return skb->len;
169
170nla_put_failure:
171 nlmsg_trim(skb, b);
172 return -1;
173}
174
175static struct tc_action_ops act_skbedit_ops = {
176 .kind = "skbedit",
177 .hinfo = &skbedit_hash_info,
178 .type = TCA_ACT_SKBEDIT,
179 .capab = TCA_CAP_NONE,
180 .owner = THIS_MODULE,
181 .act = tcf_skbedit,
182 .dump = tcf_skbedit_dump,
183 .cleanup = tcf_skbedit_cleanup,
184 .init = tcf_skbedit_init,
185 .walk = tcf_generic_walker,
186};
187
188MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
189MODULE_DESCRIPTION("SKB Editing");
190MODULE_LICENSE("GPL");
191
192static int __init skbedit_init_module(void)
193{
194 return tcf_register_action(&act_skbedit_ops);
195}
196
197static void __exit skbedit_cleanup_module(void)
198{
199 tcf_unregister_action(&act_skbedit_ops);
200}
201
202module_init(skbedit_init_module);
203module_exit(skbedit_cleanup_module);
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 8f63a1a94014..0ebaff637e31 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -67,9 +67,9 @@ static inline u32 addr_fold(void *addr)
67static u32 flow_get_src(const struct sk_buff *skb) 67static u32 flow_get_src(const struct sk_buff *skb)
68{ 68{
69 switch (skb->protocol) { 69 switch (skb->protocol) {
70 case __constant_htons(ETH_P_IP): 70 case htons(ETH_P_IP):
71 return ntohl(ip_hdr(skb)->saddr); 71 return ntohl(ip_hdr(skb)->saddr);
72 case __constant_htons(ETH_P_IPV6): 72 case htons(ETH_P_IPV6):
73 return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]); 73 return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
74 default: 74 default:
75 return addr_fold(skb->sk); 75 return addr_fold(skb->sk);
@@ -79,9 +79,9 @@ static u32 flow_get_src(const struct sk_buff *skb)
79static u32 flow_get_dst(const struct sk_buff *skb) 79static u32 flow_get_dst(const struct sk_buff *skb)
80{ 80{
81 switch (skb->protocol) { 81 switch (skb->protocol) {
82 case __constant_htons(ETH_P_IP): 82 case htons(ETH_P_IP):
83 return ntohl(ip_hdr(skb)->daddr); 83 return ntohl(ip_hdr(skb)->daddr);
84 case __constant_htons(ETH_P_IPV6): 84 case htons(ETH_P_IPV6):
85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
86 default: 86 default:
87 return addr_fold(skb->dst) ^ (__force u16)skb->protocol; 87 return addr_fold(skb->dst) ^ (__force u16)skb->protocol;
@@ -91,9 +91,9 @@ static u32 flow_get_dst(const struct sk_buff *skb)
91static u32 flow_get_proto(const struct sk_buff *skb) 91static u32 flow_get_proto(const struct sk_buff *skb)
92{ 92{
93 switch (skb->protocol) { 93 switch (skb->protocol) {
94 case __constant_htons(ETH_P_IP): 94 case htons(ETH_P_IP):
95 return ip_hdr(skb)->protocol; 95 return ip_hdr(skb)->protocol;
96 case __constant_htons(ETH_P_IPV6): 96 case htons(ETH_P_IPV6):
97 return ipv6_hdr(skb)->nexthdr; 97 return ipv6_hdr(skb)->nexthdr;
98 default: 98 default:
99 return 0; 99 return 0;
@@ -120,7 +120,7 @@ static u32 flow_get_proto_src(const struct sk_buff *skb)
120 u32 res = 0; 120 u32 res = 0;
121 121
122 switch (skb->protocol) { 122 switch (skb->protocol) {
123 case __constant_htons(ETH_P_IP): { 123 case htons(ETH_P_IP): {
124 struct iphdr *iph = ip_hdr(skb); 124 struct iphdr *iph = ip_hdr(skb);
125 125
126 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 126 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -128,7 +128,7 @@ static u32 flow_get_proto_src(const struct sk_buff *skb)
128 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4)); 128 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
129 break; 129 break;
130 } 130 }
131 case __constant_htons(ETH_P_IPV6): { 131 case htons(ETH_P_IPV6): {
132 struct ipv6hdr *iph = ipv6_hdr(skb); 132 struct ipv6hdr *iph = ipv6_hdr(skb);
133 133
134 if (has_ports(iph->nexthdr)) 134 if (has_ports(iph->nexthdr))
@@ -147,7 +147,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
147 u32 res = 0; 147 u32 res = 0;
148 148
149 switch (skb->protocol) { 149 switch (skb->protocol) {
150 case __constant_htons(ETH_P_IP): { 150 case htons(ETH_P_IP): {
151 struct iphdr *iph = ip_hdr(skb); 151 struct iphdr *iph = ip_hdr(skb);
152 152
153 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 153 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
@@ -155,7 +155,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
155 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2)); 155 res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
156 break; 156 break;
157 } 157 }
158 case __constant_htons(ETH_P_IPV6): { 158 case htons(ETH_P_IPV6): {
159 struct ipv6hdr *iph = ipv6_hdr(skb); 159 struct ipv6hdr *iph = ipv6_hdr(skb);
160 160
161 if (has_ports(iph->nexthdr)) 161 if (has_ports(iph->nexthdr))
@@ -213,9 +213,9 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
213static u32 flow_get_nfct_src(const struct sk_buff *skb) 213static u32 flow_get_nfct_src(const struct sk_buff *skb)
214{ 214{
215 switch (skb->protocol) { 215 switch (skb->protocol) {
216 case __constant_htons(ETH_P_IP): 216 case htons(ETH_P_IP):
217 return ntohl(CTTUPLE(skb, src.u3.ip)); 217 return ntohl(CTTUPLE(skb, src.u3.ip));
218 case __constant_htons(ETH_P_IPV6): 218 case htons(ETH_P_IPV6):
219 return ntohl(CTTUPLE(skb, src.u3.ip6[3])); 219 return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
220 } 220 }
221fallback: 221fallback:
@@ -225,9 +225,9 @@ fallback:
225static u32 flow_get_nfct_dst(const struct sk_buff *skb) 225static u32 flow_get_nfct_dst(const struct sk_buff *skb)
226{ 226{
227 switch (skb->protocol) { 227 switch (skb->protocol) {
228 case __constant_htons(ETH_P_IP): 228 case htons(ETH_P_IP):
229 return ntohl(CTTUPLE(skb, dst.u3.ip)); 229 return ntohl(CTTUPLE(skb, dst.u3.ip));
230 case __constant_htons(ETH_P_IPV6): 230 case htons(ETH_P_IPV6):
231 return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); 231 return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
232 } 232 }
233fallback: 233fallback:
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index cc49c932641d..bc450397487a 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -14,6 +14,7 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/tc_ematch/tc_em_cmp.h> 16#include <linux/tc_ematch/tc_em_cmp.h>
17#include <asm/unaligned.h>
17#include <net/pkt_cls.h> 18#include <net/pkt_cls.h>
18 19
19static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp) 20static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp)
@@ -37,8 +38,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
37 break; 38 break;
38 39
39 case TCF_EM_ALIGN_U16: 40 case TCF_EM_ALIGN_U16:
40 val = *ptr << 8; 41 val = get_unaligned_be16(ptr);
41 val |= *(ptr+1);
42 42
43 if (cmp_needs_transformation(cmp)) 43 if (cmp_needs_transformation(cmp))
44 val = be16_to_cpu(val); 44 val = be16_to_cpu(val);
@@ -47,10 +47,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
47 case TCF_EM_ALIGN_U32: 47 case TCF_EM_ALIGN_U32:
48 /* Worth checking boundries? The branching seems 48 /* Worth checking boundries? The branching seems
49 * to get worse. Visit again. */ 49 * to get worse. Visit again. */
50 val = *ptr << 24; 50 val = get_unaligned_be32(ptr);
51 val |= *(ptr+1) << 16;
52 val |= *(ptr+2) << 8;
53 val |= *(ptr+3);
54 51
55 if (cmp_needs_transformation(cmp)) 52 if (cmp_needs_transformation(cmp))
56 val = be32_to_cpu(val); 53 val = be32_to_cpu(val);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index edd1298f85f6..ba43aab3a851 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -202,7 +202,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
202 202
203 if (p->set_tc_index) { 203 if (p->set_tc_index) {
204 switch (skb->protocol) { 204 switch (skb->protocol) {
205 case __constant_htons(ETH_P_IP): 205 case htons(ETH_P_IP):
206 if (skb_cow_head(skb, sizeof(struct iphdr))) 206 if (skb_cow_head(skb, sizeof(struct iphdr)))
207 goto drop; 207 goto drop;
208 208
@@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
210 & ~INET_ECN_MASK; 210 & ~INET_ECN_MASK;
211 break; 211 break;
212 212
213 case __constant_htons(ETH_P_IPV6): 213 case htons(ETH_P_IPV6):
214 if (skb_cow_head(skb, sizeof(struct ipv6hdr))) 214 if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
215 goto drop; 215 goto drop;
216 216
@@ -289,11 +289,11 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
289 pr_debug("index %d->%d\n", skb->tc_index, index); 289 pr_debug("index %d->%d\n", skb->tc_index, index);
290 290
291 switch (skb->protocol) { 291 switch (skb->protocol) {
292 case __constant_htons(ETH_P_IP): 292 case htons(ETH_P_IP):
293 ipv4_change_dsfield(ip_hdr(skb), p->mask[index], 293 ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
294 p->value[index]); 294 p->value[index]);
295 break; 295 break;
296 case __constant_htons(ETH_P_IPV6): 296 case htons(ETH_P_IPV6):
297 ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index], 297 ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
298 p->value[index]); 298 p->value[index]);
299 break; 299 break;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ec0a0839ce51..5e7e0bd38fe8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -44,10 +44,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
44 44
45static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) 45static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
46{ 46{
47 if (unlikely(skb->next)) 47 __skb_queue_head(&q->requeue, skb);
48 q->gso_skb = skb;
49 else
50 q->ops->requeue(skb, q);
51 48
52 __netif_schedule(q); 49 __netif_schedule(q);
53 return 0; 50 return 0;
@@ -55,12 +52,21 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
55 52
56static inline struct sk_buff *dequeue_skb(struct Qdisc *q) 53static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
57{ 54{
58 struct sk_buff *skb; 55 struct sk_buff *skb = skb_peek(&q->requeue);
56
57 if (unlikely(skb)) {
58 struct net_device *dev = qdisc_dev(q);
59 struct netdev_queue *txq;
59 60
60 if ((skb = q->gso_skb)) 61 /* check the reason of requeuing without tx lock first */
61 q->gso_skb = NULL; 62 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
62 else 63 if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
64 __skb_unlink(skb, &q->requeue);
65 else
66 skb = NULL;
67 } else {
63 skb = q->dequeue(q); 68 skb = q->dequeue(q);
69 }
64 70
65 return skb; 71 return skb;
66} 72}
@@ -327,6 +333,7 @@ struct Qdisc noop_qdisc = {
327 .flags = TCQ_F_BUILTIN, 333 .flags = TCQ_F_BUILTIN,
328 .ops = &noop_qdisc_ops, 334 .ops = &noop_qdisc_ops,
329 .list = LIST_HEAD_INIT(noop_qdisc.list), 335 .list = LIST_HEAD_INIT(noop_qdisc.list),
336 .requeue.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
330 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), 337 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
331 .dev_queue = &noop_netdev_queue, 338 .dev_queue = &noop_netdev_queue,
332}; 339};
@@ -352,6 +359,7 @@ static struct Qdisc noqueue_qdisc = {
352 .flags = TCQ_F_BUILTIN, 359 .flags = TCQ_F_BUILTIN,
353 .ops = &noqueue_qdisc_ops, 360 .ops = &noqueue_qdisc_ops,
354 .list = LIST_HEAD_INIT(noqueue_qdisc.list), 361 .list = LIST_HEAD_INIT(noqueue_qdisc.list),
362 .requeue.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
355 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock), 363 .q.lock = __SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
356 .dev_queue = &noqueue_netdev_queue, 364 .dev_queue = &noqueue_netdev_queue,
357}; 365};
@@ -472,6 +480,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
472 sch->padded = (char *) sch - (char *) p; 480 sch->padded = (char *) sch - (char *) p;
473 481
474 INIT_LIST_HEAD(&sch->list); 482 INIT_LIST_HEAD(&sch->list);
483 skb_queue_head_init(&sch->requeue);
475 skb_queue_head_init(&sch->q); 484 skb_queue_head_init(&sch->q);
476 sch->ops = ops; 485 sch->ops = ops;
477 sch->enqueue = ops->enqueue; 486 sch->enqueue = ops->enqueue;
@@ -539,7 +548,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
539 module_put(ops->owner); 548 module_put(ops->owner);
540 dev_put(qdisc_dev(qdisc)); 549 dev_put(qdisc_dev(qdisc));
541 550
542 kfree_skb(qdisc->gso_skb); 551 __skb_queue_purge(&qdisc->requeue);
543 552
544 kfree((char *) qdisc - qdisc->padded); 553 kfree((char *) qdisc - qdisc->padded);
545} 554}
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
new file mode 100644
index 000000000000..915f3149dde2
--- /dev/null
+++ b/net/sched/sch_multiq.c
@@ -0,0 +1,477 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/skbuff.h>
26#include <net/netlink.h>
27#include <net/pkt_sched.h>
28
29
30struct multiq_sched_data {
31 u16 bands;
32 u16 max_bands;
33 u16 curband;
34 struct tcf_proto *filter_list;
35 struct Qdisc **queues;
36};
37
38
39static struct Qdisc *
40multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
41{
42 struct multiq_sched_data *q = qdisc_priv(sch);
43 u32 band;
44 struct tcf_result res;
45 int err;
46
47 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
48 err = tc_classify(skb, q->filter_list, &res);
49#ifdef CONFIG_NET_CLS_ACT
50 switch (err) {
51 case TC_ACT_STOLEN:
52 case TC_ACT_QUEUED:
53 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
54 case TC_ACT_SHOT:
55 return NULL;
56 }
57#endif
58 band = skb_get_queue_mapping(skb);
59
60 if (band >= q->bands)
61 return q->queues[0];
62
63 return q->queues[band];
64}
65
66static int
67multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
68{
69 struct Qdisc *qdisc;
70 int ret;
71
72 qdisc = multiq_classify(skb, sch, &ret);
73#ifdef CONFIG_NET_CLS_ACT
74 if (qdisc == NULL) {
75
76 if (ret & __NET_XMIT_BYPASS)
77 sch->qstats.drops++;
78 kfree_skb(skb);
79 return ret;
80 }
81#endif
82
83 ret = qdisc_enqueue(skb, qdisc);
84 if (ret == NET_XMIT_SUCCESS) {
85 sch->bstats.bytes += qdisc_pkt_len(skb);
86 sch->bstats.packets++;
87 sch->q.qlen++;
88 return NET_XMIT_SUCCESS;
89 }
90 if (net_xmit_drop_count(ret))
91 sch->qstats.drops++;
92 return ret;
93}
94
95
96static int
97multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
98{
99 struct Qdisc *qdisc;
100 struct multiq_sched_data *q = qdisc_priv(sch);
101 int ret;
102
103 qdisc = multiq_classify(skb, sch, &ret);
104#ifdef CONFIG_NET_CLS_ACT
105 if (qdisc == NULL) {
106 if (ret & __NET_XMIT_BYPASS)
107 sch->qstats.drops++;
108 kfree_skb(skb);
109 return ret;
110 }
111#endif
112
113 ret = qdisc->ops->requeue(skb, qdisc);
114 if (ret == NET_XMIT_SUCCESS) {
115 sch->q.qlen++;
116 sch->qstats.requeues++;
117 if (q->curband)
118 q->curband--;
119 else
120 q->curband = q->bands - 1;
121 return NET_XMIT_SUCCESS;
122 }
123 if (net_xmit_drop_count(ret))
124 sch->qstats.drops++;
125 return ret;
126}
127
128
129static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
130{
131 struct multiq_sched_data *q = qdisc_priv(sch);
132 struct Qdisc *qdisc;
133 struct sk_buff *skb;
134 int band;
135
136 for (band = 0; band < q->bands; band++) {
137 /* cycle through bands to ensure fairness */
138 q->curband++;
139 if (q->curband >= q->bands)
140 q->curband = 0;
141
142 /* Check that target subqueue is available before
143 * pulling an skb to avoid excessive requeues
144 */
145 if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
146 qdisc = q->queues[q->curband];
147 skb = qdisc->dequeue(qdisc);
148 if (skb) {
149 sch->q.qlen--;
150 return skb;
151 }
152 }
153 }
154 return NULL;
155
156}
157
158static unsigned int multiq_drop(struct Qdisc *sch)
159{
160 struct multiq_sched_data *q = qdisc_priv(sch);
161 int band;
162 unsigned int len;
163 struct Qdisc *qdisc;
164
165 for (band = q->bands-1; band >= 0; band--) {
166 qdisc = q->queues[band];
167 if (qdisc->ops->drop) {
168 len = qdisc->ops->drop(qdisc);
169 if (len != 0) {
170 sch->q.qlen--;
171 return len;
172 }
173 }
174 }
175 return 0;
176}
177
178
179static void
180multiq_reset(struct Qdisc *sch)
181{
182 u16 band;
183 struct multiq_sched_data *q = qdisc_priv(sch);
184
185 for (band = 0; band < q->bands; band++)
186 qdisc_reset(q->queues[band]);
187 sch->q.qlen = 0;
188 q->curband = 0;
189}
190
191static void
192multiq_destroy(struct Qdisc *sch)
193{
194 int band;
195 struct multiq_sched_data *q = qdisc_priv(sch);
196
197 tcf_destroy_chain(&q->filter_list);
198 for (band = 0; band < q->bands; band++)
199 qdisc_destroy(q->queues[band]);
200
201 kfree(q->queues);
202}
203
204static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
205{
206 struct multiq_sched_data *q = qdisc_priv(sch);
207 struct tc_multiq_qopt *qopt;
208 int i;
209
210 if (!netif_is_multiqueue(qdisc_dev(sch)))
211 return -EINVAL;
212 if (nla_len(opt) < sizeof(*qopt))
213 return -EINVAL;
214
215 qopt = nla_data(opt);
216
217 qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
218
219 sch_tree_lock(sch);
220 q->bands = qopt->bands;
221 for (i = q->bands; i < q->max_bands; i++) {
222 if (q->queues[i] != &noop_qdisc) {
223 struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
224 qdisc_tree_decrease_qlen(child, child->q.qlen);
225 qdisc_destroy(child);
226 }
227 }
228
229 sch_tree_unlock(sch);
230
231 for (i = 0; i < q->bands; i++) {
232 if (q->queues[i] == &noop_qdisc) {
233 struct Qdisc *child;
234 child = qdisc_create_dflt(qdisc_dev(sch),
235 sch->dev_queue,
236 &pfifo_qdisc_ops,
237 TC_H_MAKE(sch->handle,
238 i + 1));
239 if (child) {
240 sch_tree_lock(sch);
241 child = xchg(&q->queues[i], child);
242
243 if (child != &noop_qdisc) {
244 qdisc_tree_decrease_qlen(child,
245 child->q.qlen);
246 qdisc_destroy(child);
247 }
248 sch_tree_unlock(sch);
249 }
250 }
251 }
252 return 0;
253}
254
255static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
256{
257 struct multiq_sched_data *q = qdisc_priv(sch);
258 int i, err;
259
260 q->queues = NULL;
261
262 if (opt == NULL)
263 return -EINVAL;
264
265 q->max_bands = qdisc_dev(sch)->num_tx_queues;
266
267 q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
268 if (!q->queues)
269 return -ENOBUFS;
270 for (i = 0; i < q->max_bands; i++)
271 q->queues[i] = &noop_qdisc;
272
273 err = multiq_tune(sch,opt);
274
275 if (err)
276 kfree(q->queues);
277
278 return err;
279}
280
281static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
282{
283 struct multiq_sched_data *q = qdisc_priv(sch);
284 unsigned char *b = skb_tail_pointer(skb);
285 struct tc_multiq_qopt opt;
286
287 opt.bands = q->bands;
288 opt.max_bands = q->max_bands;
289
290 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
291
292 return skb->len;
293
294nla_put_failure:
295 nlmsg_trim(skb, b);
296 return -1;
297}
298
299static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
300 struct Qdisc **old)
301{
302 struct multiq_sched_data *q = qdisc_priv(sch);
303 unsigned long band = arg - 1;
304
305 if (band >= q->bands)
306 return -EINVAL;
307
308 if (new == NULL)
309 new = &noop_qdisc;
310
311 sch_tree_lock(sch);
312 *old = q->queues[band];
313 q->queues[band] = new;
314 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
315 qdisc_reset(*old);
316 sch_tree_unlock(sch);
317
318 return 0;
319}
320
321static struct Qdisc *
322multiq_leaf(struct Qdisc *sch, unsigned long arg)
323{
324 struct multiq_sched_data *q = qdisc_priv(sch);
325 unsigned long band = arg - 1;
326
327 if (band >= q->bands)
328 return NULL;
329
330 return q->queues[band];
331}
332
333static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
334{
335 struct multiq_sched_data *q = qdisc_priv(sch);
336 unsigned long band = TC_H_MIN(classid);
337
338 if (band - 1 >= q->bands)
339 return 0;
340 return band;
341}
342
343static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
344 u32 classid)
345{
346 return multiq_get(sch, classid);
347}
348
349
350static void multiq_put(struct Qdisc *q, unsigned long cl)
351{
352 return;
353}
354
355static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
356 struct nlattr **tca, unsigned long *arg)
357{
358 unsigned long cl = *arg;
359 struct multiq_sched_data *q = qdisc_priv(sch);
360
361 if (cl - 1 > q->bands)
362 return -ENOENT;
363 return 0;
364}
365
366static int multiq_delete(struct Qdisc *sch, unsigned long cl)
367{
368 struct multiq_sched_data *q = qdisc_priv(sch);
369 if (cl - 1 > q->bands)
370 return -ENOENT;
371 return 0;
372}
373
374
375static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
376 struct sk_buff *skb, struct tcmsg *tcm)
377{
378 struct multiq_sched_data *q = qdisc_priv(sch);
379
380 if (cl - 1 > q->bands)
381 return -ENOENT;
382 tcm->tcm_handle |= TC_H_MIN(cl);
383 if (q->queues[cl-1])
384 tcm->tcm_info = q->queues[cl-1]->handle;
385 return 0;
386}
387
388static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
389 struct gnet_dump *d)
390{
391 struct multiq_sched_data *q = qdisc_priv(sch);
392 struct Qdisc *cl_q;
393
394 cl_q = q->queues[cl - 1];
395 if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
396 gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
397 return -1;
398
399 return 0;
400}
401
402static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
403{
404 struct multiq_sched_data *q = qdisc_priv(sch);
405 int band;
406
407 if (arg->stop)
408 return;
409
410 for (band = 0; band < q->bands; band++) {
411 if (arg->count < arg->skip) {
412 arg->count++;
413 continue;
414 }
415 if (arg->fn(sch, band+1, arg) < 0) {
416 arg->stop = 1;
417 break;
418 }
419 arg->count++;
420 }
421}
422
423static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
424{
425 struct multiq_sched_data *q = qdisc_priv(sch);
426
427 if (cl)
428 return NULL;
429 return &q->filter_list;
430}
431
432static const struct Qdisc_class_ops multiq_class_ops = {
433 .graft = multiq_graft,
434 .leaf = multiq_leaf,
435 .get = multiq_get,
436 .put = multiq_put,
437 .change = multiq_change,
438 .delete = multiq_delete,
439 .walk = multiq_walk,
440 .tcf_chain = multiq_find_tcf,
441 .bind_tcf = multiq_bind,
442 .unbind_tcf = multiq_put,
443 .dump = multiq_dump_class,
444 .dump_stats = multiq_dump_class_stats,
445};
446
447static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
448 .next = NULL,
449 .cl_ops = &multiq_class_ops,
450 .id = "multiq",
451 .priv_size = sizeof(struct multiq_sched_data),
452 .enqueue = multiq_enqueue,
453 .dequeue = multiq_dequeue,
454 .requeue = multiq_requeue,
455 .drop = multiq_drop,
456 .init = multiq_init,
457 .reset = multiq_reset,
458 .destroy = multiq_destroy,
459 .change = multiq_tune,
460 .dump = multiq_dump,
461 .owner = THIS_MODULE,
462};
463
464static int __init multiq_module_init(void)
465{
466 return register_qdisc(&multiq_qdisc_ops);
467}
468
469static void __exit multiq_module_exit(void)
470{
471 unregister_qdisc(&multiq_qdisc_ops);
472}
473
474module_init(multiq_module_init)
475module_exit(multiq_module_exit)
476
477MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 3781e55046d0..a11959908d9a 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -388,6 +388,20 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
388 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 388 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
389}; 389};
390 390
391static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
392 const struct nla_policy *policy, int len)
393{
394 int nested_len = nla_len(nla) - NLA_ALIGN(len);
395
396 if (nested_len < 0)
397 return -EINVAL;
398 if (nested_len >= nla_attr_size(0))
399 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
400 nested_len, policy);
401 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
402 return 0;
403}
404
391/* Parse netlink message to set options */ 405/* Parse netlink message to set options */
392static int netem_change(struct Qdisc *sch, struct nlattr *opt) 406static int netem_change(struct Qdisc *sch, struct nlattr *opt)
393{ 407{
@@ -399,8 +413,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
399 if (opt == NULL) 413 if (opt == NULL)
400 return -EINVAL; 414 return -EINVAL;
401 415
402 ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy, 416 qopt = nla_data(opt);
403 qopt, sizeof(*qopt)); 417 ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
404 if (ret < 0) 418 if (ret < 0)
405 return ret; 419 return ret;
406 420
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index a6697c686c7f..504a78cdb718 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -254,16 +254,12 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
254{ 254{
255 struct prio_sched_data *q = qdisc_priv(sch); 255 struct prio_sched_data *q = qdisc_priv(sch);
256 unsigned char *b = skb_tail_pointer(skb); 256 unsigned char *b = skb_tail_pointer(skb);
257 struct nlattr *nest;
258 struct tc_prio_qopt opt; 257 struct tc_prio_qopt opt;
259 258
260 opt.bands = q->bands; 259 opt.bands = q->bands;
261 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1); 260 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
262 261
263 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); 262 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
264 if (nest == NULL)
265 goto nla_put_failure;
266 nla_nest_compat_end(skb, nest);
267 263
268 return skb->len; 264 return skb->len;
269 265
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6e041d10dbdb..fe1508ef0d3d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -119,7 +119,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
119 u32 h, h2; 119 u32 h, h2;
120 120
121 switch (skb->protocol) { 121 switch (skb->protocol) {
122 case __constant_htons(ETH_P_IP): 122 case htons(ETH_P_IP):
123 { 123 {
124 const struct iphdr *iph = ip_hdr(skb); 124 const struct iphdr *iph = ip_hdr(skb);
125 h = iph->daddr; 125 h = iph->daddr;
@@ -134,7 +134,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
134 h2 ^= *(((u32*)iph) + iph->ihl); 134 h2 ^= *(((u32*)iph) + iph->ihl);
135 break; 135 break;
136 } 136 }
137 case __constant_htons(ETH_P_IPV6): 137 case htons(ETH_P_IPV6):
138 { 138 {
139 struct ipv6hdr *iph = ipv6_hdr(skb); 139 struct ipv6hdr *iph = ipv6_hdr(skb);
140 h = iph->daddr.s6_addr32[3]; 140 h = iph->daddr.s6_addr32[3];
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 5061a26c5028..7b23803343cc 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -317,7 +317,7 @@ static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
317 } 317 }
318 318
319 /* Insert before pos. */ 319 /* Insert before pos. */
320 __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm); 320 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
321 321
322} 322}
323 323
@@ -825,8 +825,7 @@ static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
825 825
826 826
827 /* Insert before pos. */ 827 /* Insert before pos. */
828 __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby); 828 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
829
830} 829}
831 830
832static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, 831static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e55427f73dfe..5c1954d28d09 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -769,7 +769,7 @@ repost:
769 /* check for expected message types */ 769 /* check for expected message types */
770 /* The order of some of these tests is important. */ 770 /* The order of some of these tests is important. */
771 switch (headerp->rm_type) { 771 switch (headerp->rm_type) {
772 case __constant_htonl(RDMA_MSG): 772 case htonl(RDMA_MSG):
773 /* never expect read chunks */ 773 /* never expect read chunks */
774 /* never expect reply chunks (two ways to check) */ 774 /* never expect reply chunks (two ways to check) */
775 /* never expect write chunks without having offered RDMA */ 775 /* never expect write chunks without having offered RDMA */
@@ -802,7 +802,7 @@ repost:
802 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len); 802 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len);
803 break; 803 break;
804 804
805 case __constant_htonl(RDMA_NOMSG): 805 case htonl(RDMA_NOMSG):
806 /* never expect read or write chunks, always reply chunks */ 806 /* never expect read or write chunks, always reply chunks */
807 if (headerp->rm_body.rm_chunks[0] != xdr_zero || 807 if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
808 headerp->rm_body.rm_chunks[1] != xdr_zero || 808 headerp->rm_body.rm_chunks[1] != xdr_zero ||
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 833b024f8f66..b97bd9fe6b79 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -14,6 +14,38 @@ config NL80211
14 14
15 If unsure, say Y. 15 If unsure, say Y.
16 16
17config WIRELESS_OLD_REGULATORY
18 bool "Old wireless static regulatory defintions"
19 default n
20 ---help---
21 This option enables the old static regulatory information
22 and uses it within the new framework. This is available
23 temporarily as an option to help prevent immediate issues
24 due to the switch to the new regulatory framework which
25 does require a new userspace application which has the
26 database of regulatory information (CRDA) and another for
27 setting regulatory domains (iw).
28
29 For more information see:
30
31 http://wireless.kernel.org/en/developers/Regulatory/CRDA
32 http://wireless.kernel.org/en/users/Documentation/iw
33
34 It is important to note though that if you *do* have CRDA present
35 and if this option is enabled CRDA *will* be called to update the
36 regulatory domain (for US and JP only). Support for letting the user
37 set the regulatory domain through iw is also supported. This option
38 mainly exists to leave around for a kernel release some old static
39 regulatory domains that were defined and to keep around the old
40 ieee80211_regdom module parameter. This is being phased out and you
41 should stop using them ASAP.
42
43 Say N unless you cannot install a new userspace application
44 or have one currently depending on the ieee80211_regdom module
45 parameter and cannot port it to use the new userspace interfaces.
46
47 This is scheduled for removal for 2.6.29.
48
17config WIRELESS_EXT 49config WIRELESS_EXT
18 bool "Wireless extensions" 50 bool "Wireless extensions"
19 default n 51 default n
diff --git a/net/wireless/core.c b/net/wireless/core.c
index f1da0b93bc56..5cadbeb76a14 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This is the linux wireless configuration interface. 2 * This is the linux wireless configuration interface.
3 * 3 *
4 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2006-2008 Johannes Berg <johannes@sipsolutions.net>
5 */ 5 */
6 6
7#include <linux/if.h> 7#include <linux/if.h>
@@ -13,12 +13,14 @@
13#include <linux/debugfs.h> 13#include <linux/debugfs.h>
14#include <linux/notifier.h> 14#include <linux/notifier.h>
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/list.h>
16#include <net/genetlink.h> 17#include <net/genetlink.h>
17#include <net/cfg80211.h> 18#include <net/cfg80211.h>
18#include <net/wireless.h> 19#include <net/wireless.h>
19#include "nl80211.h" 20#include "nl80211.h"
20#include "core.h" 21#include "core.h"
21#include "sysfs.h" 22#include "sysfs.h"
23#include "reg.h"
22 24
23/* name for sysfs, %d is appended */ 25/* name for sysfs, %d is appended */
24#define PHY_NAME "phy" 26#define PHY_NAME "phy"
@@ -32,7 +34,6 @@ MODULE_DESCRIPTION("wireless configuration support");
32 * often because we need to do it for each command */ 34 * often because we need to do it for each command */
33LIST_HEAD(cfg80211_drv_list); 35LIST_HEAD(cfg80211_drv_list);
34DEFINE_MUTEX(cfg80211_drv_mutex); 36DEFINE_MUTEX(cfg80211_drv_mutex);
35static int wiphy_counter;
36 37
37/* for debugfs */ 38/* for debugfs */
38static struct dentry *ieee80211_debugfs_dir; 39static struct dentry *ieee80211_debugfs_dir;
@@ -204,6 +205,8 @@ out_unlock:
204 205
205struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv) 206struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv)
206{ 207{
208 static int wiphy_counter;
209
207 struct cfg80211_registered_device *drv; 210 struct cfg80211_registered_device *drv;
208 int alloc_size; 211 int alloc_size;
209 212
@@ -220,21 +223,18 @@ struct wiphy *wiphy_new(struct cfg80211_ops *ops, int sizeof_priv)
220 223
221 mutex_lock(&cfg80211_drv_mutex); 224 mutex_lock(&cfg80211_drv_mutex);
222 225
223 drv->idx = wiphy_counter; 226 drv->idx = wiphy_counter++;
224
225 /* now increase counter for the next device unless
226 * it has wrapped previously */
227 if (wiphy_counter >= 0)
228 wiphy_counter++;
229
230 mutex_unlock(&cfg80211_drv_mutex);
231 227
232 if (unlikely(drv->idx < 0)) { 228 if (unlikely(drv->idx < 0)) {
229 wiphy_counter--;
230 mutex_unlock(&cfg80211_drv_mutex);
233 /* ugh, wrapped! */ 231 /* ugh, wrapped! */
234 kfree(drv); 232 kfree(drv);
235 return NULL; 233 return NULL;
236 } 234 }
237 235
236 mutex_unlock(&cfg80211_drv_mutex);
237
238 /* give it a proper name */ 238 /* give it a proper name */
239 snprintf(drv->wiphy.dev.bus_id, BUS_ID_SIZE, 239 snprintf(drv->wiphy.dev.bus_id, BUS_ID_SIZE,
240 PHY_NAME "%d", drv->idx); 240 PHY_NAME "%d", drv->idx);
@@ -259,6 +259,13 @@ int wiphy_register(struct wiphy *wiphy)
259 struct ieee80211_supported_band *sband; 259 struct ieee80211_supported_band *sband;
260 bool have_band = false; 260 bool have_band = false;
261 int i; 261 int i;
262 u16 ifmodes = wiphy->interface_modes;
263
264 /* sanity check ifmodes */
265 WARN_ON(!ifmodes);
266 ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
267 if (WARN_ON(ifmodes != wiphy->interface_modes))
268 wiphy->interface_modes = ifmodes;
262 269
263 /* sanity check supported bands/channels */ 270 /* sanity check supported bands/channels */
264 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 271 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
@@ -295,7 +302,9 @@ int wiphy_register(struct wiphy *wiphy)
295 ieee80211_set_bitrate_flags(wiphy); 302 ieee80211_set_bitrate_flags(wiphy);
296 303
297 /* set up regulatory info */ 304 /* set up regulatory info */
298 wiphy_update_regulatory(wiphy); 305 mutex_lock(&cfg80211_reg_mutex);
306 wiphy_update_regulatory(wiphy, REGDOM_SET_BY_CORE);
307 mutex_unlock(&cfg80211_reg_mutex);
299 308
300 mutex_lock(&cfg80211_drv_mutex); 309 mutex_lock(&cfg80211_drv_mutex);
301 310
@@ -373,6 +382,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
373 382
374 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy); 383 rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
375 384
385 WARN_ON(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_UNSPECIFIED);
386
376 switch (state) { 387 switch (state) {
377 case NETDEV_REGISTER: 388 case NETDEV_REGISTER:
378 mutex_lock(&rdev->devlist_mtx); 389 mutex_lock(&rdev->devlist_mtx);
@@ -404,7 +415,9 @@ static struct notifier_block cfg80211_netdev_notifier = {
404 415
405static int cfg80211_init(void) 416static int cfg80211_init(void)
406{ 417{
407 int err = wiphy_sysfs_init(); 418 int err;
419
420 err = wiphy_sysfs_init();
408 if (err) 421 if (err)
409 goto out_fail_sysfs; 422 goto out_fail_sysfs;
410 423
@@ -418,8 +431,14 @@ static int cfg80211_init(void)
418 431
419 ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL); 432 ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL);
420 433
434 err = regulatory_init();
435 if (err)
436 goto out_fail_reg;
437
421 return 0; 438 return 0;
422 439
440out_fail_reg:
441 debugfs_remove(ieee80211_debugfs_dir);
423out_fail_nl80211: 442out_fail_nl80211:
424 unregister_netdevice_notifier(&cfg80211_netdev_notifier); 443 unregister_netdevice_notifier(&cfg80211_netdev_notifier);
425out_fail_notifier: 444out_fail_notifier:
@@ -427,6 +446,7 @@ out_fail_notifier:
427out_fail_sysfs: 446out_fail_sysfs:
428 return err; 447 return err;
429} 448}
449
430subsys_initcall(cfg80211_init); 450subsys_initcall(cfg80211_init);
431 451
432static void cfg80211_exit(void) 452static void cfg80211_exit(void)
@@ -435,5 +455,6 @@ static void cfg80211_exit(void)
435 nl80211_exit(); 455 nl80211_exit();
436 unregister_netdevice_notifier(&cfg80211_netdev_notifier); 456 unregister_netdevice_notifier(&cfg80211_netdev_notifier);
437 wiphy_sysfs_exit(); 457 wiphy_sysfs_exit();
458 regulatory_exit();
438} 459}
439module_exit(cfg80211_exit); 460module_exit(cfg80211_exit);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7a02c356d63d..771cc5cc7658 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -79,6 +79,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv,
79 char *newname); 79 char *newname);
80 80
81void ieee80211_set_bitrate_flags(struct wiphy *wiphy); 81void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
82void wiphy_update_regulatory(struct wiphy *wiphy); 82void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby);
83 83
84#endif /* __NET_WIRELESS_CORE_H */ 84#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 59eb2cf42e5f..572793c8c7ab 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -18,6 +18,7 @@
18#include <net/cfg80211.h> 18#include <net/cfg80211.h>
19#include "core.h" 19#include "core.h"
20#include "nl80211.h" 20#include "nl80211.h"
21#include "reg.h"
21 22
22/* the netlink family */ 23/* the netlink family */
23static struct genl_family nl80211_fam = { 24static struct genl_family nl80211_fam = {
@@ -87,6 +88,16 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
87 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, 88 [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
88 .len = IEEE80211_MAX_MESH_ID_LEN }, 89 .len = IEEE80211_MAX_MESH_ID_LEN },
89 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, 90 [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
91
92 [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
93 [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
94
95 [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
96 [NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 },
97 [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 },
98
99 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
100 .len = NL80211_HT_CAPABILITY_LEN },
90}; 101};
91 102
92/* message building helper */ 103/* message building helper */
@@ -106,10 +117,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
106 struct nlattr *nl_bands, *nl_band; 117 struct nlattr *nl_bands, *nl_band;
107 struct nlattr *nl_freqs, *nl_freq; 118 struct nlattr *nl_freqs, *nl_freq;
108 struct nlattr *nl_rates, *nl_rate; 119 struct nlattr *nl_rates, *nl_rate;
120 struct nlattr *nl_modes;
109 enum ieee80211_band band; 121 enum ieee80211_band band;
110 struct ieee80211_channel *chan; 122 struct ieee80211_channel *chan;
111 struct ieee80211_rate *rate; 123 struct ieee80211_rate *rate;
112 int i; 124 int i;
125 u16 ifmodes = dev->wiphy.interface_modes;
113 126
114 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY); 127 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_WIPHY);
115 if (!hdr) 128 if (!hdr)
@@ -118,6 +131,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
118 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx); 131 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->idx);
119 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 132 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
120 133
134 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
135 if (!nl_modes)
136 goto nla_put_failure;
137
138 i = 0;
139 while (ifmodes) {
140 if (ifmodes & 1)
141 NLA_PUT_FLAG(msg, i);
142 ifmodes >>= 1;
143 i++;
144 }
145
146 nla_nest_end(msg, nl_modes);
147
121 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); 148 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
122 if (!nl_bands) 149 if (!nl_bands)
123 goto nla_put_failure; 150 goto nla_put_failure;
@@ -272,7 +299,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
272 299
273 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 300 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
274 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); 301 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
275 /* TODO: interface type */ 302 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
276 return genlmsg_end(msg, hdr); 303 return genlmsg_end(msg, hdr);
277 304
278 nla_put_failure: 305 nla_put_failure:
@@ -391,40 +418,56 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
391 int err, ifindex; 418 int err, ifindex;
392 enum nl80211_iftype type; 419 enum nl80211_iftype type;
393 struct net_device *dev; 420 struct net_device *dev;
394 u32 flags; 421 u32 _flags, *flags = NULL;
395 422
396 memset(&params, 0, sizeof(params)); 423 memset(&params, 0, sizeof(params));
397 424
398 if (info->attrs[NL80211_ATTR_IFTYPE]) {
399 type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
400 if (type > NL80211_IFTYPE_MAX)
401 return -EINVAL;
402 } else
403 return -EINVAL;
404
405 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev); 425 err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
406 if (err) 426 if (err)
407 return err; 427 return err;
408 ifindex = dev->ifindex; 428 ifindex = dev->ifindex;
429 type = dev->ieee80211_ptr->iftype;
409 dev_put(dev); 430 dev_put(dev);
410 431
411 if (!drv->ops->change_virtual_intf) { 432 err = -EINVAL;
433 if (info->attrs[NL80211_ATTR_IFTYPE]) {
434 type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
435 if (type > NL80211_IFTYPE_MAX)
436 goto unlock;
437 }
438
439 if (!drv->ops->change_virtual_intf ||
440 !(drv->wiphy.interface_modes & (1 << type))) {
412 err = -EOPNOTSUPP; 441 err = -EOPNOTSUPP;
413 goto unlock; 442 goto unlock;
414 } 443 }
415 444
416 if (type == NL80211_IFTYPE_MESH_POINT && 445 if (info->attrs[NL80211_ATTR_MESH_ID]) {
417 info->attrs[NL80211_ATTR_MESH_ID]) { 446 if (type != NL80211_IFTYPE_MESH_POINT) {
447 err = -EINVAL;
448 goto unlock;
449 }
418 params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]); 450 params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
419 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); 451 params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
420 } 452 }
421 453
454 if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) {
455 if (type != NL80211_IFTYPE_MONITOR) {
456 err = -EINVAL;
457 goto unlock;
458 }
459 err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS],
460 &_flags);
461 if (!err)
462 flags = &_flags;
463 }
422 rtnl_lock(); 464 rtnl_lock();
423 err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
424 info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
425 &flags);
426 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex, 465 err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
427 type, err ? NULL : &flags, &params); 466 type, flags, &params);
467
468 dev = __dev_get_by_index(&init_net, ifindex);
469 WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != type));
470
428 rtnl_unlock(); 471 rtnl_unlock();
429 472
430 unlock: 473 unlock:
@@ -455,7 +498,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
455 if (IS_ERR(drv)) 498 if (IS_ERR(drv))
456 return PTR_ERR(drv); 499 return PTR_ERR(drv);
457 500
458 if (!drv->ops->add_virtual_intf) { 501 if (!drv->ops->add_virtual_intf ||
502 !(drv->wiphy.interface_modes & (1 << type))) {
459 err = -EOPNOTSUPP; 503 err = -EOPNOTSUPP;
460 goto unlock; 504 goto unlock;
461 } 505 }
@@ -1125,6 +1169,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
1125 params.listen_interval = 1169 params.listen_interval =
1126 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1170 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1127 1171
1172 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1173 params.ht_capa =
1174 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1175
1128 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1176 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
1129 &params.station_flags)) 1177 &params.station_flags))
1130 return -EINVAL; 1178 return -EINVAL;
@@ -1188,6 +1236,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
1188 params.listen_interval = 1236 params.listen_interval =
1189 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); 1237 nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
1190 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); 1238 params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
1239 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
1240 params.ht_capa =
1241 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
1191 1242
1192 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS], 1243 if (parse_station_flags(info->attrs[NL80211_ATTR_STA_FLAGS],
1193 &params.station_flags)) 1244 &params.station_flags))
@@ -1525,6 +1576,183 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
1525 return err; 1576 return err;
1526} 1577}
1527 1578
/*
 * NL80211_CMD_SET_BSS handler: update the BSS parameters (CTS
 * protection, short preamble, short slot time) of the interface named
 * by NL80211_ATTR_IFINDEX via the driver's change_bss() callback.
 * Returns 0 on success or a negative errno.
 */
static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *drv;
	int err;
	struct net_device *dev;
	struct bss_parameters params;

	memset(&params, 0, sizeof(params));
	/* default to not changing parameters: -1 means "leave as is",
	 * only attributes present in the message are applied below */
	params.use_cts_prot = -1;
	params.use_short_preamble = -1;
	params.use_short_slot_time = -1;

	if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
		params.use_cts_prot =
		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]);
	if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE])
		params.use_short_preamble =
		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]);
	if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME])
		params.use_short_slot_time =
		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]);

	/* Takes references on both drv and dev; released at "out" */
	err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
	if (err)
		return err;

	if (!drv->ops->change_bss) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* change_bss() is called under the RTNL like the other ops here */
	rtnl_lock();
	err = drv->ops->change_bss(&drv->wiphy, dev, &params);
	rtnl_unlock();

 out:
	cfg80211_put_dev(drv);
	dev_put(dev);
	return err;
}
1620
/* Netlink attribute policy for one nested regulatory rule. All
 * attributes are u32; frequencies are in KHz and power values in
 * mBi/mBm (see parse_reg_rule()). */
static const struct nla_policy
	reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
	[NL80211_ATTR_REG_RULE_FLAGS]		= { .type = NLA_U32 },
	[NL80211_ATTR_FREQ_RANGE_START]		= { .type = NLA_U32 },
	[NL80211_ATTR_FREQ_RANGE_END]		= { .type = NLA_U32 },
	[NL80211_ATTR_FREQ_RANGE_MAX_BW]	= { .type = NLA_U32 },
	[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]	= { .type = NLA_U32 },
	[NL80211_ATTR_POWER_RULE_MAX_EIRP]	= { .type = NLA_U32 },
};
1630
/*
 * Fill @reg_rule from the already-parsed rule attributes in @tb.
 * Flags, frequency range start/end, max bandwidth and max EIRP are
 * mandatory; max antenna gain is optional and left at 0 when absent.
 * Returns 0 on success, -EINVAL if any mandatory attribute is missing.
 */
static int parse_reg_rule(struct nlattr *tb[],
	struct ieee80211_reg_rule *reg_rule)
{
	struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
	struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;

	if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
		return -EINVAL;
	if (!tb[NL80211_ATTR_FREQ_RANGE_START])
		return -EINVAL;
	if (!tb[NL80211_ATTR_FREQ_RANGE_END])
		return -EINVAL;
	if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
		return -EINVAL;
	if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
		return -EINVAL;

	reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);

	freq_range->start_freq_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
	freq_range->end_freq_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
	freq_range->max_bandwidth_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);

	power_rule->max_eirp =
		nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);

	/* Max antenna gain is the only optional value in a rule */
	if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
		power_rule->max_antenna_gain =
			nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);

	return 0;
}
1666
/*
 * NL80211_CMD_REQ_SET_REG handler: userspace requests a switch to the
 * regulatory domain for the given alpha2 country code. This only files
 * a REGDOM_SET_BY_USER hint; the actual rules are delivered separately
 * (via __regulatory_hint()'s CRDA kick and NL80211_CMD_SET_REG).
 */
static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
	int r;
	char *data = NULL;

	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
		return -EINVAL;

	data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);

#ifdef CONFIG_WIRELESS_OLD_REGULATORY
	/* We ignore world regdom requests with the old regdom setup */
	if (is_world_regdom(data))
		return -EINVAL;
#endif
	mutex_lock(&cfg80211_drv_mutex);
	r = __regulatory_hint(NULL, REGDOM_SET_BY_USER, data, NULL);
	mutex_unlock(&cfg80211_drv_mutex);
	return r;
}
1687
/*
 * NL80211_CMD_SET_REG handler: userspace (e.g. CRDA) supplies a
 * complete regulatory domain (alpha2 plus nested rule attributes),
 * which is copied into a freshly allocated ieee80211_regdomain and
 * handed to set_regdom().
 */
static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1];
	struct nlattr *nl_reg_rule;
	char *alpha2 = NULL;
	int rem_reg_rules = 0, r = 0;
	u32 num_rules = 0, rule_idx = 0, size_of_regd;
	struct ieee80211_regdomain *rd = NULL;

	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_REG_RULES])
		return -EINVAL;

	alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);

	/* First pass: count the rules so the allocation can be sized.
	 * rd is still NULL here, so the kfree() at bad_reg is a no-op. */
	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
			rem_reg_rules) {
		num_rules++;
		if (num_rules > NL80211_MAX_SUPP_REG_RULES)
			goto bad_reg;
	}

	/* Only accept an alpha2 we have a pending request for */
	if (!reg_is_valid_request(alpha2))
		return -EINVAL;

	/* Rules are stored in a trailing array after the header */
	size_of_regd = sizeof(struct ieee80211_regdomain) +
		(num_rules * sizeof(struct ieee80211_reg_rule));

	rd = kzalloc(size_of_regd, GFP_KERNEL);
	if (!rd)
		return -ENOMEM;

	rd->n_reg_rules = num_rules;
	rd->alpha2[0] = alpha2[0];
	rd->alpha2[1] = alpha2[1];

	/* Second pass: parse each nested rule into the array */
	nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
			rem_reg_rules) {
		nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
			nla_data(nl_reg_rule), nla_len(nl_reg_rule),
			reg_rule_policy);
		r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
		if (r)
			goto bad_reg;

		rule_idx++;

		if (rule_idx > NL80211_MAX_SUPP_REG_RULES)
			goto bad_reg;
	}

	BUG_ON(rule_idx != num_rules);

	mutex_lock(&cfg80211_drv_mutex);
	r = set_regdom(rd);
	mutex_unlock(&cfg80211_drv_mutex);
	/* NOTE(review): on success rd presumably becomes owned by
	 * set_regdom(); if set_regdom() also frees rd on failure, the
	 * kfree() at bad_reg below would be a double free — confirm
	 * set_regdom()'s ownership contract. */
	if (r)
		goto bad_reg;

	return r;

bad_reg:
	/* NOTE(review): any specific error in r is collapsed to -EINVAL
	 * on this path — verify this is intentional. */
	kfree(rd);
	return -EINVAL;
}
1755
1528static struct genl_ops nl80211_ops[] = { 1756static struct genl_ops nl80211_ops[] = {
1529 { 1757 {
1530 .cmd = NL80211_CMD_GET_WIPHY, 1758 .cmd = NL80211_CMD_GET_WIPHY,
@@ -1656,6 +1884,24 @@ static struct genl_ops nl80211_ops[] = {
1656 .policy = nl80211_policy, 1884 .policy = nl80211_policy,
1657 .flags = GENL_ADMIN_PERM, 1885 .flags = GENL_ADMIN_PERM,
1658 }, 1886 },
1887 {
1888 .cmd = NL80211_CMD_SET_BSS,
1889 .doit = nl80211_set_bss,
1890 .policy = nl80211_policy,
1891 .flags = GENL_ADMIN_PERM,
1892 },
1893 {
1894 .cmd = NL80211_CMD_SET_REG,
1895 .doit = nl80211_set_reg,
1896 .policy = nl80211_policy,
1897 .flags = GENL_ADMIN_PERM,
1898 },
1899 {
1900 .cmd = NL80211_CMD_REQ_SET_REG,
1901 .doit = nl80211_req_set_reg,
1902 .policy = nl80211_policy,
1903 .flags = GENL_ADMIN_PERM,
1904 },
1659}; 1905};
1660 1906
1661/* multicast groups */ 1907/* multicast groups */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 855bff4b3250..626dbb688499 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2,179 +2,871 @@
2 * Copyright 2002-2005, Instant802 Networks, Inc. 2 * Copyright 2002-2005, Instant802 Networks, Inc.
3 * Copyright 2005-2006, Devicescape Software, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc.
4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 4 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5 * Copyright 2008 Luis R. Rodriguez <lrodriguz@atheros.com>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
9 */ 10 */
10 11
11/* 12/**
12 * This regulatory domain control implementation is highly incomplete, it 13 * DOC: Wireless regulatory infrastructure
13 * only exists for the purpose of not regressing mac80211.
14 *
15 * For now, drivers can restrict the set of allowed channels by either
16 * not registering those channels or setting the IEEE80211_CHAN_DISABLED
17 * flag; that flag will only be *set* by this code, never *cleared.
18 * 14 *
19 * The usual implementation is for a driver to read a device EEPROM to 15 * The usual implementation is for a driver to read a device EEPROM to
20 * determine which regulatory domain it should be operating under, then 16 * determine which regulatory domain it should be operating under, then
21 * looking up the allowable channels in a driver-local table and finally 17 * looking up the allowable channels in a driver-local table and finally
22 * registering those channels in the wiphy structure. 18 * registering those channels in the wiphy structure.
23 * 19 *
24 * Alternatively, drivers that trust the regulatory domain control here 20 * Another set of compliance enforcement is for drivers to use their
25 * will register a complete set of capabilities and the control code 21 * own compliance limits which can be stored on the EEPROM. The host
26 * will restrict the set by setting the IEEE80211_CHAN_* flags. 22 * driver or firmware may ensure these are used.
23 *
24 * In addition to all this we provide an extra layer of regulatory
25 * conformance. For drivers which do not have any regulatory
26 * information CRDA provides the complete regulatory solution.
27 * For others it provides a community effort on further restrictions
28 * to enhance compliance.
29 *
30 * Note: When number of rules --> infinity we will not be able to
31 * index on alpha2 any more, instead we'll probably have to
32 * rely on some SHA1 checksum of the regdomain for example.
33 *
27 */ 34 */
28#include <linux/kernel.h> 35#include <linux/kernel.h>
36#include <linux/list.h>
37#include <linux/random.h>
38#include <linux/nl80211.h>
39#include <linux/platform_device.h>
29#include <net/wireless.h> 40#include <net/wireless.h>
41#include <net/cfg80211.h>
30#include "core.h" 42#include "core.h"
43#include "reg.h"
31 44
32static char *ieee80211_regdom = "US"; 45/* wiphy is set if this request's initiator is REGDOM_SET_BY_DRIVER */
33module_param(ieee80211_regdom, charp, 0444); 46struct regulatory_request {
34MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); 47 struct list_head list;
35 48 struct wiphy *wiphy;
36struct ieee80211_channel_range { 49 int granted;
37 short start_freq; 50 enum reg_set_by initiator;
38 short end_freq; 51 char alpha2[2];
39 int max_power;
40 int max_antenna_gain;
41 u32 flags;
42}; 52};
43 53
44struct ieee80211_regdomain { 54static LIST_HEAD(regulatory_requests);
45 const char *code; 55DEFINE_MUTEX(cfg80211_reg_mutex);
46 const struct ieee80211_channel_range *ranges; 56
47 int n_ranges; 57/* To trigger userspace events */
58static struct platform_device *reg_pdev;
59
60/* Keep the ordering from large to small */
61static u32 supported_bandwidths[] = {
62 MHZ_TO_KHZ(40),
63 MHZ_TO_KHZ(20),
48}; 64};
49 65
50#define RANGE_PWR(_start, _end, _pwr, _ag, _flags) \ 66static struct list_head regulatory_requests;
51 { _start, _end, _pwr, _ag, _flags }
52 67
68/* Central wireless core regulatory domains, we only need two,
69 * the current one and a world regulatory domain in case we have no
70 * information to give us an alpha2 */
71static const struct ieee80211_regdomain *cfg80211_regdomain;
53 72
54/* 73/* We keep a static world regulatory domain in case of the absence of CRDA */
55 * Ideally, in the future, these definitions will be loaded from a 74static const struct ieee80211_regdomain world_regdom = {
56 * userspace table via some daemon. 75 .n_reg_rules = 1,
57 */ 76 .alpha2 = "00",
58static const struct ieee80211_channel_range ieee80211_US_channels[] = { 77 .reg_rules = {
59 /* IEEE 802.11b/g, channels 1..11 */ 78 REG_RULE(2412-10, 2462+10, 40, 6, 20,
60 RANGE_PWR(2412, 2462, 27, 6, 0), 79 NL80211_RRF_PASSIVE_SCAN |
61 /* IEEE 802.11a, channel 36*/ 80 NL80211_RRF_NO_IBSS),
62 RANGE_PWR(5180, 5180, 23, 6, 0), 81 }
63 /* IEEE 802.11a, channel 40*/
64 RANGE_PWR(5200, 5200, 23, 6, 0),
65 /* IEEE 802.11a, channel 44*/
66 RANGE_PWR(5220, 5220, 23, 6, 0),
67 /* IEEE 802.11a, channels 48..64 */
68 RANGE_PWR(5240, 5320, 23, 6, 0),
69 /* IEEE 802.11a, channels 149..165, outdoor */
70 RANGE_PWR(5745, 5825, 30, 6, 0),
71}; 82};
72 83
73static const struct ieee80211_channel_range ieee80211_JP_channels[] = { 84static const struct ieee80211_regdomain *cfg80211_world_regdom =
74 /* IEEE 802.11b/g, channels 1..14 */ 85 &world_regdom;
75 RANGE_PWR(2412, 2484, 20, 6, 0), 86
76 /* IEEE 802.11a, channels 34..48 */ 87#ifdef CONFIG_WIRELESS_OLD_REGULATORY
77 RANGE_PWR(5170, 5240, 20, 6, IEEE80211_CHAN_PASSIVE_SCAN), 88static char *ieee80211_regdom = "US";
78 /* IEEE 802.11a, channels 52..64 */ 89module_param(ieee80211_regdom, charp, 0444);
79 RANGE_PWR(5260, 5320, 20, 6, IEEE80211_CHAN_NO_IBSS | 90MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
80 IEEE80211_CHAN_RADAR), 91
81}; 92/* We assume 40 MHz bandwidth for the old regulatory work.
93 * We make emphasis we are using the exact same frequencies
94 * as before */
82 95
83static const struct ieee80211_channel_range ieee80211_EU_channels[] = { 96static const struct ieee80211_regdomain us_regdom = {
84 /* IEEE 802.11b/g, channels 1..13 */ 97 .n_reg_rules = 6,
85 RANGE_PWR(2412, 2472, 20, 6, 0), 98 .alpha2 = "US",
86 /* IEEE 802.11a, channel 36*/ 99 .reg_rules = {
87 RANGE_PWR(5180, 5180, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), 100 /* IEEE 802.11b/g, channels 1..11 */
88 /* IEEE 802.11a, channel 40*/ 101 REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
89 RANGE_PWR(5200, 5200, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), 102 /* IEEE 802.11a, channel 36 */
90 /* IEEE 802.11a, channel 44*/ 103 REG_RULE(5180-10, 5180+10, 40, 6, 23, 0),
91 RANGE_PWR(5220, 5220, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN), 104 /* IEEE 802.11a, channel 40 */
92 /* IEEE 802.11a, channels 48..64 */ 105 REG_RULE(5200-10, 5200+10, 40, 6, 23, 0),
93 RANGE_PWR(5240, 5320, 23, 6, IEEE80211_CHAN_NO_IBSS | 106 /* IEEE 802.11a, channel 44 */
94 IEEE80211_CHAN_RADAR), 107 REG_RULE(5220-10, 5220+10, 40, 6, 23, 0),
95 /* IEEE 802.11a, channels 100..140 */ 108 /* IEEE 802.11a, channels 48..64 */
96 RANGE_PWR(5500, 5700, 30, 6, IEEE80211_CHAN_NO_IBSS | 109 REG_RULE(5240-10, 5320+10, 40, 6, 23, 0),
97 IEEE80211_CHAN_RADAR), 110 /* IEEE 802.11a, channels 149..165, outdoor */
111 REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
112 }
98}; 113};
99 114
100#define REGDOM(_code) \ 115static const struct ieee80211_regdomain jp_regdom = {
101 { \ 116 .n_reg_rules = 3,
102 .code = __stringify(_code), \ 117 .alpha2 = "JP",
103 .ranges = ieee80211_ ##_code## _channels, \ 118 .reg_rules = {
104 .n_ranges = ARRAY_SIZE(ieee80211_ ##_code## _channels), \ 119 /* IEEE 802.11b/g, channels 1..14 */
120 REG_RULE(2412-10, 2484+10, 40, 6, 20, 0),
121 /* IEEE 802.11a, channels 34..48 */
122 REG_RULE(5170-10, 5240+10, 40, 6, 20,
123 NL80211_RRF_PASSIVE_SCAN),
124 /* IEEE 802.11a, channels 52..64 */
125 REG_RULE(5260-10, 5320+10, 40, 6, 20,
126 NL80211_RRF_NO_IBSS |
127 NL80211_RRF_DFS),
105 } 128 }
129};
106 130
107static const struct ieee80211_regdomain ieee80211_regdoms[] = { 131static const struct ieee80211_regdomain eu_regdom = {
108 REGDOM(US), 132 .n_reg_rules = 6,
109 REGDOM(JP), 133 /* This alpha2 is bogus, we leave it here just for stupid
110 REGDOM(EU), 134 * backward compatibility */
135 .alpha2 = "EU",
136 .reg_rules = {
137 /* IEEE 802.11b/g, channels 1..13 */
138 REG_RULE(2412-10, 2472+10, 40, 6, 20, 0),
139 /* IEEE 802.11a, channel 36 */
140 REG_RULE(5180-10, 5180+10, 40, 6, 23,
141 NL80211_RRF_PASSIVE_SCAN),
142 /* IEEE 802.11a, channel 40 */
143 REG_RULE(5200-10, 5200+10, 40, 6, 23,
144 NL80211_RRF_PASSIVE_SCAN),
145 /* IEEE 802.11a, channel 44 */
146 REG_RULE(5220-10, 5220+10, 40, 6, 23,
147 NL80211_RRF_PASSIVE_SCAN),
148 /* IEEE 802.11a, channels 48..64 */
149 REG_RULE(5240-10, 5320+10, 40, 6, 20,
150 NL80211_RRF_NO_IBSS |
151 NL80211_RRF_DFS),
152 /* IEEE 802.11a, channels 100..140 */
153 REG_RULE(5500-10, 5700+10, 40, 6, 30,
154 NL80211_RRF_NO_IBSS |
155 NL80211_RRF_DFS),
156 }
111}; 157};
112 158
/* Map the old ieee80211_regdom module-parameter country code to one of
 * the built-in static regdomains; anything unrecognized falls back to
 * the US table, matching the old regulatory behavior. */
static const struct ieee80211_regdomain *static_regdom(char *alpha2)
{
	if (alpha2[0] == 'U' && alpha2[1] == 'S')
		return &us_regdom;
	if (alpha2[0] == 'J' && alpha2[1] == 'P')
		return &jp_regdom;
	if (alpha2[0] == 'E' && alpha2[1] == 'U')
		return &eu_regdom;
	/* Default, as per the old rules */
	return &us_regdom;
}
170
/* True if @rd points at one of the built-in static regdomains above,
 * so callers can avoid kfree()ing static data. */
static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
{
	if (rd == &us_regdom || rd == &jp_regdom || rd == &eu_regdom)
		return true;
	return false;
}
177#else
178static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
179{
180 return false;
181}
182#endif
113 183
114static const struct ieee80211_regdomain *get_regdom(void) 184static void reset_regdomains(void)
115{ 185{
116 static const struct ieee80211_channel_range 186 /* avoid freeing static information or freeing something twice */
117 ieee80211_world_channels[] = { 187 if (cfg80211_regdomain == cfg80211_world_regdom)
118 /* IEEE 802.11b/g, channels 1..11 */ 188 cfg80211_regdomain = NULL;
119 RANGE_PWR(2412, 2462, 27, 6, 0), 189 if (cfg80211_world_regdom == &world_regdom)
190 cfg80211_world_regdom = NULL;
191 if (cfg80211_regdomain == &world_regdom)
192 cfg80211_regdomain = NULL;
193 if (is_old_static_regdom(cfg80211_regdomain))
194 cfg80211_regdomain = NULL;
195
196 kfree(cfg80211_regdomain);
197 kfree(cfg80211_world_regdom);
198
199 cfg80211_world_regdom = &world_regdom;
200 cfg80211_regdomain = NULL;
201}
202
/* Dynamic world regulatory domain requested by the wireless
 * core upon initialization. Frees/resets the previous regdomains and
 * installs @rd as both the world and the current regulatory domain.
 * A request must already be queued (hence the BUG_ON). */
static void update_world_regdomain(const struct ieee80211_regdomain *rd)
{
	BUG_ON(list_empty(&regulatory_requests));

	reset_regdomains();

	cfg80211_world_regdom = rd;
	cfg80211_regdomain = rd;
}
214
/* True if @alpha2 names the world regulatory domain ("00");
 * NULL is treated as "not the world regdom". */
bool is_world_regdom(const char *alpha2)
{
	return alpha2 && alpha2[0] == '0' && alpha2[1] == '0';
}
223
/* True if both characters of @alpha2 are non-zero (i.e. the code has
 * actually been filled in); NULL counts as unset. */
static bool is_alpha2_set(const char *alpha2)
{
	if (alpha2 == NULL)
		return false;
	return alpha2[0] != 0 && alpha2[1] != 0;
}
232
/* True if @letter is an ASCII uppercase letter.
 * Character constants replace the previous magic numbers 65/90
 * ('A' and 'Z' in ASCII) for readability; behavior is unchanged. */
static bool is_alpha_upper(char letter)
{
	return letter >= 'A' && letter <= 'Z';
}
240
/* Special case where a regulatory domain was built by a driver but a
 * specific alpha2 could not be determined: the sentinel code "99". */
static bool is_unknown_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	return (alpha2[0] == '9') && (alpha2[1] == '9');
}
251
/* True if @alpha2 looks like a country code: two ASCII uppercase
 * letters. NULL is rejected. */
static bool is_an_alpha2(const char *alpha2)
{
	if (!alpha2)
		return false;
	/* the uppercase check is inlined: 'A'..'Z' in ASCII */
	return (alpha2[0] >= 'A' && alpha2[0] <= 'Z') &&
	       (alpha2[1] >= 'A' && alpha2[1] <= 'Z');
}
260
/* Compare two 2-character country codes; a NULL on either side makes
 * the comparison fail. */
static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)
{
	if (!alpha2_x || !alpha2_y)
		return false;
	return alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1];
}
270
271static bool regdom_changed(const char *alpha2)
272{
273 if (!cfg80211_regdomain)
274 return true;
275 if (alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
276 return false;
277 return true;
278}
279
280/* This lets us keep regulatory code which is updated on a regulatory
281 * basis in userspace. */
282static int call_crda(const char *alpha2)
283{
284 char country_env[9 + 2] = "COUNTRY=";
285 char *envp[] = {
286 country_env,
287 NULL
120 }; 288 };
121 static const struct ieee80211_regdomain regdom_world = REGDOM(world);
122 int i;
123 289
124 for (i = 0; i < ARRAY_SIZE(ieee80211_regdoms); i++) 290 if (!is_world_regdom((char *) alpha2))
125 if (strcmp(ieee80211_regdom, ieee80211_regdoms[i].code) == 0) 291 printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n",
126 return &ieee80211_regdoms[i]; 292 alpha2[0], alpha2[1]);
293 else
294 printk(KERN_INFO "cfg80211: Calling CRDA to update world "
295 "regulatory domain\n");
296
297 country_env[8] = alpha2[0];
298 country_env[9] = alpha2[1];
127 299
128 return &regdom_world; 300 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, envp);
129} 301}
130 302
303/* This has the logic which determines when a new request
304 * should be ignored. */
305static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by,
306 char *alpha2, struct ieee80211_regdomain *rd)
307{
308 struct regulatory_request *last_request = NULL;
309
310 /* All initial requests are respected */
311 if (list_empty(&regulatory_requests))
312 return 0;
313
314 last_request = list_first_entry(&regulatory_requests,
315 struct regulatory_request, list);
131 316
132static void handle_channel(struct ieee80211_channel *chan, 317 switch (set_by) {
133 const struct ieee80211_regdomain *rd) 318 case REGDOM_SET_BY_INIT:
319 return -EINVAL;
320 case REGDOM_SET_BY_CORE:
321 /* Always respect new wireless core hints, should only
322 * come in for updating the world regulatory domain at init
323 * anyway */
324 return 0;
325 case REGDOM_SET_BY_COUNTRY_IE:
326 if (last_request->initiator == set_by) {
327 if (last_request->wiphy != wiphy) {
328 /* Two cards with two APs claiming different
329 * different Country IE alpha2s!
330 * You're special!! */
331 if (!alpha2_equal(last_request->alpha2,
332 cfg80211_regdomain->alpha2)) {
333 /* XXX: Deal with conflict, consider
334 * building a new one out of the
335 * intersection */
336 WARN_ON(1);
337 return -EOPNOTSUPP;
338 }
339 return -EALREADY;
340 }
341 /* Two consecutive Country IE hints on the same wiphy */
342 if (!alpha2_equal(cfg80211_regdomain->alpha2, alpha2))
343 return 0;
344 return -EALREADY;
345 }
346 if (WARN_ON(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2)),
347 "Invalid Country IE regulatory hint passed "
348 "to the wireless core\n")
349 return -EINVAL;
350 /* We ignore Country IE hints for now, as we haven't yet
351 * added the dot11MultiDomainCapabilityEnabled flag
352 * for wiphys */
353 return 1;
354 case REGDOM_SET_BY_DRIVER:
355 BUG_ON(!wiphy);
356 if (last_request->initiator == set_by) {
357 /* Two separate drivers hinting different things,
358 * this is possible if you have two devices present
359 * on a system with different EEPROM regulatory
360 * readings. XXX: Do intersection, we support only
361 * the first regulatory hint for now */
362 if (last_request->wiphy != wiphy)
363 return -EALREADY;
364 if (rd)
365 return -EALREADY;
366 /* Driver should not be trying to hint different
367 * regulatory domains! */
368 BUG_ON(!alpha2_equal(alpha2,
369 cfg80211_regdomain->alpha2));
370 return -EALREADY;
371 }
372 if (last_request->initiator == REGDOM_SET_BY_CORE)
373 return 0;
374 /* XXX: Handle intersection, and add the
375 * dot11MultiDomainCapabilityEnabled flag to wiphy. For now
376 * we assume the driver has this set to false, following the
377 * 802.11d dot11MultiDomainCapabilityEnabled documentation */
378 if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
379 return 0;
380 return 0;
381 case REGDOM_SET_BY_USER:
382 if (last_request->initiator == set_by ||
383 last_request->initiator == REGDOM_SET_BY_CORE)
384 return 0;
385 /* Drivers can use their wiphy's reg_notifier()
386 * to override any information */
387 if (last_request->initiator == REGDOM_SET_BY_DRIVER)
388 return 0;
389 /* XXX: Handle intersection */
390 if (last_request->initiator == REGDOM_SET_BY_COUNTRY_IE)
391 return -EOPNOTSUPP;
392 return 0;
393 default:
394 return -EINVAL;
395 }
396}
397
/*
 * Look up a pending request matching @alpha2 on the
 * regulatory_requests list. On a match *@request is set to the entry
 * found and true is returned; false otherwise (also for an empty list).
 */
static bool __reg_is_valid_request(const char *alpha2,
	struct regulatory_request **request)
{
	struct regulatory_request *req;
	if (list_empty(&regulatory_requests))
		return false;
	list_for_each_entry(req, &regulatory_requests, list) {
		if (alpha2_equal(req->alpha2, alpha2)) {
			*request = req;
			return true;
		}
	}
	return false;
}
412
/* Used by nl80211 before kmalloc'ing our regulatory domain: returns
 * true only if a pending request for @alpha2 exists, i.e. the incoming
 * regdomain corresponds to something we actually asked for. The found
 * request itself is discarded here. */
bool reg_is_valid_request(const char *alpha2)
{
	struct regulatory_request *request = NULL;
	return __reg_is_valid_request(alpha2, &request);
}
419
420/* Sanity check on a regulatory rule */
421static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
422{
423 const struct ieee80211_freq_range *freq_range = &rule->freq_range;
424 u32 freq_diff;
425
426 if (freq_range->start_freq_khz == 0 || freq_range->end_freq_khz == 0)
427 return false;
428
429 if (freq_range->start_freq_khz > freq_range->end_freq_khz)
430 return false;
431
432 freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
433
434 if (freq_range->max_bandwidth_khz > freq_diff)
435 return false;
436
437 return true;
438}
439
440static bool is_valid_rd(const struct ieee80211_regdomain *rd)
441{
442 const struct ieee80211_reg_rule *reg_rule = NULL;
443 unsigned int i;
444
445 if (!rd->n_reg_rules)
446 return false;
447
448 for (i = 0; i < rd->n_reg_rules; i++) {
449 reg_rule = &rd->reg_rules[i];
450 if (!is_valid_reg_rule(reg_rule))
451 return false;
452 }
453
454 return true;
455}
456
457/* Returns value in KHz */
458static u32 freq_max_bandwidth(const struct ieee80211_freq_range *freq_range,
459 u32 freq)
460{
461 unsigned int i;
462 for (i = 0; i < ARRAY_SIZE(supported_bandwidths); i++) {
463 u32 start_freq_khz = freq - supported_bandwidths[i]/2;
464 u32 end_freq_khz = freq + supported_bandwidths[i]/2;
465 if (start_freq_khz >= freq_range->start_freq_khz &&
466 end_freq_khz <= freq_range->end_freq_khz)
467 return supported_bandwidths[i];
468 }
469 return 0;
470}
471
472/* XXX: add support for the rest of enum nl80211_reg_rule_flags, we may
473 * want to just have the channel structure use these */
474static u32 map_regdom_flags(u32 rd_flags)
475{
476 u32 channel_flags = 0;
477 if (rd_flags & NL80211_RRF_PASSIVE_SCAN)
478 channel_flags |= IEEE80211_CHAN_PASSIVE_SCAN;
479 if (rd_flags & NL80211_RRF_NO_IBSS)
480 channel_flags |= IEEE80211_CHAN_NO_IBSS;
481 if (rd_flags & NL80211_RRF_DFS)
482 channel_flags |= IEEE80211_CHAN_RADAR;
483 return channel_flags;
484}
485
486/**
487 * freq_reg_info - get regulatory information for the given frequency
488 * @center_freq: Frequency in KHz for which we want regulatory information for
489 * @bandwidth: the bandwidth requirement you have in KHz, if you do not have one
490 * you can set this to 0. If this frequency is allowed we then set
491 * this value to the maximum allowed bandwidth.
492 * @reg_rule: the regulatory rule which we have for this frequency
493 *
494 * Use this function to get the regulatory rule for a specific frequency.
495 */
496static int freq_reg_info(u32 center_freq, u32 *bandwidth,
497 const struct ieee80211_reg_rule **reg_rule)
134{ 498{
135 int i; 499 int i;
136 u32 flags = chan->orig_flags; 500 u32 max_bandwidth = 0;
137 const struct ieee80211_channel_range *rg = NULL;
138 501
139 for (i = 0; i < rd->n_ranges; i++) { 502 if (!cfg80211_regdomain)
140 if (rd->ranges[i].start_freq <= chan->center_freq && 503 return -EINVAL;
141 chan->center_freq <= rd->ranges[i].end_freq) { 504
142 rg = &rd->ranges[i]; 505 for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
506 const struct ieee80211_reg_rule *rr;
507 const struct ieee80211_freq_range *fr = NULL;
508 const struct ieee80211_power_rule *pr = NULL;
509
510 rr = &cfg80211_regdomain->reg_rules[i];
511 fr = &rr->freq_range;
512 pr = &rr->power_rule;
513 max_bandwidth = freq_max_bandwidth(fr, center_freq);
514 if (max_bandwidth && *bandwidth <= max_bandwidth) {
515 *reg_rule = rr;
516 *bandwidth = max_bandwidth;
143 break; 517 break;
144 } 518 }
145 } 519 }
146 520
147 if (!rg) { 521 return !max_bandwidth;
148 /* not found */ 522}
523
524static void handle_channel(struct ieee80211_channel *chan)
525{
526 int r;
527 u32 flags = chan->orig_flags;
528 u32 max_bandwidth = 0;
529 const struct ieee80211_reg_rule *reg_rule = NULL;
530 const struct ieee80211_power_rule *power_rule = NULL;
531
532 r = freq_reg_info(MHZ_TO_KHZ(chan->center_freq),
533 &max_bandwidth, &reg_rule);
534
535 if (r) {
149 flags |= IEEE80211_CHAN_DISABLED; 536 flags |= IEEE80211_CHAN_DISABLED;
150 chan->flags = flags; 537 chan->flags = flags;
151 return; 538 return;
152 } 539 }
153 540
154 chan->flags = flags; 541 power_rule = &reg_rule->power_rule;
542
543 chan->flags = flags | map_regdom_flags(reg_rule->flags);
155 chan->max_antenna_gain = min(chan->orig_mag, 544 chan->max_antenna_gain = min(chan->orig_mag,
156 rg->max_antenna_gain); 545 (int) MBI_TO_DBI(power_rule->max_antenna_gain));
546 chan->max_bandwidth = KHZ_TO_MHZ(max_bandwidth);
157 if (chan->orig_mpwr) 547 if (chan->orig_mpwr)
158 chan->max_power = min(chan->orig_mpwr, rg->max_power); 548 chan->max_power = min(chan->orig_mpwr,
549 (int) MBM_TO_DBM(power_rule->max_eirp));
159 else 550 else
160 chan->max_power = rg->max_power; 551 chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
161} 552}
162 553
163static void handle_band(struct ieee80211_supported_band *sband, 554static void handle_band(struct ieee80211_supported_band *sband)
164 const struct ieee80211_regdomain *rd)
165{ 555{
166 int i; 556 int i;
167 557
168 for (i = 0; i < sband->n_channels; i++) 558 for (i = 0; i < sband->n_channels; i++)
169 handle_channel(&sband->channels[i], rd); 559 handle_channel(&sband->channels[i]);
170} 560}
171 561
172void wiphy_update_regulatory(struct wiphy *wiphy) 562static void update_all_wiphy_regulatory(enum reg_set_by setby)
173{ 563{
174 enum ieee80211_band band; 564 struct cfg80211_registered_device *drv;
175 const struct ieee80211_regdomain *rd = get_regdom(); 565
566 list_for_each_entry(drv, &cfg80211_drv_list, list)
567 wiphy_update_regulatory(&drv->wiphy, setby);
568}
176 569
177 for (band = 0; band < IEEE80211_NUM_BANDS; band++) 570void wiphy_update_regulatory(struct wiphy *wiphy, enum reg_set_by setby)
571{
572 enum ieee80211_band band;
573 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
178 if (wiphy->bands[band]) 574 if (wiphy->bands[band])
179 handle_band(wiphy->bands[band], rd); 575 handle_band(wiphy->bands[band]);
576 if (wiphy->reg_notifier)
577 wiphy->reg_notifier(wiphy, setby);
578 }
579}
580
/* Caller must hold &cfg80211_drv_mutex.
 *
 * Core of the regulatory hint machinery: after ignore_request()
 * filtering, a regulatory_request is queued for the given initiator.
 * If no full regdomain (@rd) was supplied, userspace (CRDA) is kicked
 * via a uevent to provide one. Returns 0 on success or a negative
 * errno (including the -EALREADY/-EOPNOTSUPP results from
 * ignore_request()).
 */
int __regulatory_hint(struct wiphy *wiphy, enum reg_set_by set_by,
		      const char *alpha2, struct ieee80211_regdomain *rd)
{
	struct regulatory_request *request;
	char *rd_alpha2;
	int r = 0;

	r = ignore_request(wiphy, set_by, (char *) alpha2, rd);
	if (r)
		return r;

	/* Prefer the alpha2 embedded in the supplied regdomain */
	if (rd)
		rd_alpha2 = rd->alpha2;
	else
		rd_alpha2 = (char *) alpha2;

	switch (set_by) {
	case REGDOM_SET_BY_CORE:
	case REGDOM_SET_BY_COUNTRY_IE:
	case REGDOM_SET_BY_DRIVER:
	case REGDOM_SET_BY_USER:
		request = kzalloc(sizeof(struct regulatory_request),
				  GFP_KERNEL);
		if (!request)
			return -ENOMEM;

		request->alpha2[0] = rd_alpha2[0];
		request->alpha2[1] = rd_alpha2[1];
		request->initiator = set_by;
		request->wiphy = wiphy;

		list_add_tail(&request->list, &regulatory_requests);
		/* A caller-supplied regdomain means no need to ask CRDA */
		if (rd)
			break;
		/* NOTE(review): the queued request is left on the list
		 * even when call_crda() fails — confirm this is intended */
		r = call_crda(alpha2);
#ifndef CONFIG_WIRELESS_OLD_REGULATORY
		if (r)
			printk(KERN_ERR "cfg80211: Failed calling CRDA\n");
#endif
		break;
	default:
		r = -ENOTSUPP;
		break;
	}

	return r;
}
629
630/* If rd is not NULL and if this call fails the caller must free it */
631int regulatory_hint(struct wiphy *wiphy, const char *alpha2,
632 struct ieee80211_regdomain *rd)
633{
634 int r;
635 BUG_ON(!rd && !alpha2);
636
637 mutex_lock(&cfg80211_drv_mutex);
638
639 r = __regulatory_hint(wiphy, REGDOM_SET_BY_DRIVER, alpha2, rd);
640 if (r || !rd)
641 goto unlock_and_exit;
642
643 /* If the driver passed a regulatory domain we skipped asking
644 * userspace for one so we can now go ahead and set it */
645 r = set_regdom(rd);
646
647unlock_and_exit:
648 mutex_unlock(&cfg80211_drv_mutex);
649 return r;
650}
651EXPORT_SYMBOL(regulatory_hint);
652
653
654static void print_rd_rules(const struct ieee80211_regdomain *rd)
655{
656 unsigned int i;
657 const struct ieee80211_reg_rule *reg_rule = NULL;
658 const struct ieee80211_freq_range *freq_range = NULL;
659 const struct ieee80211_power_rule *power_rule = NULL;
660
661 printk(KERN_INFO "\t(start_freq - end_freq @ bandwidth), "
662 "(max_antenna_gain, max_eirp)\n");
663
664 for (i = 0; i < rd->n_reg_rules; i++) {
665 reg_rule = &rd->reg_rules[i];
666 freq_range = &reg_rule->freq_range;
667 power_rule = &reg_rule->power_rule;
668
669 /* There may not be documentation for max antenna gain
670 * in certain regions */
671 if (power_rule->max_antenna_gain)
672 printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
673 "(%d mBi, %d mBm)\n",
674 freq_range->start_freq_khz,
675 freq_range->end_freq_khz,
676 freq_range->max_bandwidth_khz,
677 power_rule->max_antenna_gain,
678 power_rule->max_eirp);
679 else
680 printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
681 "(N/A, %d mBm)\n",
682 freq_range->start_freq_khz,
683 freq_range->end_freq_khz,
684 freq_range->max_bandwidth_khz,
685 power_rule->max_eirp);
686 }
687}
688
689static void print_regdomain(const struct ieee80211_regdomain *rd)
690{
691
692 if (is_world_regdom(rd->alpha2))
693 printk(KERN_INFO "cfg80211: World regulatory "
694 "domain updated:\n");
695 else {
696 if (is_unknown_alpha2(rd->alpha2))
697 printk(KERN_INFO "cfg80211: Regulatory domain "
698 "changed to driver built-in settings "
699 "(unknown country)\n");
700 else
701 printk(KERN_INFO "cfg80211: Regulatory domain "
702 "changed to country: %c%c\n",
703 rd->alpha2[0], rd->alpha2[1]);
704 }
705 print_rd_rules(rd);
706}
707
708void print_regdomain_info(const struct ieee80211_regdomain *rd)
709{
710 printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n",
711 rd->alpha2[0], rd->alpha2[1]);
712 print_rd_rules(rd);
713}
714
715static int __set_regdom(const struct ieee80211_regdomain *rd)
716{
717 struct regulatory_request *request = NULL;
718
719 /* Some basic sanity checks first */
720
721 if (is_world_regdom(rd->alpha2)) {
722 if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request)))
723 return -EINVAL;
724 update_world_regdomain(rd);
725 return 0;
726 }
727
728 if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) &&
729 !is_unknown_alpha2(rd->alpha2))
730 return -EINVAL;
731
732 if (list_empty(&regulatory_requests))
733 return -EINVAL;
734
735 /* allow overriding the static definitions if CRDA is present */
736 if (!is_old_static_regdom(cfg80211_regdomain) &&
737 !regdom_changed(rd->alpha2))
738 return -EINVAL;
739
740 /* Now lets set the regulatory domain, update all driver channels
741 * and finally inform them of what we have done, in case they want
742 * to review or adjust their own settings based on their own
743 * internal EEPROM data */
744
745 if (WARN_ON(!__reg_is_valid_request(rd->alpha2, &request)))
746 return -EINVAL;
747
748 reset_regdomains();
749
750 /* Country IE parsing coming soon */
751 switch (request->initiator) {
752 case REGDOM_SET_BY_CORE:
753 case REGDOM_SET_BY_DRIVER:
754 case REGDOM_SET_BY_USER:
755 if (!is_valid_rd(rd)) {
756 printk(KERN_ERR "cfg80211: Invalid "
757 "regulatory domain detected:\n");
758 print_regdomain_info(rd);
759 return -EINVAL;
760 }
761 break;
762 case REGDOM_SET_BY_COUNTRY_IE: /* Not yet */
763 WARN_ON(1);
764 default:
765 return -EOPNOTSUPP;
766 }
767
768 /* Tada! */
769 cfg80211_regdomain = rd;
770 request->granted = 1;
771
772 return 0;
773}
774
775
776/* Use this call to set the current regulatory domain. Conflicts with
777 * multiple drivers can be ironed out later. Caller must've already
778 * kmalloc'd the rd structure. If this calls fails you should kfree()
779 * the passed rd. Caller must hold cfg80211_drv_mutex */
780int set_regdom(const struct ieee80211_regdomain *rd)
781{
782 struct regulatory_request *this_request = NULL, *prev_request = NULL;
783 int r;
784
785 if (!list_empty(&regulatory_requests))
786 prev_request = list_first_entry(&regulatory_requests,
787 struct regulatory_request, list);
788
789 /* Note that this doesn't update the wiphys, this is done below */
790 r = __set_regdom(rd);
791 if (r)
792 return r;
793
794 BUG_ON((!__reg_is_valid_request(rd->alpha2, &this_request)));
795
796 /* The initial standard core update of the world regulatory domain, no
797 * need to keep that request info around if it didn't fail. */
798 if (is_world_regdom(rd->alpha2) &&
799 this_request->initiator == REGDOM_SET_BY_CORE &&
800 this_request->granted) {
801 list_del(&this_request->list);
802 kfree(this_request);
803 this_request = NULL;
804 }
805
806 /* Remove old requests, we only leave behind the last one */
807 if (prev_request) {
808 list_del(&prev_request->list);
809 kfree(prev_request);
810 prev_request = NULL;
811 }
812
813 /* This would make this whole thing pointless */
814 BUG_ON(rd != cfg80211_regdomain);
815
816 /* update all wiphys now with the new established regulatory domain */
817 update_all_wiphy_regulatory(this_request->initiator);
818
819 print_regdomain(rd);
820
821 return r;
822}
823
824int regulatory_init(void)
825{
826 int err;
827
828 reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
829 if (IS_ERR(reg_pdev))
830 return PTR_ERR(reg_pdev);
831
832#ifdef CONFIG_WIRELESS_OLD_REGULATORY
833 cfg80211_regdomain = static_regdom(ieee80211_regdom);
834
835 printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
836 print_regdomain_info(cfg80211_regdomain);
837 /* The old code still requests for a new regdomain and if
838 * you have CRDA you get it updated, otherwise you get
839 * stuck with the static values. We ignore "EU" code as
840 * that is not a valid ISO / IEC 3166 alpha2 */
841 if (ieee80211_regdom[0] != 'E' && ieee80211_regdom[1] != 'U')
842 err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE,
843 ieee80211_regdom, NULL);
844#else
845 cfg80211_regdomain = cfg80211_world_regdom;
846
847 err = __regulatory_hint(NULL, REGDOM_SET_BY_CORE, "00", NULL);
848 if (err)
849 printk(KERN_ERR "cfg80211: calling CRDA failed - "
850 "unable to update world regulatory domain, "
851 "using static definition\n");
852#endif
853
854 return 0;
855}
856
857void regulatory_exit(void)
858{
859 struct regulatory_request *req, *req_tmp;
860
861 mutex_lock(&cfg80211_drv_mutex);
862
863 reset_regdomains();
864
865 list_for_each_entry_safe(req, req_tmp, &regulatory_requests, list) {
866 list_del(&req->list);
867 kfree(req);
868 }
869 platform_device_unregister(reg_pdev);
870
871 mutex_unlock(&cfg80211_drv_mutex);
180} 872}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
new file mode 100644
index 000000000000..a33362872f3c
--- /dev/null
+++ b/net/wireless/reg.h
@@ -0,0 +1,13 @@
1#ifndef __NET_WIRELESS_REG_H
2#define __NET_WIRELESS_REG_H
3
4extern struct mutex cfg80211_reg_mutex;
5bool is_world_regdom(const char *alpha2);
6bool reg_is_valid_request(const char *alpha2);
7
8int regulatory_init(void);
9void regulatory_exit(void);
10
11int set_regdom(const struct ieee80211_regdomain *rd);
12
13#endif /* __NET_WIRELESS_REG_H */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b7754b1b73a4..ef9ccbc38752 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -34,7 +34,7 @@
34 34
35#include "xfrm_hash.h" 35#include "xfrm_hash.h"
36 36
37int sysctl_xfrm_larval_drop __read_mostly; 37int sysctl_xfrm_larval_drop __read_mostly = 1;
38 38
39#ifdef CONFIG_XFRM_STATISTICS 39#ifdef CONFIG_XFRM_STATISTICS
40DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly; 40DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics) __read_mostly;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0a8f09c3144c..053970e8765d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -59,6 +59,14 @@ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
59static unsigned int xfrm_state_num; 59static unsigned int xfrm_state_num;
60static unsigned int xfrm_state_genid; 60static unsigned int xfrm_state_genid;
61 61
62/* Counter indicating ongoing walk, protected by xfrm_state_lock. */
63static unsigned long xfrm_state_walk_ongoing;
64/* Counter indicating walk completion, protected by xfrm_cfg_mutex. */
65static unsigned long xfrm_state_walk_completed;
66
67/* List of outstanding state walks used to set the completed counter. */
68static LIST_HEAD(xfrm_state_walks);
69
62static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 70static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
63static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); 71static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
64 72
@@ -191,7 +199,8 @@ static DEFINE_RWLOCK(xfrm_state_afinfo_lock);
191static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO]; 199static struct xfrm_state_afinfo *xfrm_state_afinfo[NPROTO];
192 200
193static struct work_struct xfrm_state_gc_work; 201static struct work_struct xfrm_state_gc_work;
194static HLIST_HEAD(xfrm_state_gc_list); 202static LIST_HEAD(xfrm_state_gc_leftovers);
203static LIST_HEAD(xfrm_state_gc_list);
195static DEFINE_SPINLOCK(xfrm_state_gc_lock); 204static DEFINE_SPINLOCK(xfrm_state_gc_lock);
196 205
197int __xfrm_state_delete(struct xfrm_state *x); 206int __xfrm_state_delete(struct xfrm_state *x);
@@ -403,17 +412,23 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
403 412
404static void xfrm_state_gc_task(struct work_struct *data) 413static void xfrm_state_gc_task(struct work_struct *data)
405{ 414{
406 struct xfrm_state *x; 415 struct xfrm_state *x, *tmp;
407 struct hlist_node *entry, *tmp; 416 unsigned long completed;
408 struct hlist_head gc_list;
409 417
418 mutex_lock(&xfrm_cfg_mutex);
410 spin_lock_bh(&xfrm_state_gc_lock); 419 spin_lock_bh(&xfrm_state_gc_lock);
411 gc_list.first = xfrm_state_gc_list.first; 420 list_splice_tail_init(&xfrm_state_gc_list, &xfrm_state_gc_leftovers);
412 INIT_HLIST_HEAD(&xfrm_state_gc_list);
413 spin_unlock_bh(&xfrm_state_gc_lock); 421 spin_unlock_bh(&xfrm_state_gc_lock);
414 422
415 hlist_for_each_entry_safe(x, entry, tmp, &gc_list, bydst) 423 completed = xfrm_state_walk_completed;
424 mutex_unlock(&xfrm_cfg_mutex);
425
426 list_for_each_entry_safe(x, tmp, &xfrm_state_gc_leftovers, gclist) {
427 if ((long)(x->lastused - completed) > 0)
428 break;
429 list_del(&x->gclist);
416 xfrm_state_gc_destroy(x); 430 xfrm_state_gc_destroy(x);
431 }
417 432
418 wake_up(&km_waitq); 433 wake_up(&km_waitq);
419} 434}
@@ -540,12 +555,8 @@ void __xfrm_state_destroy(struct xfrm_state *x)
540{ 555{
541 WARN_ON(x->km.state != XFRM_STATE_DEAD); 556 WARN_ON(x->km.state != XFRM_STATE_DEAD);
542 557
543 spin_lock_bh(&xfrm_state_lock);
544 list_del(&x->all);
545 spin_unlock_bh(&xfrm_state_lock);
546
547 spin_lock_bh(&xfrm_state_gc_lock); 558 spin_lock_bh(&xfrm_state_gc_lock);
548 hlist_add_head(&x->bydst, &xfrm_state_gc_list); 559 list_add_tail(&x->gclist, &xfrm_state_gc_list);
549 spin_unlock_bh(&xfrm_state_gc_lock); 560 spin_unlock_bh(&xfrm_state_gc_lock);
550 schedule_work(&xfrm_state_gc_work); 561 schedule_work(&xfrm_state_gc_work);
551} 562}
@@ -558,6 +569,8 @@ int __xfrm_state_delete(struct xfrm_state *x)
558 if (x->km.state != XFRM_STATE_DEAD) { 569 if (x->km.state != XFRM_STATE_DEAD) {
559 x->km.state = XFRM_STATE_DEAD; 570 x->km.state = XFRM_STATE_DEAD;
560 spin_lock(&xfrm_state_lock); 571 spin_lock(&xfrm_state_lock);
572 x->lastused = xfrm_state_walk_ongoing;
573 list_del_rcu(&x->all);
561 hlist_del(&x->bydst); 574 hlist_del(&x->bydst);
562 hlist_del(&x->bysrc); 575 hlist_del(&x->bysrc);
563 if (x->id.spi) 576 if (x->id.spi)
@@ -1594,6 +1607,41 @@ out:
1594} 1607}
1595EXPORT_SYMBOL(xfrm_state_walk); 1608EXPORT_SYMBOL(xfrm_state_walk);
1596 1609
1610void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
1611{
1612 walk->proto = proto;
1613 walk->state = NULL;
1614 walk->count = 0;
1615 list_add_tail(&walk->list, &xfrm_state_walks);
1616 walk->genid = ++xfrm_state_walk_ongoing;
1617}
1618EXPORT_SYMBOL(xfrm_state_walk_init);
1619
1620void xfrm_state_walk_done(struct xfrm_state_walk *walk)
1621{
1622 struct list_head *prev;
1623
1624 if (walk->state != NULL) {
1625 xfrm_state_put(walk->state);
1626 walk->state = NULL;
1627 }
1628
1629 prev = walk->list.prev;
1630 list_del(&walk->list);
1631
1632 if (prev != &xfrm_state_walks) {
1633 list_entry(prev, struct xfrm_state_walk, list)->genid =
1634 walk->genid;
1635 return;
1636 }
1637
1638 xfrm_state_walk_completed = walk->genid;
1639
1640 if (!list_empty(&xfrm_state_gc_leftovers))
1641 schedule_work(&xfrm_state_gc_work);
1642}
1643EXPORT_SYMBOL(xfrm_state_walk_done);
1644
1597 1645
1598void xfrm_replay_notify(struct xfrm_state *x, int event) 1646void xfrm_replay_notify(struct xfrm_state *x, int event)
1599{ 1647{